linux-user: Use safe_syscall wrapper for sleep syscalls
[qemu/kevin.git] / linux-user / syscall.c
bloba931919816f4f9bbed9f7f858ce8779e9d07bbe7
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/mman.h>
36 #include <sys/swap.h>
37 #include <linux/capability.h>
38 #include <sched.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <sys/poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #endif
108 #include <linux/audit.h>
109 #include "linux_loop.h"
110 #include "uname.h"
112 #include "qemu.h"
114 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
115 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117 //#define DEBUG
118 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
119 * once. This exercises the codepaths for restart.
121 //#define DEBUG_ERESTARTSYS
123 //#include <linux/msdos_fs.h>
124 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
125 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127 /* This is the size of the host kernel's sigset_t, needed where we make
128 * direct system calls that take a sigset_t pointer and a size.
130 #define SIGSET_T_SIZE (_NSIG / 8)
132 #undef _syscall0
133 #undef _syscall1
134 #undef _syscall2
135 #undef _syscall3
136 #undef _syscall4
137 #undef _syscall5
138 #undef _syscall6
140 #define _syscall0(type,name) \
141 static type name (void) \
143 return syscall(__NR_##name); \
146 #define _syscall1(type,name,type1,arg1) \
147 static type name (type1 arg1) \
149 return syscall(__NR_##name, arg1); \
152 #define _syscall2(type,name,type1,arg1,type2,arg2) \
153 static type name (type1 arg1,type2 arg2) \
155 return syscall(__NR_##name, arg1, arg2); \
158 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
159 static type name (type1 arg1,type2 arg2,type3 arg3) \
161 return syscall(__NR_##name, arg1, arg2, arg3); \
164 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
165 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
167 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
170 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
178 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
179 type5,arg5,type6,arg6) \
180 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
181 type6 arg6) \
183 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
187 #define __NR_sys_uname __NR_uname
188 #define __NR_sys_getcwd1 __NR_getcwd
189 #define __NR_sys_getdents __NR_getdents
190 #define __NR_sys_getdents64 __NR_getdents64
191 #define __NR_sys_getpriority __NR_getpriority
192 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
193 #define __NR_sys_syslog __NR_syslog
194 #define __NR_sys_futex __NR_futex
195 #define __NR_sys_inotify_init __NR_inotify_init
196 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
197 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
199 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
200 defined(__s390x__)
201 #define __NR__llseek __NR_lseek
202 #endif
204 /* Newer kernel ports have llseek() instead of _llseek() */
205 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
206 #define TARGET_NR__llseek TARGET_NR_llseek
207 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
 * errno. Used on hosts whose libc/kernel headers don't expose
 * __NR_gettid. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
218 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
219 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
220 #endif
221 #if !defined(__NR_getdents) || \
222 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
223 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
224 #endif
225 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
226 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
227 loff_t *, res, uint, wh);
228 #endif
229 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
230 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
231 #ifdef __NR_exit_group
232 _syscall1(int,exit_group,int,error_code)
233 #endif
234 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
235 _syscall1(int,set_tid_address,int *,tidptr)
236 #endif
237 #if defined(TARGET_NR_futex) && defined(__NR_futex)
238 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
239 const struct timespec *,timeout,int *,uaddr2,int,val3)
240 #endif
241 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
242 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
243 unsigned long *, user_mask_ptr);
244 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
245 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
246 unsigned long *, user_mask_ptr);
247 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
248 void *, arg);
249 _syscall2(int, capget, struct __user_cap_header_struct *, header,
250 struct __user_cap_data_struct *, data);
251 _syscall2(int, capset, struct __user_cap_header_struct *, header,
252 struct __user_cap_data_struct *, data);
253 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
254 _syscall2(int, ioprio_get, int, which, int, who)
255 #endif
256 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
257 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
258 #endif
259 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
260 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
261 #endif
/* Translation table for open/fcntl flag bits between target and host.
 * Each row is { target_mask, target_bits, host_mask, host_bits }; the
 * list is terminated by an all-zero sentinel row. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
/* Per-fd translation hooks: some fds (netlink sockets, signalfds, ...)
 * carry structured data that must be converted between host and guest
 * layouts on read/write. */
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;   /* fix up data read from the fd */
    TargetFdDataFunc target_to_host_data;   /* fix up data written to the fd */
    TargetFdAddrFunc target_to_host_addr;   /* fix up a guest sockaddr */
} TargetFdTrans;

/* Table of registered translators, indexed by fd; grown on demand. */
static TargetFdTrans **target_fd_trans;

/* Current capacity of target_fd_trans[] (always a multiple of 64). */
static unsigned int target_fd_max;
308 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
310 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
311 return target_fd_trans[fd]->target_to_host_data;
313 return NULL;
316 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
318 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
319 return target_fd_trans[fd]->host_to_target_data;
321 return NULL;
324 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
326 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
327 return target_fd_trans[fd]->target_to_host_addr;
329 return NULL;
/* Register translation hooks @trans for descriptor @fd, growing the
 * table in 64-entry slices when needed. New slots are zeroed so
 * unregistered fds read back as "no translator".
 * NOTE(review): @fd is compared against the unsigned target_fd_max, so
 * a negative fd would wrap to a huge value — callers are assumed to
 * pass only valid (non-negative) fds. */
static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;

    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        /* zero the newly-added tail of the table */
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}
347 static void fd_trans_unregister(int fd)
349 if (fd >= 0 && fd < target_fd_max) {
350 target_fd_trans[fd] = NULL;
/* Propagate fd translation state across a dup(): @newfd loses any old
 * translator, then inherits @oldfd's translator if one is registered. */
static void fd_trans_dup(int oldfd, int newfd)
{
    fd_trans_unregister(newfd);
    if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
        fd_trans_register(newfd, target_fd_trans[oldfd]);
    }
}
/* getcwd() wrapper with kernel-syscall-like semantics: on success
 * return the length of the path including the terminating NUL; on
 * failure return -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        return -1;  /* getcwd() sets errno */
    }
    return strlen(cwd) + 1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc provides utimensat(). A NULL pathname means "operate on
 * the fd itself", which maps to futimens() per POSIX. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper available; issue the raw syscall instead. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS like the kernel
 * would for a missing syscall. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers so the syscall dispatch code can call sys_inotify_*
 * uniformly whether or not the host libc provides these functions. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify, so undefine
 * the TARGET_NR_* numbers to make the syscalls report ENOSYS. */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
432 #if defined(TARGET_NR_ppoll)
433 #ifndef __NR_ppoll
434 # define __NR_ppoll -1
435 #endif
436 #define __NR_sys_ppoll __NR_ppoll
437 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
438 struct timespec *, timeout, const sigset_t *, sigmask,
439 size_t, sigsetsize)
440 #endif
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
445 #endif
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64 {
449 uint64_t rlim_cur;
450 uint64_t rlim_max;
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453 const struct host_rlimit64 *, new_limit,
454 struct host_rlimit64 *, old_limit)
455 #endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.
 * Slot value 0 means free; a slot is claimed by writing a placeholder
 * before the real timer_t is stored. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of the first free timer slot, or -1 if
 * all 32 slots are in use. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            /* reserve the slot with a non-zero placeholder */
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers.
 * Returns non-zero if the target ABI passes 64-bit syscall arguments in
 * aligned (even/odd) register pairs. */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Only the EABI variant of the ARM ABI aligns register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
492 #define ERRNO_TABLE_SIZE 1200
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
503 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
504 [EAGAIN] = TARGET_EAGAIN,
505 [EIDRM] = TARGET_EIDRM,
506 [ECHRNG] = TARGET_ECHRNG,
507 [EL2NSYNC] = TARGET_EL2NSYNC,
508 [EL3HLT] = TARGET_EL3HLT,
509 [EL3RST] = TARGET_EL3RST,
510 [ELNRNG] = TARGET_ELNRNG,
511 [EUNATCH] = TARGET_EUNATCH,
512 [ENOCSI] = TARGET_ENOCSI,
513 [EL2HLT] = TARGET_EL2HLT,
514 [EDEADLK] = TARGET_EDEADLK,
515 [ENOLCK] = TARGET_ENOLCK,
516 [EBADE] = TARGET_EBADE,
517 [EBADR] = TARGET_EBADR,
518 [EXFULL] = TARGET_EXFULL,
519 [ENOANO] = TARGET_ENOANO,
520 [EBADRQC] = TARGET_EBADRQC,
521 [EBADSLT] = TARGET_EBADSLT,
522 [EBFONT] = TARGET_EBFONT,
523 [ENOSTR] = TARGET_ENOSTR,
524 [ENODATA] = TARGET_ENODATA,
525 [ETIME] = TARGET_ETIME,
526 [ENOSR] = TARGET_ENOSR,
527 [ENONET] = TARGET_ENONET,
528 [ENOPKG] = TARGET_ENOPKG,
529 [EREMOTE] = TARGET_EREMOTE,
530 [ENOLINK] = TARGET_ENOLINK,
531 [EADV] = TARGET_EADV,
532 [ESRMNT] = TARGET_ESRMNT,
533 [ECOMM] = TARGET_ECOMM,
534 [EPROTO] = TARGET_EPROTO,
535 [EDOTDOT] = TARGET_EDOTDOT,
536 [EMULTIHOP] = TARGET_EMULTIHOP,
537 [EBADMSG] = TARGET_EBADMSG,
538 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
539 [EOVERFLOW] = TARGET_EOVERFLOW,
540 [ENOTUNIQ] = TARGET_ENOTUNIQ,
541 [EBADFD] = TARGET_EBADFD,
542 [EREMCHG] = TARGET_EREMCHG,
543 [ELIBACC] = TARGET_ELIBACC,
544 [ELIBBAD] = TARGET_ELIBBAD,
545 [ELIBSCN] = TARGET_ELIBSCN,
546 [ELIBMAX] = TARGET_ELIBMAX,
547 [ELIBEXEC] = TARGET_ELIBEXEC,
548 [EILSEQ] = TARGET_EILSEQ,
549 [ENOSYS] = TARGET_ENOSYS,
550 [ELOOP] = TARGET_ELOOP,
551 [ERESTART] = TARGET_ERESTART,
552 [ESTRPIPE] = TARGET_ESTRPIPE,
553 [ENOTEMPTY] = TARGET_ENOTEMPTY,
554 [EUSERS] = TARGET_EUSERS,
555 [ENOTSOCK] = TARGET_ENOTSOCK,
556 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
557 [EMSGSIZE] = TARGET_EMSGSIZE,
558 [EPROTOTYPE] = TARGET_EPROTOTYPE,
559 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
560 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
561 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
562 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
563 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
564 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
565 [EADDRINUSE] = TARGET_EADDRINUSE,
566 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
567 [ENETDOWN] = TARGET_ENETDOWN,
568 [ENETUNREACH] = TARGET_ENETUNREACH,
569 [ENETRESET] = TARGET_ENETRESET,
570 [ECONNABORTED] = TARGET_ECONNABORTED,
571 [ECONNRESET] = TARGET_ECONNRESET,
572 [ENOBUFS] = TARGET_ENOBUFS,
573 [EISCONN] = TARGET_EISCONN,
574 [ENOTCONN] = TARGET_ENOTCONN,
575 [EUCLEAN] = TARGET_EUCLEAN,
576 [ENOTNAM] = TARGET_ENOTNAM,
577 [ENAVAIL] = TARGET_ENAVAIL,
578 [EISNAM] = TARGET_EISNAM,
579 [EREMOTEIO] = TARGET_EREMOTEIO,
580 [ESHUTDOWN] = TARGET_ESHUTDOWN,
581 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
582 [ETIMEDOUT] = TARGET_ETIMEDOUT,
583 [ECONNREFUSED] = TARGET_ECONNREFUSED,
584 [EHOSTDOWN] = TARGET_EHOSTDOWN,
585 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
586 [EALREADY] = TARGET_EALREADY,
587 [EINPROGRESS] = TARGET_EINPROGRESS,
588 [ESTALE] = TARGET_ESTALE,
589 [ECANCELED] = TARGET_ECANCELED,
590 [ENOMEDIUM] = TARGET_ENOMEDIUM,
591 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
592 #ifdef ENOKEY
593 [ENOKEY] = TARGET_ENOKEY,
594 #endif
595 #ifdef EKEYEXPIRED
596 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
597 #endif
598 #ifdef EKEYREVOKED
599 [EKEYREVOKED] = TARGET_EKEYREVOKED,
600 #endif
601 #ifdef EKEYREJECTED
602 [EKEYREJECTED] = TARGET_EKEYREJECTED,
603 #endif
604 #ifdef EOWNERDEAD
605 [EOWNERDEAD] = TARGET_EOWNERDEAD,
606 #endif
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
609 #endif
612 static inline int host_to_target_errno(int err)
614 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
615 host_to_target_errno_table[err]) {
616 return host_to_target_errno_table[err];
618 return err;
621 static inline int target_to_host_errno(int err)
623 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
624 target_to_host_errno_table[err]) {
625 return target_to_host_errno_table[err];
627 return err;
630 static inline abi_long get_errno(abi_long ret)
632 if (ret == -1)
633 return -host_to_target_errno(errno);
634 else
635 return ret;
/* True if @ret encodes a negative errno: like the kernel/glibc syscall
 * convention, the top 4096 values of the unsigned range are errors. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
643 char *target_strerror(int err)
645 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
646 return NULL;
648 return strerror(target_to_host_errno(err));
651 #define safe_syscall0(type, name) \
652 static type safe_##name(void) \
654 return safe_syscall(__NR_##name); \
657 #define safe_syscall1(type, name, type1, arg1) \
658 static type safe_##name(type1 arg1) \
660 return safe_syscall(__NR_##name, arg1); \
663 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
664 static type safe_##name(type1 arg1, type2 arg2) \
666 return safe_syscall(__NR_##name, arg1, arg2); \
669 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
675 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
676 type4, arg4) \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
679 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
682 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
683 type4, arg4, type5, arg5) \
684 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
685 type5 arg5) \
687 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
690 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
691 type4, arg4, type5, arg5, type6, arg6) \
692 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
693 type5 arg5, type6 arg6) \
695 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
698 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
699 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
700 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
701 int, flags, mode_t, mode)
702 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
703 struct rusage *, rusage)
704 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
705 int, options, struct rusage *, rusage)
706 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
707 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
708 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
709 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
710 const struct timespec *,timeout,int *,uaddr2,int,val3)
711 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
712 safe_syscall2(int, kill, pid_t, pid, int, sig)
713 safe_syscall2(int, tkill, int, tid, int, sig)
714 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
715 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
716 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
717 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
718 socklen_t, addrlen)
719 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
720 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
721 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
722 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
723 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
724 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
725 safe_syscall2(int, flock, int, fd, int, operation)
726 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
727 const struct timespec *, uts, size_t, sigsetsize)
728 safe_syscall2(int, nanosleep, const struct timespec *, req,
729 struct timespec *, rem)
730 #ifdef TARGET_NR_clock_nanosleep
731 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
732 const struct timespec *, req, struct timespec *, rem)
733 #endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
/* msgsnd via ipc(2): msgp travels in the generic pointer argument. */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv via ipc(2), version 1: msgtype travels in the fifth argument. */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
#endif
762 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
763 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
764 size_t, len, unsigned, prio, const struct timespec *, timeout)
765 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
766 size_t, len, unsigned *, prio, const struct timespec *, timeout)
767 #endif
769 static inline int host_to_target_sock_type(int host_type)
771 int target_type;
773 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
774 case SOCK_DGRAM:
775 target_type = TARGET_SOCK_DGRAM;
776 break;
777 case SOCK_STREAM:
778 target_type = TARGET_SOCK_STREAM;
779 break;
780 default:
781 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
782 break;
785 #if defined(SOCK_CLOEXEC)
786 if (host_type & SOCK_CLOEXEC) {
787 target_type |= TARGET_SOCK_CLOEXEC;
789 #endif
791 #if defined(SOCK_NONBLOCK)
792 if (host_type & SOCK_NONBLOCK) {
793 target_type |= TARGET_SOCK_NONBLOCK;
795 #endif
797 return target_type;
/* Guest heap state for the brk(2) emulation. */
static abi_ulong target_brk;          /* current guest program break */
static abi_ulong target_original_brk; /* initial break; never shrink below */
static abi_ulong brk_page;            /* first page beyond memory mapped for the heap */

/* Record the guest's initial program break (page-aligned). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos.
 * Emulates brk(2) for the guest: a query (new_brk == 0) or a shrink
 * below the original break returns the current break; growth within
 * the already-mapped heap just moves the break; otherwise new pages
 * are mapped after brk_page. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query for the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Copy a guest fd_set (an array of abi_ulong bit words) covering the
 * first @n descriptors into a host fd_set.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad. */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* number of abi_ulong words needed to hold n bits */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;  /* running fd number across all words */
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
923 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
924 abi_ulong target_fds_addr,
925 int n)
927 if (target_fds_addr) {
928 if (copy_from_user_fdset(fds, target_fds_addr, n))
929 return -TARGET_EFAULT;
930 *fds_ptr = fds;
931 } else {
932 *fds_ptr = NULL;
934 return 0;
/* Copy a host fd_set covering the first @n descriptors out to a guest
 * fd_set (an array of abi_ulong bit words).
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad. */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    /* number of abi_ulong words needed to hold n bits */
    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;  /* running fd number across all words */
    for (i = 0; i < nw; i++) {
        /* pack TARGET_ABI_BITS descriptor bits into one word */
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
/* Host clock tick rate (USER_HZ) as seen via times(2)/clock_t values. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's tick rate. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* widen before multiplying to avoid overflow */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to the guest at @target_addr,
 * byte-swapping each field to the target's endianness.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1012 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1014 abi_ulong target_rlim_swap;
1015 rlim_t result;
1017 target_rlim_swap = tswapal(target_rlim);
1018 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1019 return RLIM_INFINITY;
1021 result = target_rlim_swap;
1022 if (target_rlim_swap != (rlim_t)result)
1023 return RLIM_INFINITY;
1025 return result;
1028 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1030 abi_ulong target_rlim_swap;
1031 abi_ulong result;
1033 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1034 target_rlim_swap = TARGET_RLIM_INFINITY;
1035 else
1036 target_rlim_swap = rlim;
1037 result = tswapal(target_rlim_swap);
1039 return result;
1042 static inline int target_to_host_resource(int code)
1044 switch (code) {
1045 case TARGET_RLIMIT_AS:
1046 return RLIMIT_AS;
1047 case TARGET_RLIMIT_CORE:
1048 return RLIMIT_CORE;
1049 case TARGET_RLIMIT_CPU:
1050 return RLIMIT_CPU;
1051 case TARGET_RLIMIT_DATA:
1052 return RLIMIT_DATA;
1053 case TARGET_RLIMIT_FSIZE:
1054 return RLIMIT_FSIZE;
1055 case TARGET_RLIMIT_LOCKS:
1056 return RLIMIT_LOCKS;
1057 case TARGET_RLIMIT_MEMLOCK:
1058 return RLIMIT_MEMLOCK;
1059 case TARGET_RLIMIT_MSGQUEUE:
1060 return RLIMIT_MSGQUEUE;
1061 case TARGET_RLIMIT_NICE:
1062 return RLIMIT_NICE;
1063 case TARGET_RLIMIT_NOFILE:
1064 return RLIMIT_NOFILE;
1065 case TARGET_RLIMIT_NPROC:
1066 return RLIMIT_NPROC;
1067 case TARGET_RLIMIT_RSS:
1068 return RLIMIT_RSS;
1069 case TARGET_RLIMIT_RTPRIO:
1070 return RLIMIT_RTPRIO;
1071 case TARGET_RLIMIT_SIGPENDING:
1072 return RLIMIT_SIGPENDING;
1073 case TARGET_RLIMIT_STACK:
1074 return RLIMIT_STACK;
1075 default:
1076 return code;
1080 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1081 abi_ulong target_tv_addr)
1083 struct target_timeval *target_tv;
1085 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1086 return -TARGET_EFAULT;
1088 __get_user(tv->tv_sec, &target_tv->tv_sec);
1089 __get_user(tv->tv_usec, &target_tv->tv_usec);
1091 unlock_user_struct(target_tv, target_tv_addr, 0);
1093 return 0;
1096 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1097 const struct timeval *tv)
1099 struct target_timeval *target_tv;
1101 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1102 return -TARGET_EFAULT;
1104 __put_user(tv->tv_sec, &target_tv->tv_sec);
1105 __put_user(tv->tv_usec, &target_tv->tv_usec);
1107 unlock_user_struct(target_tv, target_tv_addr, 1);
1109 return 0;
1112 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1113 abi_ulong target_tz_addr)
1115 struct target_timezone *target_tz;
1117 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1118 return -TARGET_EFAULT;
1121 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1122 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1124 unlock_user_struct(target_tz, target_tz_addr, 0);
1126 return 0;
1129 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1130 #include <mqueue.h>
1132 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1133 abi_ulong target_mq_attr_addr)
1135 struct target_mq_attr *target_mq_attr;
1137 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1138 target_mq_attr_addr, 1))
1139 return -TARGET_EFAULT;
1141 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1142 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1143 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1144 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1146 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1148 return 0;
1151 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1152 const struct mq_attr *attr)
1154 struct target_mq_attr *target_mq_attr;
1156 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1157 target_mq_attr_addr, 0))
1158 return -TARGET_EFAULT;
1160 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1161 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1162 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1163 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1165 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1167 return 0;
1169 #endif
1171 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1172 /* do_select() must return target values and target errnos. */
1173 static abi_long do_select(int n,
1174 abi_ulong rfd_addr, abi_ulong wfd_addr,
1175 abi_ulong efd_addr, abi_ulong target_tv_addr)
1177 fd_set rfds, wfds, efds;
1178 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1179 struct timeval tv;
1180 struct timespec ts, *ts_ptr;
1181 abi_long ret;
1183 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1184 if (ret) {
1185 return ret;
1187 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1188 if (ret) {
1189 return ret;
1191 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1192 if (ret) {
1193 return ret;
1196 if (target_tv_addr) {
1197 if (copy_from_user_timeval(&tv, target_tv_addr))
1198 return -TARGET_EFAULT;
1199 ts.tv_sec = tv.tv_sec;
1200 ts.tv_nsec = tv.tv_usec * 1000;
1201 ts_ptr = &ts;
1202 } else {
1203 ts_ptr = NULL;
1206 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1207 ts_ptr, NULL));
1209 if (!is_error(ret)) {
1210 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1211 return -TARGET_EFAULT;
1212 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1213 return -TARGET_EFAULT;
1214 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1215 return -TARGET_EFAULT;
1217 if (target_tv_addr) {
1218 tv.tv_sec = ts.tv_sec;
1219 tv.tv_usec = ts.tv_nsec / 1000;
1220 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1221 return -TARGET_EFAULT;
1226 return ret;
1228 #endif
1230 static abi_long do_pipe2(int host_pipe[], int flags)
1232 #ifdef CONFIG_PIPE2
1233 return pipe2(host_pipe, flags);
1234 #else
1235 return -ENOSYS;
1236 #endif
1239 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1240 int flags, int is_pipe2)
1242 int host_pipe[2];
1243 abi_long ret;
1244 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1246 if (is_error(ret))
1247 return get_errno(ret);
1249 /* Several targets have special calling conventions for the original
1250 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1251 if (!is_pipe2) {
1252 #if defined(TARGET_ALPHA)
1253 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1254 return host_pipe[0];
1255 #elif defined(TARGET_MIPS)
1256 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1257 return host_pipe[0];
1258 #elif defined(TARGET_SH4)
1259 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1260 return host_pipe[0];
1261 #elif defined(TARGET_SPARC)
1262 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1263 return host_pipe[0];
1264 #endif
1267 if (put_user_s32(host_pipe[0], pipedes)
1268 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1269 return -TARGET_EFAULT;
1270 return get_errno(ret);
1273 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1274 abi_ulong target_addr,
1275 socklen_t len)
1277 struct target_ip_mreqn *target_smreqn;
1279 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1280 if (!target_smreqn)
1281 return -TARGET_EFAULT;
1282 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1283 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1284 if (len == sizeof(struct target_ip_mreqn))
1285 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1286 unlock_user(target_smreqn, target_addr, 0);
1288 return 0;
1291 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1292 abi_ulong target_addr,
1293 socklen_t len)
1295 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1296 sa_family_t sa_family;
1297 struct target_sockaddr *target_saddr;
1299 if (fd_trans_target_to_host_addr(fd)) {
1300 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1303 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1304 if (!target_saddr)
1305 return -TARGET_EFAULT;
1307 sa_family = tswap16(target_saddr->sa_family);
1309 /* Oops. The caller might send a incomplete sun_path; sun_path
1310 * must be terminated by \0 (see the manual page), but
1311 * unfortunately it is quite common to specify sockaddr_un
1312 * length as "strlen(x->sun_path)" while it should be
1313 * "strlen(...) + 1". We'll fix that here if needed.
1314 * Linux kernel has a similar feature.
1317 if (sa_family == AF_UNIX) {
1318 if (len < unix_maxlen && len > 0) {
1319 char *cp = (char*)target_saddr;
1321 if ( cp[len-1] && !cp[len] )
1322 len++;
1324 if (len > unix_maxlen)
1325 len = unix_maxlen;
1328 memcpy(addr, target_saddr, len);
1329 addr->sa_family = sa_family;
1330 if (sa_family == AF_NETLINK) {
1331 struct sockaddr_nl *nladdr;
1333 nladdr = (struct sockaddr_nl *)addr;
1334 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1335 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1336 } else if (sa_family == AF_PACKET) {
1337 struct target_sockaddr_ll *lladdr;
1339 lladdr = (struct target_sockaddr_ll *)addr;
1340 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1341 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1343 unlock_user(target_saddr, target_addr, 0);
1345 return 0;
1348 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1349 struct sockaddr *addr,
1350 socklen_t len)
1352 struct target_sockaddr *target_saddr;
1354 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1355 if (!target_saddr)
1356 return -TARGET_EFAULT;
1357 memcpy(target_saddr, addr, len);
1358 target_saddr->sa_family = tswap16(addr->sa_family);
1359 if (addr->sa_family == AF_NETLINK) {
1360 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1361 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1362 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1364 unlock_user(target_saddr, target_addr, len);
1366 return 0;
1369 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1370 struct target_msghdr *target_msgh)
1372 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1373 abi_long msg_controllen;
1374 abi_ulong target_cmsg_addr;
1375 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1376 socklen_t space = 0;
1378 msg_controllen = tswapal(target_msgh->msg_controllen);
1379 if (msg_controllen < sizeof (struct target_cmsghdr))
1380 goto the_end;
1381 target_cmsg_addr = tswapal(target_msgh->msg_control);
1382 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1383 target_cmsg_start = target_cmsg;
1384 if (!target_cmsg)
1385 return -TARGET_EFAULT;
1387 while (cmsg && target_cmsg) {
1388 void *data = CMSG_DATA(cmsg);
1389 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1391 int len = tswapal(target_cmsg->cmsg_len)
1392 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1394 space += CMSG_SPACE(len);
1395 if (space > msgh->msg_controllen) {
1396 space -= CMSG_SPACE(len);
1397 /* This is a QEMU bug, since we allocated the payload
1398 * area ourselves (unlike overflow in host-to-target
1399 * conversion, which is just the guest giving us a buffer
1400 * that's too small). It can't happen for the payload types
1401 * we currently support; if it becomes an issue in future
1402 * we would need to improve our allocation strategy to
1403 * something more intelligent than "twice the size of the
1404 * target buffer we're reading from".
1406 gemu_log("Host cmsg overflow\n");
1407 break;
1410 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1411 cmsg->cmsg_level = SOL_SOCKET;
1412 } else {
1413 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1415 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1416 cmsg->cmsg_len = CMSG_LEN(len);
1418 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1419 int *fd = (int *)data;
1420 int *target_fd = (int *)target_data;
1421 int i, numfds = len / sizeof(int);
1423 for (i = 0; i < numfds; i++) {
1424 __get_user(fd[i], target_fd + i);
1426 } else if (cmsg->cmsg_level == SOL_SOCKET
1427 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1428 struct ucred *cred = (struct ucred *)data;
1429 struct target_ucred *target_cred =
1430 (struct target_ucred *)target_data;
1432 __get_user(cred->pid, &target_cred->pid);
1433 __get_user(cred->uid, &target_cred->uid);
1434 __get_user(cred->gid, &target_cred->gid);
1435 } else {
1436 gemu_log("Unsupported ancillary data: %d/%d\n",
1437 cmsg->cmsg_level, cmsg->cmsg_type);
1438 memcpy(data, target_data, len);
1441 cmsg = CMSG_NXTHDR(msgh, cmsg);
1442 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1443 target_cmsg_start);
1445 unlock_user(target_cmsg, target_cmsg_addr, 0);
1446 the_end:
1447 msgh->msg_controllen = space;
1448 return 0;
1451 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1452 struct msghdr *msgh)
1454 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1455 abi_long msg_controllen;
1456 abi_ulong target_cmsg_addr;
1457 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1458 socklen_t space = 0;
1460 msg_controllen = tswapal(target_msgh->msg_controllen);
1461 if (msg_controllen < sizeof (struct target_cmsghdr))
1462 goto the_end;
1463 target_cmsg_addr = tswapal(target_msgh->msg_control);
1464 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1465 target_cmsg_start = target_cmsg;
1466 if (!target_cmsg)
1467 return -TARGET_EFAULT;
1469 while (cmsg && target_cmsg) {
1470 void *data = CMSG_DATA(cmsg);
1471 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1473 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1474 int tgt_len, tgt_space;
1476 /* We never copy a half-header but may copy half-data;
1477 * this is Linux's behaviour in put_cmsg(). Note that
1478 * truncation here is a guest problem (which we report
1479 * to the guest via the CTRUNC bit), unlike truncation
1480 * in target_to_host_cmsg, which is a QEMU bug.
1482 if (msg_controllen < sizeof(struct cmsghdr)) {
1483 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1484 break;
1487 if (cmsg->cmsg_level == SOL_SOCKET) {
1488 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1489 } else {
1490 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1492 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1494 tgt_len = TARGET_CMSG_LEN(len);
1496 /* Payload types which need a different size of payload on
1497 * the target must adjust tgt_len here.
1499 switch (cmsg->cmsg_level) {
1500 case SOL_SOCKET:
1501 switch (cmsg->cmsg_type) {
1502 case SO_TIMESTAMP:
1503 tgt_len = sizeof(struct target_timeval);
1504 break;
1505 default:
1506 break;
1508 default:
1509 break;
1512 if (msg_controllen < tgt_len) {
1513 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1514 tgt_len = msg_controllen;
1517 /* We must now copy-and-convert len bytes of payload
1518 * into tgt_len bytes of destination space. Bear in mind
1519 * that in both source and destination we may be dealing
1520 * with a truncated value!
1522 switch (cmsg->cmsg_level) {
1523 case SOL_SOCKET:
1524 switch (cmsg->cmsg_type) {
1525 case SCM_RIGHTS:
1527 int *fd = (int *)data;
1528 int *target_fd = (int *)target_data;
1529 int i, numfds = tgt_len / sizeof(int);
1531 for (i = 0; i < numfds; i++) {
1532 __put_user(fd[i], target_fd + i);
1534 break;
1536 case SO_TIMESTAMP:
1538 struct timeval *tv = (struct timeval *)data;
1539 struct target_timeval *target_tv =
1540 (struct target_timeval *)target_data;
1542 if (len != sizeof(struct timeval) ||
1543 tgt_len != sizeof(struct target_timeval)) {
1544 goto unimplemented;
1547 /* copy struct timeval to target */
1548 __put_user(tv->tv_sec, &target_tv->tv_sec);
1549 __put_user(tv->tv_usec, &target_tv->tv_usec);
1550 break;
1552 case SCM_CREDENTIALS:
1554 struct ucred *cred = (struct ucred *)data;
1555 struct target_ucred *target_cred =
1556 (struct target_ucred *)target_data;
1558 __put_user(cred->pid, &target_cred->pid);
1559 __put_user(cred->uid, &target_cred->uid);
1560 __put_user(cred->gid, &target_cred->gid);
1561 break;
1563 default:
1564 goto unimplemented;
1566 break;
1568 default:
1569 unimplemented:
1570 gemu_log("Unsupported ancillary data: %d/%d\n",
1571 cmsg->cmsg_level, cmsg->cmsg_type);
1572 memcpy(target_data, data, MIN(len, tgt_len));
1573 if (tgt_len > len) {
1574 memset(target_data + len, 0, tgt_len - len);
1578 target_cmsg->cmsg_len = tswapal(tgt_len);
1579 tgt_space = TARGET_CMSG_SPACE(len);
1580 if (msg_controllen < tgt_space) {
1581 tgt_space = msg_controllen;
1583 msg_controllen -= tgt_space;
1584 space += tgt_space;
1585 cmsg = CMSG_NXTHDR(msgh, cmsg);
1586 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1587 target_cmsg_start);
1589 unlock_user(target_cmsg, target_cmsg_addr, space);
1590 the_end:
1591 target_msgh->msg_controllen = tswapal(space);
1592 return 0;
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1604 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1605 size_t len,
1606 abi_long (*host_to_target_nlmsg)
1607 (struct nlmsghdr *))
1609 uint32_t nlmsg_len;
1610 abi_long ret;
1612 while (len > sizeof(struct nlmsghdr)) {
1614 nlmsg_len = nlh->nlmsg_len;
1615 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1616 nlmsg_len > len) {
1617 break;
1620 switch (nlh->nlmsg_type) {
1621 case NLMSG_DONE:
1622 tswap_nlmsghdr(nlh);
1623 return 0;
1624 case NLMSG_NOOP:
1625 break;
1626 case NLMSG_ERROR:
1628 struct nlmsgerr *e = NLMSG_DATA(nlh);
1629 e->error = tswap32(e->error);
1630 tswap_nlmsghdr(&e->msg);
1631 tswap_nlmsghdr(nlh);
1632 return 0;
1634 default:
1635 ret = host_to_target_nlmsg(nlh);
1636 if (ret < 0) {
1637 tswap_nlmsghdr(nlh);
1638 return ret;
1640 break;
1642 tswap_nlmsghdr(nlh);
1643 len -= NLMSG_ALIGN(nlmsg_len);
1644 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
1646 return 0;
1649 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1650 size_t len,
1651 abi_long (*target_to_host_nlmsg)
1652 (struct nlmsghdr *))
1654 int ret;
1656 while (len > sizeof(struct nlmsghdr)) {
1657 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1658 tswap32(nlh->nlmsg_len) > len) {
1659 break;
1661 tswap_nlmsghdr(nlh);
1662 switch (nlh->nlmsg_type) {
1663 case NLMSG_DONE:
1664 return 0;
1665 case NLMSG_NOOP:
1666 break;
1667 case NLMSG_ERROR:
1669 struct nlmsgerr *e = NLMSG_DATA(nlh);
1670 e->error = tswap32(e->error);
1671 tswap_nlmsghdr(&e->msg);
1673 default:
1674 ret = target_to_host_nlmsg(nlh);
1675 if (ret < 0) {
1676 return ret;
1679 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1680 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1682 return 0;
1685 #ifdef CONFIG_RTNETLINK
1686 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1687 size_t len,
1688 abi_long (*host_to_target_rtattr)
1689 (struct rtattr *))
1691 unsigned short rta_len;
1692 abi_long ret;
1694 while (len > sizeof(struct rtattr)) {
1695 rta_len = rtattr->rta_len;
1696 if (rta_len < sizeof(struct rtattr) ||
1697 rta_len > len) {
1698 break;
1700 ret = host_to_target_rtattr(rtattr);
1701 rtattr->rta_len = tswap16(rtattr->rta_len);
1702 rtattr->rta_type = tswap16(rtattr->rta_type);
1703 if (ret < 0) {
1704 return ret;
1706 len -= RTA_ALIGN(rta_len);
1707 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1709 return 0;
1712 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
1714 uint32_t *u32;
1715 struct rtnl_link_stats *st;
1716 struct rtnl_link_stats64 *st64;
1717 struct rtnl_link_ifmap *map;
1719 switch (rtattr->rta_type) {
1720 /* binary stream */
1721 case IFLA_ADDRESS:
1722 case IFLA_BROADCAST:
1723 /* string */
1724 case IFLA_IFNAME:
1725 case IFLA_QDISC:
1726 break;
1727 /* uin8_t */
1728 case IFLA_OPERSTATE:
1729 case IFLA_LINKMODE:
1730 case IFLA_CARRIER:
1731 case IFLA_PROTO_DOWN:
1732 break;
1733 /* uint32_t */
1734 case IFLA_MTU:
1735 case IFLA_LINK:
1736 case IFLA_WEIGHT:
1737 case IFLA_TXQLEN:
1738 case IFLA_CARRIER_CHANGES:
1739 case IFLA_NUM_RX_QUEUES:
1740 case IFLA_NUM_TX_QUEUES:
1741 case IFLA_PROMISCUITY:
1742 case IFLA_EXT_MASK:
1743 case IFLA_LINK_NETNSID:
1744 case IFLA_GROUP:
1745 case IFLA_MASTER:
1746 case IFLA_NUM_VF:
1747 u32 = RTA_DATA(rtattr);
1748 *u32 = tswap32(*u32);
1749 break;
1750 /* struct rtnl_link_stats */
1751 case IFLA_STATS:
1752 st = RTA_DATA(rtattr);
1753 st->rx_packets = tswap32(st->rx_packets);
1754 st->tx_packets = tswap32(st->tx_packets);
1755 st->rx_bytes = tswap32(st->rx_bytes);
1756 st->tx_bytes = tswap32(st->tx_bytes);
1757 st->rx_errors = tswap32(st->rx_errors);
1758 st->tx_errors = tswap32(st->tx_errors);
1759 st->rx_dropped = tswap32(st->rx_dropped);
1760 st->tx_dropped = tswap32(st->tx_dropped);
1761 st->multicast = tswap32(st->multicast);
1762 st->collisions = tswap32(st->collisions);
1764 /* detailed rx_errors: */
1765 st->rx_length_errors = tswap32(st->rx_length_errors);
1766 st->rx_over_errors = tswap32(st->rx_over_errors);
1767 st->rx_crc_errors = tswap32(st->rx_crc_errors);
1768 st->rx_frame_errors = tswap32(st->rx_frame_errors);
1769 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
1770 st->rx_missed_errors = tswap32(st->rx_missed_errors);
1772 /* detailed tx_errors */
1773 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
1774 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
1775 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
1776 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
1777 st->tx_window_errors = tswap32(st->tx_window_errors);
1779 /* for cslip etc */
1780 st->rx_compressed = tswap32(st->rx_compressed);
1781 st->tx_compressed = tswap32(st->tx_compressed);
1782 break;
1783 /* struct rtnl_link_stats64 */
1784 case IFLA_STATS64:
1785 st64 = RTA_DATA(rtattr);
1786 st64->rx_packets = tswap64(st64->rx_packets);
1787 st64->tx_packets = tswap64(st64->tx_packets);
1788 st64->rx_bytes = tswap64(st64->rx_bytes);
1789 st64->tx_bytes = tswap64(st64->tx_bytes);
1790 st64->rx_errors = tswap64(st64->rx_errors);
1791 st64->tx_errors = tswap64(st64->tx_errors);
1792 st64->rx_dropped = tswap64(st64->rx_dropped);
1793 st64->tx_dropped = tswap64(st64->tx_dropped);
1794 st64->multicast = tswap64(st64->multicast);
1795 st64->collisions = tswap64(st64->collisions);
1797 /* detailed rx_errors: */
1798 st64->rx_length_errors = tswap64(st64->rx_length_errors);
1799 st64->rx_over_errors = tswap64(st64->rx_over_errors);
1800 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
1801 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
1802 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
1803 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
1805 /* detailed tx_errors */
1806 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
1807 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
1808 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
1809 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
1810 st64->tx_window_errors = tswap64(st64->tx_window_errors);
1812 /* for cslip etc */
1813 st64->rx_compressed = tswap64(st64->rx_compressed);
1814 st64->tx_compressed = tswap64(st64->tx_compressed);
1815 break;
1816 /* struct rtnl_link_ifmap */
1817 case IFLA_MAP:
1818 map = RTA_DATA(rtattr);
1819 map->mem_start = tswap64(map->mem_start);
1820 map->mem_end = tswap64(map->mem_end);
1821 map->base_addr = tswap64(map->base_addr);
1822 map->irq = tswap16(map->irq);
1823 break;
1824 /* nested */
1825 case IFLA_AF_SPEC:
1826 case IFLA_LINKINFO:
1827 /* FIXME: implement nested type */
1828 gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
1829 break;
1830 default:
1831 gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
1832 break;
1834 return 0;
1837 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
1839 uint32_t *u32;
1840 struct ifa_cacheinfo *ci;
1842 switch (rtattr->rta_type) {
1843 /* binary: depends on family type */
1844 case IFA_ADDRESS:
1845 case IFA_LOCAL:
1846 break;
1847 /* string */
1848 case IFA_LABEL:
1849 break;
1850 /* u32 */
1851 case IFA_FLAGS:
1852 case IFA_BROADCAST:
1853 u32 = RTA_DATA(rtattr);
1854 *u32 = tswap32(*u32);
1855 break;
1856 /* struct ifa_cacheinfo */
1857 case IFA_CACHEINFO:
1858 ci = RTA_DATA(rtattr);
1859 ci->ifa_prefered = tswap32(ci->ifa_prefered);
1860 ci->ifa_valid = tswap32(ci->ifa_valid);
1861 ci->cstamp = tswap32(ci->cstamp);
1862 ci->tstamp = tswap32(ci->tstamp);
1863 break;
1864 default:
1865 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
1866 break;
1868 return 0;
1871 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
1873 uint32_t *u32;
1874 switch (rtattr->rta_type) {
1875 /* binary: depends on family type */
1876 case RTA_GATEWAY:
1877 case RTA_DST:
1878 case RTA_PREFSRC:
1879 break;
1880 /* u32 */
1881 case RTA_PRIORITY:
1882 case RTA_TABLE:
1883 case RTA_OIF:
1884 u32 = RTA_DATA(rtattr);
1885 *u32 = tswap32(*u32);
1886 break;
1887 default:
1888 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
1889 break;
1891 return 0;
1894 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
1895 uint32_t rtattr_len)
1897 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1898 host_to_target_data_link_rtattr);
1901 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
1902 uint32_t rtattr_len)
1904 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1905 host_to_target_data_addr_rtattr);
1908 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
1909 uint32_t rtattr_len)
1911 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1912 host_to_target_data_route_rtattr);
1915 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
1917 uint32_t nlmsg_len;
1918 struct ifinfomsg *ifi;
1919 struct ifaddrmsg *ifa;
1920 struct rtmsg *rtm;
1922 nlmsg_len = nlh->nlmsg_len;
1923 switch (nlh->nlmsg_type) {
1924 case RTM_NEWLINK:
1925 case RTM_DELLINK:
1926 case RTM_GETLINK:
1927 ifi = NLMSG_DATA(nlh);
1928 ifi->ifi_type = tswap16(ifi->ifi_type);
1929 ifi->ifi_index = tswap32(ifi->ifi_index);
1930 ifi->ifi_flags = tswap32(ifi->ifi_flags);
1931 ifi->ifi_change = tswap32(ifi->ifi_change);
1932 host_to_target_link_rtattr(IFLA_RTA(ifi),
1933 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
1934 break;
1935 case RTM_NEWADDR:
1936 case RTM_DELADDR:
1937 case RTM_GETADDR:
1938 ifa = NLMSG_DATA(nlh);
1939 ifa->ifa_index = tswap32(ifa->ifa_index);
1940 host_to_target_addr_rtattr(IFA_RTA(ifa),
1941 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
1942 break;
1943 case RTM_NEWROUTE:
1944 case RTM_DELROUTE:
1945 case RTM_GETROUTE:
1946 rtm = NLMSG_DATA(nlh);
1947 rtm->rtm_flags = tswap32(rtm->rtm_flags);
1948 host_to_target_route_rtattr(RTM_RTA(rtm),
1949 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
1950 break;
1951 default:
1952 return -TARGET_EINVAL;
1954 return 0;
1957 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
1958 size_t len)
1960 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
1963 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
1964 size_t len,
1965 abi_long (*target_to_host_rtattr)
1966 (struct rtattr *))
1968 abi_long ret;
1970 while (len >= sizeof(struct rtattr)) {
1971 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
1972 tswap16(rtattr->rta_len) > len) {
1973 break;
1975 rtattr->rta_len = tswap16(rtattr->rta_len);
1976 rtattr->rta_type = tswap16(rtattr->rta_type);
1977 ret = target_to_host_rtattr(rtattr);
1978 if (ret < 0) {
1979 return ret;
1981 len -= RTA_ALIGN(rtattr->rta_len);
1982 rtattr = (struct rtattr *)(((char *)rtattr) +
1983 RTA_ALIGN(rtattr->rta_len));
1985 return 0;
1988 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
1990 switch (rtattr->rta_type) {
1991 default:
1992 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
1993 break;
1995 return 0;
1998 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2000 switch (rtattr->rta_type) {
2001 /* binary: depends on family type */
2002 case IFA_LOCAL:
2003 case IFA_ADDRESS:
2004 break;
2005 default:
2006 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2007 break;
2009 return 0;
2012 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2014 uint32_t *u32;
2015 switch (rtattr->rta_type) {
2016 /* binary: depends on family type */
2017 case RTA_DST:
2018 case RTA_SRC:
2019 case RTA_GATEWAY:
2020 break;
2021 /* u32 */
2022 case RTA_OIF:
2023 u32 = RTA_DATA(rtattr);
2024 *u32 = tswap32(*u32);
2025 break;
2026 default:
2027 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2028 break;
2030 return 0;
2033 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2034 uint32_t rtattr_len)
2036 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2037 target_to_host_data_link_rtattr);
2040 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2041 uint32_t rtattr_len)
2043 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2044 target_to_host_data_addr_rtattr);
2047 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2048 uint32_t rtattr_len)
2050 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2051 target_to_host_data_route_rtattr);
2054 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2056 struct ifinfomsg *ifi;
2057 struct ifaddrmsg *ifa;
2058 struct rtmsg *rtm;
2060 switch (nlh->nlmsg_type) {
2061 case RTM_GETLINK:
2062 break;
2063 case RTM_NEWLINK:
2064 case RTM_DELLINK:
2065 ifi = NLMSG_DATA(nlh);
2066 ifi->ifi_type = tswap16(ifi->ifi_type);
2067 ifi->ifi_index = tswap32(ifi->ifi_index);
2068 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2069 ifi->ifi_change = tswap32(ifi->ifi_change);
2070 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2071 NLMSG_LENGTH(sizeof(*ifi)));
2072 break;
2073 case RTM_GETADDR:
2074 case RTM_NEWADDR:
2075 case RTM_DELADDR:
2076 ifa = NLMSG_DATA(nlh);
2077 ifa->ifa_index = tswap32(ifa->ifa_index);
2078 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2079 NLMSG_LENGTH(sizeof(*ifa)));
2080 break;
2081 case RTM_GETROUTE:
2082 break;
2083 case RTM_NEWROUTE:
2084 case RTM_DELROUTE:
2085 rtm = NLMSG_DATA(nlh);
2086 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2087 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2088 NLMSG_LENGTH(sizeof(*rtm)));
2089 break;
2090 default:
2091 return -TARGET_EOPNOTSUPP;
2093 return 0;
2096 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2098 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2100 #endif /* CONFIG_RTNETLINK */
2102 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2104 switch (nlh->nlmsg_type) {
2105 default:
2106 gemu_log("Unknown host audit message type %d\n",
2107 nlh->nlmsg_type);
2108 return -TARGET_EINVAL;
2110 return 0;
2113 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2114 size_t len)
2116 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2119 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2121 switch (nlh->nlmsg_type) {
2122 case AUDIT_USER:
2123 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2124 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2125 break;
2126 default:
2127 gemu_log("Unknown target audit message type %d\n",
2128 nlh->nlmsg_type);
2129 return -TARGET_EINVAL;
2132 return 0;
/* Walk every audit netlink message in a guest buffer and validate /
 * convert each one to host representation.
 */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2140 /* do_setsockopt() Must return target values and target errnos. */
2141 static abi_long do_setsockopt(int sockfd, int level, int optname,
2142 abi_ulong optval_addr, socklen_t optlen)
2144 abi_long ret;
2145 int val;
2146 struct ip_mreqn *ip_mreq;
2147 struct ip_mreq_source *ip_mreq_source;
2149 switch(level) {
2150 case SOL_TCP:
2151 /* TCP options all take an 'int' value. */
2152 if (optlen < sizeof(uint32_t))
2153 return -TARGET_EINVAL;
2155 if (get_user_u32(val, optval_addr))
2156 return -TARGET_EFAULT;
2157 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2158 break;
2159 case SOL_IP:
2160 switch(optname) {
2161 case IP_TOS:
2162 case IP_TTL:
2163 case IP_HDRINCL:
2164 case IP_ROUTER_ALERT:
2165 case IP_RECVOPTS:
2166 case IP_RETOPTS:
2167 case IP_PKTINFO:
2168 case IP_MTU_DISCOVER:
2169 case IP_RECVERR:
2170 case IP_RECVTOS:
2171 #ifdef IP_FREEBIND
2172 case IP_FREEBIND:
2173 #endif
2174 case IP_MULTICAST_TTL:
2175 case IP_MULTICAST_LOOP:
2176 val = 0;
2177 if (optlen >= sizeof(uint32_t)) {
2178 if (get_user_u32(val, optval_addr))
2179 return -TARGET_EFAULT;
2180 } else if (optlen >= 1) {
2181 if (get_user_u8(val, optval_addr))
2182 return -TARGET_EFAULT;
2184 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2185 break;
2186 case IP_ADD_MEMBERSHIP:
2187 case IP_DROP_MEMBERSHIP:
2188 if (optlen < sizeof (struct target_ip_mreq) ||
2189 optlen > sizeof (struct target_ip_mreqn))
2190 return -TARGET_EINVAL;
2192 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2193 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2194 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2195 break;
2197 case IP_BLOCK_SOURCE:
2198 case IP_UNBLOCK_SOURCE:
2199 case IP_ADD_SOURCE_MEMBERSHIP:
2200 case IP_DROP_SOURCE_MEMBERSHIP:
2201 if (optlen != sizeof (struct target_ip_mreq_source))
2202 return -TARGET_EINVAL;
2204 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2205 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2206 unlock_user (ip_mreq_source, optval_addr, 0);
2207 break;
2209 default:
2210 goto unimplemented;
2212 break;
2213 case SOL_IPV6:
2214 switch (optname) {
2215 case IPV6_MTU_DISCOVER:
2216 case IPV6_MTU:
2217 case IPV6_V6ONLY:
2218 case IPV6_RECVPKTINFO:
2219 val = 0;
2220 if (optlen < sizeof(uint32_t)) {
2221 return -TARGET_EINVAL;
2223 if (get_user_u32(val, optval_addr)) {
2224 return -TARGET_EFAULT;
2226 ret = get_errno(setsockopt(sockfd, level, optname,
2227 &val, sizeof(val)));
2228 break;
2229 default:
2230 goto unimplemented;
2232 break;
2233 case SOL_RAW:
2234 switch (optname) {
2235 case ICMP_FILTER:
2236 /* struct icmp_filter takes an u32 value */
2237 if (optlen < sizeof(uint32_t)) {
2238 return -TARGET_EINVAL;
2241 if (get_user_u32(val, optval_addr)) {
2242 return -TARGET_EFAULT;
2244 ret = get_errno(setsockopt(sockfd, level, optname,
2245 &val, sizeof(val)));
2246 break;
2248 default:
2249 goto unimplemented;
2251 break;
2252 case TARGET_SOL_SOCKET:
2253 switch (optname) {
2254 case TARGET_SO_RCVTIMEO:
2256 struct timeval tv;
2258 optname = SO_RCVTIMEO;
2260 set_timeout:
2261 if (optlen != sizeof(struct target_timeval)) {
2262 return -TARGET_EINVAL;
2265 if (copy_from_user_timeval(&tv, optval_addr)) {
2266 return -TARGET_EFAULT;
2269 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2270 &tv, sizeof(tv)));
2271 return ret;
2273 case TARGET_SO_SNDTIMEO:
2274 optname = SO_SNDTIMEO;
2275 goto set_timeout;
2276 case TARGET_SO_ATTACH_FILTER:
2278 struct target_sock_fprog *tfprog;
2279 struct target_sock_filter *tfilter;
2280 struct sock_fprog fprog;
2281 struct sock_filter *filter;
2282 int i;
2284 if (optlen != sizeof(*tfprog)) {
2285 return -TARGET_EINVAL;
2287 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2288 return -TARGET_EFAULT;
2290 if (!lock_user_struct(VERIFY_READ, tfilter,
2291 tswapal(tfprog->filter), 0)) {
2292 unlock_user_struct(tfprog, optval_addr, 1);
2293 return -TARGET_EFAULT;
2296 fprog.len = tswap16(tfprog->len);
2297 filter = g_try_new(struct sock_filter, fprog.len);
2298 if (filter == NULL) {
2299 unlock_user_struct(tfilter, tfprog->filter, 1);
2300 unlock_user_struct(tfprog, optval_addr, 1);
2301 return -TARGET_ENOMEM;
2303 for (i = 0; i < fprog.len; i++) {
2304 filter[i].code = tswap16(tfilter[i].code);
2305 filter[i].jt = tfilter[i].jt;
2306 filter[i].jf = tfilter[i].jf;
2307 filter[i].k = tswap32(tfilter[i].k);
2309 fprog.filter = filter;
2311 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2312 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2313 g_free(filter);
2315 unlock_user_struct(tfilter, tfprog->filter, 1);
2316 unlock_user_struct(tfprog, optval_addr, 1);
2317 return ret;
2319 case TARGET_SO_BINDTODEVICE:
2321 char *dev_ifname, *addr_ifname;
2323 if (optlen > IFNAMSIZ - 1) {
2324 optlen = IFNAMSIZ - 1;
2326 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2327 if (!dev_ifname) {
2328 return -TARGET_EFAULT;
2330 optname = SO_BINDTODEVICE;
2331 addr_ifname = alloca(IFNAMSIZ);
2332 memcpy(addr_ifname, dev_ifname, optlen);
2333 addr_ifname[optlen] = 0;
2334 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2335 addr_ifname, optlen));
2336 unlock_user (dev_ifname, optval_addr, 0);
2337 return ret;
2339 /* Options with 'int' argument. */
2340 case TARGET_SO_DEBUG:
2341 optname = SO_DEBUG;
2342 break;
2343 case TARGET_SO_REUSEADDR:
2344 optname = SO_REUSEADDR;
2345 break;
2346 case TARGET_SO_TYPE:
2347 optname = SO_TYPE;
2348 break;
2349 case TARGET_SO_ERROR:
2350 optname = SO_ERROR;
2351 break;
2352 case TARGET_SO_DONTROUTE:
2353 optname = SO_DONTROUTE;
2354 break;
2355 case TARGET_SO_BROADCAST:
2356 optname = SO_BROADCAST;
2357 break;
2358 case TARGET_SO_SNDBUF:
2359 optname = SO_SNDBUF;
2360 break;
2361 case TARGET_SO_SNDBUFFORCE:
2362 optname = SO_SNDBUFFORCE;
2363 break;
2364 case TARGET_SO_RCVBUF:
2365 optname = SO_RCVBUF;
2366 break;
2367 case TARGET_SO_RCVBUFFORCE:
2368 optname = SO_RCVBUFFORCE;
2369 break;
2370 case TARGET_SO_KEEPALIVE:
2371 optname = SO_KEEPALIVE;
2372 break;
2373 case TARGET_SO_OOBINLINE:
2374 optname = SO_OOBINLINE;
2375 break;
2376 case TARGET_SO_NO_CHECK:
2377 optname = SO_NO_CHECK;
2378 break;
2379 case TARGET_SO_PRIORITY:
2380 optname = SO_PRIORITY;
2381 break;
2382 #ifdef SO_BSDCOMPAT
2383 case TARGET_SO_BSDCOMPAT:
2384 optname = SO_BSDCOMPAT;
2385 break;
2386 #endif
2387 case TARGET_SO_PASSCRED:
2388 optname = SO_PASSCRED;
2389 break;
2390 case TARGET_SO_PASSSEC:
2391 optname = SO_PASSSEC;
2392 break;
2393 case TARGET_SO_TIMESTAMP:
2394 optname = SO_TIMESTAMP;
2395 break;
2396 case TARGET_SO_RCVLOWAT:
2397 optname = SO_RCVLOWAT;
2398 break;
2399 break;
2400 default:
2401 goto unimplemented;
2403 if (optlen < sizeof(uint32_t))
2404 return -TARGET_EINVAL;
2406 if (get_user_u32(val, optval_addr))
2407 return -TARGET_EFAULT;
2408 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2409 break;
2410 default:
2411 unimplemented:
2412 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2413 ret = -TARGET_ENOPROTOOPT;
2415 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Fetch a socket option from host fd @sockfd and write it back to the
 * guest at @optval_addr, updating the guest's length word at @optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Copy back at most as many bytes as the guest asked for.  */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): this is sizeof(socklen_t), not sizeof(val);
         * they happen to be the same size (4) on supported hosts.
         */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back 4 bytes if the guest asked for at least that,
         * otherwise degrade to a single byte.
         */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Small-buffer case: mimic the kernel and return 1 byte.  */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Build a host iovec array from a guest iovec array at @target_addr,
 * locking each referenced guest buffer into host memory.
 *
 * Returns the array (free with unlock_iovec()) or NULL with errno set.
 * A bad buffer after the first one is not an error: the remaining
 * entries are zeroed so the syscall performs a partial transfer.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len.  */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked before entry i.  */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2694 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2695 int count, int copy)
2697 struct target_iovec *target_vec;
2698 int i;
2700 target_vec = lock_user(VERIFY_READ, target_addr,
2701 count * sizeof(struct target_iovec), 1);
2702 if (target_vec) {
2703 for (i = 0; i < count; i++) {
2704 abi_ulong base = tswapal(target_vec[i].iov_base);
2705 abi_long len = tswapal(target_vec[i].iov_len);
2706 if (len < 0) {
2707 break;
2709 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2711 unlock_user(target_vec, target_addr, 0);
2714 g_free(vec);
2717 static inline int target_to_host_sock_type(int *type)
2719 int host_type = 0;
2720 int target_type = *type;
2722 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2723 case TARGET_SOCK_DGRAM:
2724 host_type = SOCK_DGRAM;
2725 break;
2726 case TARGET_SOCK_STREAM:
2727 host_type = SOCK_STREAM;
2728 break;
2729 default:
2730 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2731 break;
2733 if (target_type & TARGET_SOCK_CLOEXEC) {
2734 #if defined(SOCK_CLOEXEC)
2735 host_type |= SOCK_CLOEXEC;
2736 #else
2737 return -TARGET_EINVAL;
2738 #endif
2740 if (target_type & TARGET_SOCK_NONBLOCK) {
2741 #if defined(SOCK_NONBLOCK)
2742 host_type |= SOCK_NONBLOCK;
2743 #elif !defined(O_NONBLOCK)
2744 return -TARGET_EINVAL;
2745 #endif
2747 *type = host_type;
2748 return 0;
2751 /* Try to emulate socket type flags after socket creation. */
2752 static int sock_flags_fixup(int fd, int target_type)
2754 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2755 if (target_type & TARGET_SOCK_NONBLOCK) {
2756 int flags = fcntl(fd, F_GETFL);
2757 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2758 close(fd);
2759 return -TARGET_EINVAL;
2762 #endif
2763 return fd;
/* Convert a guest sockaddr for an AF_PACKET / SOCK_PACKET socket into
 * host representation.  Only sa_family is byte-swapped; the rest of the
 * structure is copied verbatim.
 */
static abi_long packet_target_to_host_sockaddr(void *host_addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    struct sockaddr *addr = host_addr;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr) {
        return -TARGET_EFAULT;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = tswap16(target_saddr->sa_family);
    /* spkt_protocol is big-endian */

    unlock_user(target_saddr, target_addr, 0);
    return 0;
}
/* fd translator installed on SOCK_PACKET sockets by do_socket(): only
 * sockaddr conversion is needed for these.
 */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
#ifdef CONFIG_RTNETLINK
/* fd_trans hook: convert guest NETLINK_ROUTE payloads to host order.  */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}

/* fd_trans hook: convert host NETLINK_ROUTE payloads to guest order.  */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}

/* Data translators installed on NETLINK_ROUTE sockets by do_socket().  */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
/* fd_trans hook: convert guest NETLINK_AUDIT payloads to host order.  */
static abi_long netlink_audit_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_audit(buf, len);
}

/* fd_trans hook: convert host NETLINK_AUDIT payloads to guest order.  */
static abi_long netlink_audit_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_audit(buf, len);
}

/* Data translators installed on NETLINK_AUDIT sockets by do_socket().  */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
/* do_socket() Must return target values and target errnos.
 *
 * Create a host socket for the guest, restricting PF_NETLINK to the
 * protocols we can translate, and register the appropriate fd data /
 * address translators on the new fd.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with a translator (or none needed) are
     * allowed through.
     */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    /* Packet sockets take a big-endian protocol number.  */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the PF_NETLINK check above.  */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
2876 /* do_bind() Must return target values and target errnos. */
2877 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2878 socklen_t addrlen)
2880 void *addr;
2881 abi_long ret;
2883 if ((int)addrlen < 0) {
2884 return -TARGET_EINVAL;
2887 addr = alloca(addrlen+1);
2889 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2890 if (ret)
2891 return ret;
2893 return get_errno(bind(sockfd, addr, addrlen));
2896 /* do_connect() Must return target values and target errnos. */
2897 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2898 socklen_t addrlen)
2900 void *addr;
2901 abi_long ret;
2903 if ((int)addrlen < 0) {
2904 return -TARGET_EINVAL;
2907 addr = alloca(addrlen+1);
2909 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2910 if (ret)
2911 return ret;
2913 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: @msgp is an already-locked guest
 * msghdr.  Converts name, control data and iovec between guest and host
 * layouts around a safe_sendmsg()/safe_recvmsg() call.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg encoding may be larger than the guest's; double the
     * buffer to leave room for the converted control data.
     */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        /* A registered fd translator overrides cmsg conversion.  */
        if (fd_trans_target_to_host_data(fd)) {
            ret = fd_trans_target_to_host_data(fd)(msg.msg_iov->iov_base,
                                                   msg.msg_iov->iov_len);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
        }
        if (ret == 0) {
            ret = get_errno(safe_sendmsg(fd, &msg, flags));
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count; ret is reused for conversion
             * status below.
             */
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       msg.msg_iov->iov_len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2995 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2996 int flags, int send)
2998 abi_long ret;
2999 struct target_msghdr *msgp;
3001 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3002 msgp,
3003 target_msg,
3004 send ? 1 : 0)) {
3005 return -TARGET_EFAULT;
3007 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3008 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3009 return ret;
3012 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3013 * so it might not have this *mmsg-specific flag either.
3015 #ifndef MSG_WAITFORONE
3016 #define MSG_WAITFORONE 0x10000
3017 #endif
/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over
 * the guest's mmsghdr vector.  Must return target values and errnos.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Kernel silently caps the vector length, so do the same.  */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed.  */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Fallback shim: only valid when no flags are requested.  */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
/* do_accept4() Must return target values and target errnos.
 *
 * Accept a connection on @fd, optionally writing the peer address to
 * guest memory at @target_addr and updating the guest length word.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* Guest doesn't care about the peer address.  */
    if (target_addr == 0) {
        return get_errno(accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3109 /* do_getpeername() Must return target values and target errnos. */
3110 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3111 abi_ulong target_addrlen_addr)
3113 socklen_t addrlen;
3114 void *addr;
3115 abi_long ret;
3117 if (get_user_u32(addrlen, target_addrlen_addr))
3118 return -TARGET_EFAULT;
3120 if ((int)addrlen < 0) {
3121 return -TARGET_EINVAL;
3124 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3125 return -TARGET_EFAULT;
3127 addr = alloca(addrlen);
3129 ret = get_errno(getpeername(fd, addr, &addrlen));
3130 if (!is_error(ret)) {
3131 host_to_target_sockaddr(target_addr, addr, addrlen);
3132 if (put_user_u32(addrlen, target_addrlen_addr))
3133 ret = -TARGET_EFAULT;
3135 return ret;
3138 /* do_getsockname() Must return target values and target errnos. */
3139 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3140 abi_ulong target_addrlen_addr)
3142 socklen_t addrlen;
3143 void *addr;
3144 abi_long ret;
3146 if (get_user_u32(addrlen, target_addrlen_addr))
3147 return -TARGET_EFAULT;
3149 if ((int)addrlen < 0) {
3150 return -TARGET_EINVAL;
3153 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3154 return -TARGET_EFAULT;
3156 addr = alloca(addrlen);
3158 ret = get_errno(getsockname(fd, addr, &addrlen));
3159 if (!is_error(ret)) {
3160 host_to_target_sockaddr(target_addr, addr, addrlen);
3161 if (put_user_u32(addrlen, target_addrlen_addr))
3162 ret = -TARGET_EFAULT;
3164 return ret;
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    /* NOTE(review): the return value is ignored here, unlike in
     * do_socket() — an unrepresentable CLOEXEC/NONBLOCK flag is
     * silently dropped; confirm whether this is intentional.
     */
    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        /* Write both new fds back to the guest array.  */
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos.
 *
 * Also implements plain send() when @target_addr is 0.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /* Let a registered fd translator (e.g. netlink) convert the
     * payload in place before sending.
     */
    if (fd_trans_target_to_host_data(fd)) {
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Also implements plain recv() when @target_addr is 0.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received bytes back to the guest buffer.  */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path: release the buffer without copying anything.  */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplex the legacy socketcall(2) syscall: fetch @num's arguments
 * from the guest argument block at @vptr, then dispatch to the
 * corresponding do_*() helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3355 #define N_SHM_REGIONS 32
/* Book-keeping for segments attached via do_shmat(), so do_shmdt() can
 * recover a segment's size from its guest start address alone.
 */
static struct shm_region {
    abi_ulong start;   /* guest virtual address the segment is attached at */
    abi_ulong size;    /* segment size in bytes (from shm_segsz) */
    bool in_use;       /* slot currently tracks a live attachment */
} shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of semid_ds.  Padding words are present only where the
 * target ABI has them (PPC64 omits the pads around the time fields).
 */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
3379 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3380 abi_ulong target_addr)
3382 struct target_ipc_perm *target_ip;
3383 struct target_semid_ds *target_sd;
3385 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3386 return -TARGET_EFAULT;
3387 target_ip = &(target_sd->sem_perm);
3388 host_ip->__key = tswap32(target_ip->__key);
3389 host_ip->uid = tswap32(target_ip->uid);
3390 host_ip->gid = tswap32(target_ip->gid);
3391 host_ip->cuid = tswap32(target_ip->cuid);
3392 host_ip->cgid = tswap32(target_ip->cgid);
3393 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3394 host_ip->mode = tswap32(target_ip->mode);
3395 #else
3396 host_ip->mode = tswap16(target_ip->mode);
3397 #endif
3398 #if defined(TARGET_PPC)
3399 host_ip->__seq = tswap32(target_ip->__seq);
3400 #else
3401 host_ip->__seq = tswap16(target_ip->__seq);
3402 #endif
3403 unlock_user_struct(target_sd, target_addr, 0);
3404 return 0;
3407 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3408 struct ipc_perm *host_ip)
3410 struct target_ipc_perm *target_ip;
3411 struct target_semid_ds *target_sd;
3413 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3414 return -TARGET_EFAULT;
3415 target_ip = &(target_sd->sem_perm);
3416 target_ip->__key = tswap32(host_ip->__key);
3417 target_ip->uid = tswap32(host_ip->uid);
3418 target_ip->gid = tswap32(host_ip->gid);
3419 target_ip->cuid = tswap32(host_ip->cuid);
3420 target_ip->cgid = tswap32(host_ip->cgid);
3421 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3422 target_ip->mode = tswap32(host_ip->mode);
3423 #else
3424 target_ip->mode = tswap16(host_ip->mode);
3425 #endif
3426 #if defined(TARGET_PPC)
3427 target_ip->__seq = tswap32(host_ip->__seq);
3428 #else
3429 target_ip->__seq = tswap16(host_ip->__seq);
3430 #endif
3431 unlock_user_struct(target_sd, target_addr, 1);
3432 return 0;
3435 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3436 abi_ulong target_addr)
3438 struct target_semid_ds *target_sd;
3440 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3441 return -TARGET_EFAULT;
3442 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3443 return -TARGET_EFAULT;
3444 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3445 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3446 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3447 unlock_user_struct(target_sd, target_addr, 0);
3448 return 0;
3451 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3452 struct semid_ds *host_sd)
3454 struct target_semid_ds *target_sd;
3456 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3457 return -TARGET_EFAULT;
3458 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3459 return -TARGET_EFAULT;
3460 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3461 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3462 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3463 unlock_user_struct(target_sd, target_addr, 1);
3464 return 0;
/* Guest-ABI layout of seminfo (returned by semctl IPC_INFO/SEM_INFO).
 * All fields are plain ints on every target, matching the host layout.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3480 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3481 struct seminfo *host_seminfo)
3483 struct target_seminfo *target_seminfo;
3484 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3485 return -TARGET_EFAULT;
3486 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3487 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3488 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3489 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3490 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3491 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3492 __put_user(host_seminfo->semume, &target_seminfo->semume);
3493 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3494 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3495 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3496 unlock_user_struct(target_seminfo, target_addr, 1);
3497 return 0;
/* The semctl() argument union; the caller must define it itself
 * (it is deliberately not declared by glibc).
 */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};
/* Guest-ABI view of semun: pointer members are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
3514 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3515 abi_ulong target_addr)
3517 int nsems;
3518 unsigned short *array;
3519 union semun semun;
3520 struct semid_ds semid_ds;
3521 int i, ret;
3523 semun.buf = &semid_ds;
3525 ret = semctl(semid, 0, IPC_STAT, semun);
3526 if (ret == -1)
3527 return get_errno(ret);
3529 nsems = semid_ds.sem_nsems;
3531 *host_array = g_try_new(unsigned short, nsems);
3532 if (!*host_array) {
3533 return -TARGET_ENOMEM;
3535 array = lock_user(VERIFY_READ, target_addr,
3536 nsems*sizeof(unsigned short), 1);
3537 if (!array) {
3538 g_free(*host_array);
3539 return -TARGET_EFAULT;
3542 for(i=0; i<nsems; i++) {
3543 __get_user((*host_array)[i], &array[i]);
3545 unlock_user(array, target_addr, 0);
3547 return 0;
3550 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3551 unsigned short **host_array)
3553 int nsems;
3554 unsigned short *array;
3555 union semun semun;
3556 struct semid_ds semid_ds;
3557 int i, ret;
3559 semun.buf = &semid_ds;
3561 ret = semctl(semid, 0, IPC_STAT, semun);
3562 if (ret == -1)
3563 return get_errno(ret);
3565 nsems = semid_ds.sem_nsems;
3567 array = lock_user(VERIFY_WRITE, target_addr,
3568 nsems*sizeof(unsigned short), 0);
3569 if (!array)
3570 return -TARGET_EFAULT;
3572 for(i=0; i<nsems; i++) {
3573 __put_user((*host_array)[i], &array[i]);
3575 g_free(*host_array);
3576 unlock_user(array, target_addr, 1);
3578 return 0;
/* Emulate semctl(): dispatch on the (masked) command, converting the
 * guest semun argument to/from host form around the real semctl() call.
 * Returns the host result mapped to target errnos.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;  /* strip version bits (e.g. IPC_64) from the command */

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
	 * a swap of the 4 byte val field. In other cases, the data is
	 * already in proper host byte order. */
	if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
	    target_su.buf = tswapal(target_su.buf);
	    arg.val = tswap32(target_su.val);
	} else {
	    arg.val = target_su.val;
	}
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* target_to_host_semarray allocates 'array'; the matching
         * host_to_target call below copies it back and frees it.
         * NOTE(review): the early 'return err' paths appear to leak
         * 'array' in some error combinations — worth auditing.
         */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands take no semun argument at all. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-ABI layout of sembuf (one semop() operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
3657 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3658 abi_ulong target_addr,
3659 unsigned nsops)
3661 struct target_sembuf *target_sembuf;
3662 int i;
3664 target_sembuf = lock_user(VERIFY_READ, target_addr,
3665 nsops*sizeof(struct target_sembuf), 1);
3666 if (!target_sembuf)
3667 return -TARGET_EFAULT;
3669 for(i=0; i<nsops; i++) {
3670 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3671 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3672 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3675 unlock_user(target_sembuf, target_addr, 0);
3677 return 0;
/* Emulate semop(): copy the guest op array in and issue the host call.
 * NOTE(review): nsops sizes a VLA and comes from the guest — presumably
 * the kernel's SEMOPM limit keeps it small, but confirm before trusting
 * this with unbounded guest input.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}
/* Guest-ABI layout of msqid_ds.  32-bit ABIs pad each time field to
 * 64 bits with an unused word.
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3714 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3715 abi_ulong target_addr)
3717 struct target_msqid_ds *target_md;
3719 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3720 return -TARGET_EFAULT;
3721 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3722 return -TARGET_EFAULT;
3723 host_md->msg_stime = tswapal(target_md->msg_stime);
3724 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3725 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3726 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3727 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3728 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3729 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3730 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3731 unlock_user_struct(target_md, target_addr, 0);
3732 return 0;
3735 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3736 struct msqid_ds *host_md)
3738 struct target_msqid_ds *target_md;
3740 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3741 return -TARGET_EFAULT;
3742 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3743 return -TARGET_EFAULT;
3744 target_md->msg_stime = tswapal(host_md->msg_stime);
3745 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3746 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3747 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3748 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3749 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3750 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3751 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3752 unlock_user_struct(target_md, target_addr, 1);
3753 return 0;
/* Guest-ABI layout of msginfo (returned by msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3767 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3768 struct msginfo *host_msginfo)
3770 struct target_msginfo *target_msginfo;
3771 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3772 return -TARGET_EFAULT;
3773 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3774 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3775 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3776 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3777 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3778 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3779 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3780 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3781 unlock_user_struct(target_msginfo, target_addr, 1);
3782 return 0;
/* Emulate msgctl(): convert the msqid_ds/msginfo argument between guest
 * and host form around the real msgctl() call.
 * Returns the host result mapped to target errnos.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;  /* strip version bits (e.g. IPC_64) from the command */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a msginfo through the msqid_ds pointer for
         * these commands, hence the cast.
         */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI msgbuf header: mtype followed by the variable-length text. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(): build a host msgbuf from the guest one and send it
 * via the safe_msgsnd wrapper (so the blocking call can be interrupted).
 * Returns 0 on success or a negative target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: a long mtype header plus msgsz bytes of text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(): receive into a host msgbuf via the safe_msgrcv
 * wrapper, then copy mtype and the received text back to the guest.
 * Returns the number of bytes received or a negative target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: a long mtype header plus up to msgsz bytes of text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Copy the received text separately: it may be larger than the
         * fixed-size target_msgbuf view locked above.
         */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3892 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3893 abi_ulong target_addr)
3895 struct target_shmid_ds *target_sd;
3897 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3898 return -TARGET_EFAULT;
3899 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3900 return -TARGET_EFAULT;
3901 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3902 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3903 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3904 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3905 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3906 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3907 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3908 unlock_user_struct(target_sd, target_addr, 0);
3909 return 0;
3912 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3913 struct shmid_ds *host_sd)
3915 struct target_shmid_ds *target_sd;
3917 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3918 return -TARGET_EFAULT;
3919 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3920 return -TARGET_EFAULT;
3921 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3922 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3923 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3924 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3925 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3926 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3927 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3928 unlock_user_struct(target_sd, target_addr, 1);
3929 return 0;
/* Guest-ABI layout of shminfo (returned by shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
3940 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3941 struct shminfo *host_shminfo)
3943 struct target_shminfo *target_shminfo;
3944 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3945 return -TARGET_EFAULT;
3946 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3947 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3948 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3949 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3950 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3951 unlock_user_struct(target_shminfo, target_addr, 1);
3952 return 0;
/* Guest-ABI layout of shm_info (returned by shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
3964 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3965 struct shm_info *host_shm_info)
3967 struct target_shm_info *target_shm_info;
3968 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3969 return -TARGET_EFAULT;
3970 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3971 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3972 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3973 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3974 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3975 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3976 unlock_user_struct(target_shm_info, target_addr, 1);
3977 return 0;
/* Emulate shmctl(): convert the shmid_ds/shminfo/shm_info argument
 * between guest and host form around the real shmctl() call.
 * Returns the host result mapped to target errnos.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;  /* strip version bits (e.g. IPC_64) from the command */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel returns a shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, a shm_info through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These take no data argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(): attach the segment at the guest-requested address, or
 * pick a free region via mmap_find_vma() when shmaddr is 0.  On success
 * the guest page flags are updated and the attachment is recorded in
 * shm_regions[] for later do_shmdt().
 * Returns the guest attach address or a negative target errno.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Hold the mmap lock while the guest address space layout changes. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the vma found above may already be reserved. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the attachment in the first free tracking slot. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4073 static inline abi_long do_shmdt(abi_ulong shmaddr)
4075 int i;
4077 for (i = 0; i < N_SHM_REGIONS; ++i) {
4078 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4079 shm_regions[i].in_use = false;
4080 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4081 break;
4085 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of 'call'
 * select the operation, the high 16 bits carry the interface version.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old interface: msgp and msgtyp are packed into a small
                 * struct that ptr points at.
                 */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4194 /* kernel structure types definitions */
/* First pass over syscall_types.h: generate a STRUCT_<name> enumerator
 * for every structure description.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
    STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a struct_<name>_def argtype table per structure;
 * "special" structures are converted by hand-written code instead.
 */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom ioctl conversion handler. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry in the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;            /* guest ioctl request number */
    unsigned int host_cmd;     /* corresponding host request number */
    const char *name;          /* for logging unsupported/failed ioctls */
    int access;                /* IOC_R / IOC_W / IOC_RW direction flags */
    do_ioctl_fn *do_ioctl;     /* custom handler, or NULL for generic path */
    const argtype arg_type[5]; /* thunk description of the argument */
};
4225 #define IOC_R 0x0001
4226 #define IOC_W 0x0002
4227 #define IOC_RW (IOC_R | IOC_W)
4229 #define MAX_STRUCT_SIZE 4096
4231 #ifdef CONFIG_FIEMAP
4232 /* So fiemap access checks don't overflow on 32 bit systems.
4233 * This is very slightly smaller than the limit imposed by
4234 * the underlying kernel.
4236 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4237 / sizeof(struct fiemap_extent))
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;   /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert only the fixed-size fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject counts whose extent array size would overflow 32 bits. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4318 #endif
4320 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4321 int fd, int cmd, abi_long arg)
4323 const argtype *arg_type = ie->arg_type;
4324 int target_size;
4325 void *argptr;
4326 int ret;
4327 struct ifconf *host_ifconf;
4328 uint32_t outbufsz;
4329 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4330 int target_ifreq_size;
4331 int nb_ifreq;
4332 int free_buf = 0;
4333 int i;
4334 int target_ifc_len;
4335 abi_long target_ifc_buf;
4336 int host_ifc_len;
4337 char *host_ifc_buf;
4339 assert(arg_type[0] == TYPE_PTR);
4340 assert(ie->access == IOC_RW);
4342 arg_type++;
4343 target_size = thunk_type_size(arg_type, 0);
4345 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4346 if (!argptr)
4347 return -TARGET_EFAULT;
4348 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4349 unlock_user(argptr, arg, 0);
4351 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4352 target_ifc_len = host_ifconf->ifc_len;
4353 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4355 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4356 nb_ifreq = target_ifc_len / target_ifreq_size;
4357 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4359 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4360 if (outbufsz > MAX_STRUCT_SIZE) {
4361 /* We can't fit all the extents into the fixed size buffer.
4362 * Allocate one that is large enough and use it instead.
4364 host_ifconf = malloc(outbufsz);
4365 if (!host_ifconf) {
4366 return -TARGET_ENOMEM;
4368 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4369 free_buf = 1;
4371 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4373 host_ifconf->ifc_len = host_ifc_len;
4374 host_ifconf->ifc_buf = host_ifc_buf;
4376 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
4377 if (!is_error(ret)) {
4378 /* convert host ifc_len to target ifc_len */
4380 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4381 target_ifc_len = nb_ifreq * target_ifreq_size;
4382 host_ifconf->ifc_len = target_ifc_len;
4384 /* restore target ifc_buf */
4386 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4388 /* copy struct ifconf to target user */
4390 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4391 if (!argptr)
4392 return -TARGET_EFAULT;
4393 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4394 unlock_user(argptr, arg, target_size);
4396 /* copy ifreq[] to target user */
4398 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4399 for (i = 0; i < nb_ifreq ; i++) {
4400 thunk_convert(argptr + i * target_ifreq_size,
4401 host_ifc_buf + i * sizeof(struct ifreq),
4402 ifreq_arg_type, THUNK_TARGET);
4404 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4407 if (free_buf) {
4408 free(host_ifconf);
4411 return ret;
4414 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4415 int cmd, abi_long arg)
4417 void *argptr;
4418 struct dm_ioctl *host_dm;
4419 abi_long guest_data;
4420 uint32_t guest_data_size;
4421 int target_size;
4422 const argtype *arg_type = ie->arg_type;
4423 abi_long ret;
4424 void *big_buf = NULL;
4425 char *host_data;
4427 arg_type++;
4428 target_size = thunk_type_size(arg_type, 0);
4429 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4430 if (!argptr) {
4431 ret = -TARGET_EFAULT;
4432 goto out;
4434 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4435 unlock_user(argptr, arg, 0);
4437 /* buf_temp is too small, so fetch things into a bigger buffer */
4438 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4439 memcpy(big_buf, buf_temp, target_size);
4440 buf_temp = big_buf;
4441 host_dm = big_buf;
4443 guest_data = arg + host_dm->data_start;
4444 if ((guest_data - arg) < 0) {
4445 ret = -EINVAL;
4446 goto out;
4448 guest_data_size = host_dm->data_size - host_dm->data_start;
4449 host_data = (char*)host_dm + host_dm->data_start;
4451 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4452 switch (ie->host_cmd) {
4453 case DM_REMOVE_ALL:
4454 case DM_LIST_DEVICES:
4455 case DM_DEV_CREATE:
4456 case DM_DEV_REMOVE:
4457 case DM_DEV_SUSPEND:
4458 case DM_DEV_STATUS:
4459 case DM_DEV_WAIT:
4460 case DM_TABLE_STATUS:
4461 case DM_TABLE_CLEAR:
4462 case DM_TABLE_DEPS:
4463 case DM_LIST_VERSIONS:
4464 /* no input data */
4465 break;
4466 case DM_DEV_RENAME:
4467 case DM_DEV_SET_GEOMETRY:
4468 /* data contains only strings */
4469 memcpy(host_data, argptr, guest_data_size);
4470 break;
4471 case DM_TARGET_MSG:
4472 memcpy(host_data, argptr, guest_data_size);
4473 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4474 break;
4475 case DM_TABLE_LOAD:
4477 void *gspec = argptr;
4478 void *cur_data = host_data;
4479 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4480 int spec_size = thunk_type_size(arg_type, 0);
4481 int i;
4483 for (i = 0; i < host_dm->target_count; i++) {
4484 struct dm_target_spec *spec = cur_data;
4485 uint32_t next;
4486 int slen;
4488 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4489 slen = strlen((char*)gspec + spec_size) + 1;
4490 next = spec->next;
4491 spec->next = sizeof(*spec) + slen;
4492 strcpy((char*)&spec[1], gspec + spec_size);
4493 gspec += next;
4494 cur_data += spec->next;
4496 break;
4498 default:
4499 ret = -TARGET_EINVAL;
4500 unlock_user(argptr, guest_data, 0);
4501 goto out;
4503 unlock_user(argptr, guest_data, 0);
4505 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4506 if (!is_error(ret)) {
4507 guest_data = arg + host_dm->data_start;
4508 guest_data_size = host_dm->data_size - host_dm->data_start;
4509 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4510 switch (ie->host_cmd) {
4511 case DM_REMOVE_ALL:
4512 case DM_DEV_CREATE:
4513 case DM_DEV_REMOVE:
4514 case DM_DEV_RENAME:
4515 case DM_DEV_SUSPEND:
4516 case DM_DEV_STATUS:
4517 case DM_TABLE_LOAD:
4518 case DM_TABLE_CLEAR:
4519 case DM_TARGET_MSG:
4520 case DM_DEV_SET_GEOMETRY:
4521 /* no return data */
4522 break;
4523 case DM_LIST_DEVICES:
4525 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4526 uint32_t remaining_data = guest_data_size;
4527 void *cur_data = argptr;
4528 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4529 int nl_size = 12; /* can't use thunk_size due to alignment */
4531 while (1) {
4532 uint32_t next = nl->next;
4533 if (next) {
4534 nl->next = nl_size + (strlen(nl->name) + 1);
4536 if (remaining_data < nl->next) {
4537 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4538 break;
4540 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4541 strcpy(cur_data + nl_size, nl->name);
4542 cur_data += nl->next;
4543 remaining_data -= nl->next;
4544 if (!next) {
4545 break;
4547 nl = (void*)nl + next;
4549 break;
4551 case DM_DEV_WAIT:
4552 case DM_TABLE_STATUS:
4554 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4555 void *cur_data = argptr;
4556 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4557 int spec_size = thunk_type_size(arg_type, 0);
4558 int i;
4560 for (i = 0; i < host_dm->target_count; i++) {
4561 uint32_t next = spec->next;
4562 int slen = strlen((char*)&spec[1]) + 1;
4563 spec->next = (cur_data - argptr) + spec_size + slen;
4564 if (guest_data_size < spec->next) {
4565 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4566 break;
4568 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4569 strcpy(cur_data + spec_size, (char*)&spec[1]);
4570 cur_data = argptr + spec->next;
4571 spec = (void*)host_dm + host_dm->data_start + next;
4573 break;
4575 case DM_TABLE_DEPS:
4577 void *hdata = (void*)host_dm + host_dm->data_start;
4578 int count = *(uint32_t*)hdata;
4579 uint64_t *hdev = hdata + 8;
4580 uint64_t *gdev = argptr + 8;
4581 int i;
4583 *(uint32_t*)argptr = tswap32(count);
4584 for (i = 0; i < count; i++) {
4585 *gdev = tswap64(*hdev);
4586 gdev++;
4587 hdev++;
4589 break;
4591 case DM_LIST_VERSIONS:
4593 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4594 uint32_t remaining_data = guest_data_size;
4595 void *cur_data = argptr;
4596 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4597 int vers_size = thunk_type_size(arg_type, 0);
4599 while (1) {
4600 uint32_t next = vers->next;
4601 if (next) {
4602 vers->next = vers_size + (strlen(vers->name) + 1);
4604 if (remaining_data < vers->next) {
4605 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4606 break;
4608 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4609 strcpy(cur_data + vers_size, vers->name);
4610 cur_data += vers->next;
4611 remaining_data -= vers->next;
4612 if (!next) {
4613 break;
4615 vers = (void*)vers + next;
4617 break;
4619 default:
4620 unlock_user(argptr, guest_data, 0);
4621 ret = -TARGET_EINVAL;
4622 goto out;
4624 unlock_user(argptr, guest_data, guest_data_size);
4626 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4627 if (!argptr) {
4628 ret = -TARGET_EFAULT;
4629 goto out;
4631 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4632 unlock_user(argptr, arg, target_size);
4634 out:
4635 g_free(big_buf);
4636 return ret;
/*
 * Emulate the BLKPG ioctl.  struct blkpg_ioctl_arg embeds a pointer to a
 * struct blkpg_partition payload, so the generic thunk path cannot handle
 * it: we convert the outer struct, validate the opcode, convert the
 * pointed-to partition into a local copy, then swizzle the data pointer
 * to that host copy before issuing the host ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;  /* skip the TYPE_PTR marker; points at the struct type */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
4692 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4693 int fd, int cmd, abi_long arg)
4695 const argtype *arg_type = ie->arg_type;
4696 const StructEntry *se;
4697 const argtype *field_types;
4698 const int *dst_offsets, *src_offsets;
4699 int target_size;
4700 void *argptr;
4701 abi_ulong *target_rt_dev_ptr;
4702 unsigned long *host_rt_dev_ptr;
4703 abi_long ret;
4704 int i;
4706 assert(ie->access == IOC_W);
4707 assert(*arg_type == TYPE_PTR);
4708 arg_type++;
4709 assert(*arg_type == TYPE_STRUCT);
4710 target_size = thunk_type_size(arg_type, 0);
4711 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4712 if (!argptr) {
4713 return -TARGET_EFAULT;
4715 arg_type++;
4716 assert(*arg_type == (int)STRUCT_rtentry);
4717 se = struct_entries + *arg_type++;
4718 assert(se->convert[0] == NULL);
4719 /* convert struct here to be able to catch rt_dev string */
4720 field_types = se->field_types;
4721 dst_offsets = se->field_offsets[THUNK_HOST];
4722 src_offsets = se->field_offsets[THUNK_TARGET];
4723 for (i = 0; i < se->nb_fields; i++) {
4724 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4725 assert(*field_types == TYPE_PTRVOID);
4726 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4727 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4728 if (*target_rt_dev_ptr != 0) {
4729 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4730 tswapal(*target_rt_dev_ptr));
4731 if (!*host_rt_dev_ptr) {
4732 unlock_user(argptr, arg, 0);
4733 return -TARGET_EFAULT;
4735 } else {
4736 *host_rt_dev_ptr = 0;
4738 field_types++;
4739 continue;
4741 field_types = thunk_convert(buf_temp + dst_offsets[i],
4742 argptr + src_offsets[i],
4743 field_types, THUNK_HOST);
4745 unlock_user(argptr, arg, 0);
4747 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4748 if (*host_rt_dev_ptr != 0) {
4749 unlock_user((void *)*host_rt_dev_ptr,
4750 *target_rt_dev_ptr, 0);
4752 return ret;
4755 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4756 int fd, int cmd, abi_long arg)
4758 int sig = target_to_host_signal(arg);
4759 return get_errno(ioctl(fd, ie->host_cmd, sig));
/*
 * Table of all emulated ioctls, expanded from ioctls.h.  IOCTL() entries
 * use the generic thunk-based argument conversion; IOCTL_SPECIAL()
 * entries name a custom do_ioctl_* handler.  A zero target_cmd
 * terminates the table.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the table for the guest command number. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Commands with layouts too odd for the thunk machinery have a
     * dedicated handler registered via IOCTL_SPECIAL. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* pass the value (or opaque pointer) straight through */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* pointer to a struct: convert in/out according to the declared
         * access direction */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes: run the ioctl, then copy the result out */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads: copy the argument in, then run the ioctl */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* both directions: convert in, run, convert back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Translation table for termios input-mode (c_iflag) bits.  Each entry is
 * { target_mask, target_bits, host_mask, host_bits }; all flags here map
 * 1:1 by name.  Zero-filled entry terminates the table. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* Translation table for termios output-mode (c_oflag) bits.  Single-bit
 * flags map 1:1; the delay fields (NLDLY, CRDLY, TABDLY, ...) are
 * multi-bit, so each value within the field gets its own entry under the
 * field's mask. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* Translation table for termios control-mode (c_cflag) bits.  The CBAUD
 * field encodes the baud rate, so every supported rate needs its own
 * entry; CSIZE likewise covers the character-size values CS5..CS8. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* Translation table for termios local-mode (c_lflag) bits; all flags map
 * 1:1 by name. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/*
 * Convert a guest struct target_termios into a host struct host_termios:
 * the four mode words are byte-swapped and remapped bit by bit through
 * the translation tables above, and the control characters are copied
 * index by index (guest and host may place VINTR etc. differently).
 * NOTE: the speed fields are not converted here — the baud rate travels
 * in c_cflag via the CBAUD entries of cflag_tbl.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* clear first: not every host slot has a guest counterpart */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Inverse of target_to_host_termios(): convert a host struct
 * host_termios into a guest struct target_termios, remapping the mode
 * bits through the same translation tables (in the host->target
 * direction) and copying the control characters slot by slot.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* clear first: not every guest slot has a host counterpart */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: conversion is done by the custom
 * functions above rather than the generic field walker.  Order in each
 * array is { target, host }. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5034 static bitmask_transtbl mmap_flags_tbl[] = {
5035 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5036 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5037 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5038 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5039 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5040 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5041 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5042 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5043 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5044 MAP_NORESERVE },
5045 { 0, 0, 0, 0 }
5048 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/*
 * modify_ldt(func=0): copy up to 'bytecount' bytes of the emulated LDT
 * to the guest buffer at 'ptr'.  Returns the number of bytes copied,
 * 0 if no LDT has been allocated yet, or -TARGET_EFAULT on a bad
 * guest pointer.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    /* clamp to the full LDT size */
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
5072 /* XXX: add locking support */
/* XXX: add locking support */
/*
 * modify_ldt(func=1 or 0x11): install one descriptor into the emulated
 * LDT from a guest struct user_desc ('modify_ldt_ldt_s').  'oldmode'
 * selects the legacy entry format (no 'useable' bit, no TLS-clear
 * shortcut).  The descriptor words are packed exactly as the Linux
 * kernel does it.  Allocates the LDT backing store on first use.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the flag bits (same layout as the kernel's user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        /* conforming-code segments: not in old mode, must be present */
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the two 32-bit descriptor words (base/limit split across
     * both words, type and attribute bits in the high word) */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
          (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
          (ldt_info.limit & 0xf0000) |
          ((read_exec_only ^ 1) << 9) |
          (contents << 10) |
          ((seg_not_present ^ 1) << 15) |
          (seg_32bit << 22) |
          (limit_in_pages << 23) |
          (lm << 21) |
          0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5164 /* specific and weird i386 syscalls */
5165 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5166 unsigned long bytecount)
5168 abi_long ret;
5170 switch (func) {
5171 case 0:
5172 ret = read_ldt(ptr, bytecount);
5173 break;
5174 case 1:
5175 ret = write_ldt(env, ptr, bytecount, 1);
5176 break;
5177 case 0x11:
5178 ret = write_ldt(env, ptr, bytecount, 0);
5179 break;
5180 default:
5181 ret = -TARGET_ENOSYS;
5182 break;
5184 return ret;
5187 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor into the guest
 * GDT.  If the guest passes entry_number == -1 we pick the first free
 * TLS slot and write the chosen index back to the guest struct.  The
 * descriptor packing mirrors write_ldt() above (same kernel layout),
 * except that 'useable' is always honoured (no oldmode here).
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* find a free TLS slot and report it back to the guest */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* unpack the flag bits (same layout as the kernel's user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        /* conforming-code segments must be present */
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the two 32-bit descriptor words */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
          (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
          (ldt_info.limit & 0xf0000) |
          ((read_exec_only ^ 1) << 9) |
          (contents << 10) |
          ((seg_not_present ^ 1) << 15) |
          (seg_32bit << 22) |
          (limit_in_pages << 23) |
          (useable << 20) |
          (lm << 21) |
          0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * Emulate get_thread_area(2): read back the TLS descriptor selected by
 * entry_number in the guest struct, unpack the two descriptor words
 * into base/limit/flags (the exact inverse of the packing in
 * do_set_thread_area) and write them to the guest struct.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* unpack the attribute bits from the high descriptor word */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    /* base and limit are split across the two descriptor words */
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
                (entry_2 & 0xff000000) |
                ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5319 #endif /* TARGET_I386 && TARGET_ABI32 */
5321 #ifndef TARGET_ABI32
5322 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5324 abi_long ret = 0;
5325 abi_ulong val;
5326 int idx;
5328 switch(code) {
5329 case TARGET_ARCH_SET_GS:
5330 case TARGET_ARCH_SET_FS:
5331 if (code == TARGET_ARCH_SET_GS)
5332 idx = R_GS;
5333 else
5334 idx = R_FS;
5335 cpu_x86_load_seg(env, idx, 0);
5336 env->segs[idx].base = addr;
5337 break;
5338 case TARGET_ARCH_GET_GS:
5339 case TARGET_ARCH_GET_FS:
5340 if (code == TARGET_ARCH_GET_GS)
5341 idx = R_GS;
5342 else
5343 idx = R_FS;
5344 val = env->segs[idx].base;
5345 if (put_user(val, addr, abi_ulong))
5346 ret = -TARGET_EFAULT;
5347 break;
5348 default:
5349 ret = -TARGET_EINVAL;
5350 break;
5352 return ret;
5354 #endif
5356 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000


/* Held by the parent across thread creation so that setup of the new
 * CPU state appears atomic to the child (see clone_func/do_fork). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake data passed from do_fork() to the new thread's
 * clone_func().  mutex/cond signal readiness back to the parent;
 * tid is filled in by the child; *_tidptr are guest addresses to
 * receive the tid (CLONE_CHILD_SETTID / CLONE_PARENT_SETTID). */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;
/*
 * Entry point of a thread created for clone(CLONE_VM): register with
 * RCU, adopt the new CPU state, publish the tid to the requested guest
 * locations, unblock signals (the parent blocked them all before
 * pthread_create), signal readiness to the parent, wait for the parent
 * to finish TLS setup, then enter the CPU emulation loop forever.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.
     * (locking clone_lock, which the parent holds, blocks us until
     * the parent releases it) */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        /* CLONE_VM: create a new guest thread backed by a host pthread
         * sharing this process's address space. */
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        nptl_flags = flags;
        /* Strip the NPTL flags before they reach the host clone();
         * they are emulated here instead. */
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            /* info.tid was filled in by the child (via clone_func). */
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        /* Block signals so the fork cannot be interrupted halfway;
         * returns nonzero if a signal is already pending. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Map a guest fcntl command number to the host's.  Returns the host
 * command, or -TARGET_EINVAL for commands we do not translate. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    /* These commands have the same value everywhere we care about. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK;
    case TARGET_F_SETLK:
        return F_SETLK;
    case TARGET_F_SETLKW:
        return F_SETLKW;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    /* The 64-bit lock commands only exist as separate numbers on
     * 32-bit ABIs. */
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Not reached; keeps older compilers happy about the return path. */
    return -TARGET_EINVAL;
}
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* bitmask_transtbl mapping guest flock l_type values (F_RDLCK etc.) to
 * host values; -1 masks mean "compare all bits".  Zero entry terminates. */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/* Implement the fcntl family: convert guest flock/flock64/f_owner_ex
 * arguments to host layout, call host fcntl() with the pre-translated
 * command, and convert returned structures back to guest layout.
 * Returns host result or -TARGET_Exxx. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            /* Success: write the (possibly updated) lock back to guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the >> 1 applied to the translated l_type in the
         * 64-bit cases looks suspicious compared to the 32-bit cases
         * above -- confirm against flock_tbl / target_flock64 semantics. */
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Returned flag bits need host-to-guest translation. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Plain integer argument; passed through unchanged. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* Clamp a 32-bit uid into the 16-bit range; anything out of range maps
 * to the overflow id 65534 (the kernel's convention for 16-bit callers). */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the special -1 "unchanged" value. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

/* Byteswap an id at the width the guest ABI uses (16-bit here). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw wrappers that invoke the syscalls directly, bypassing glibc's
 * setxid broadcast to all threads. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialization for the syscall layer: register thunk types,
 * build the target->host errno reverse table, and patch ioctl command
 * numbers whose size field could not be computed at compile time. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* An all-ones size field marks "compute from the thunk type". */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit value from the two 32-bit guest registers that
 * carried it; which register holds the high half follows guest
 * endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t hi = word0, lo = word1;
#else
    uint64_t hi = word1, lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole value in the first argument. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length arrives as a register pair; on ABIs
 * that align such pairs it starts one argument slot later. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long w0 = arg2;
    abi_long w1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(w0, w1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64 above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long w0 = arg2;
    abi_long w1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(w0, w1)));
}
#endif
/* Copy a guest struct timespec at target_addr into *host_ts.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Copy *host_ts out to the guest struct timespec at target_addr.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
5959 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5960 abi_ulong target_addr)
5962 struct target_itimerspec *target_itspec;
5964 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5965 return -TARGET_EFAULT;
5968 host_itspec->it_interval.tv_sec =
5969 tswapal(target_itspec->it_interval.tv_sec);
5970 host_itspec->it_interval.tv_nsec =
5971 tswapal(target_itspec->it_interval.tv_nsec);
5972 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5973 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5975 unlock_user_struct(target_itspec, target_addr, 1);
5976 return 0;
5979 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5980 struct itimerspec *host_its)
5982 struct target_itimerspec *target_itspec;
5984 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5985 return -TARGET_EFAULT;
5988 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5989 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5991 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5992 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5994 unlock_user_struct(target_itspec, target_addr, 0);
5995 return 0;
5998 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5999 abi_ulong target_addr)
6001 struct target_sigevent *target_sevp;
6003 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6004 return -TARGET_EFAULT;
6007 /* This union is awkward on 64 bit systems because it has a 32 bit
6008 * integer and a pointer in it; we follow the conversion approach
6009 * used for handling sigval types in signal.c so the guest should get
6010 * the correct value back even if we did a 64 bit byteswap and it's
6011 * using the 32 bit integer.
6013 host_sevp->sigev_value.sival_ptr =
6014 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6015 host_sevp->sigev_signo =
6016 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6017 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6018 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6020 unlock_user_struct(target_sevp, target_addr, 1);
6021 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits to the host's MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_bits = 0;

    host_bits |= (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;
    host_bits |= (arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0;
    return host_bits;
}
#endif
/* Write a host struct stat out to guest memory in the guest's stat64
 * layout (or plain stat where the target has no stat64).  Returns 0 on
 * success or -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    /* ARM EABI uses a differently padded stat64 layout. */
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6102 /* ??? Using host futex calls even when target atomic operations
6103 are not really atomic probably breaks things. However implementing
6104 futexes locally would make futexes shared between multiple processes
6105 tricky. However they're probably useless because guest atomic
6106 operations won't work either. */
6107 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6108 target_ulong uaddr2, int val3)
6110 struct timespec ts, *pts;
6111 int base_op;
6113 /* ??? We assume FUTEX_* constants are the same on both host
6114 and target. */
6115 #ifdef FUTEX_CMD_MASK
6116 base_op = op & FUTEX_CMD_MASK;
6117 #else
6118 base_op = op;
6119 #endif
6120 switch (base_op) {
6121 case FUTEX_WAIT:
6122 case FUTEX_WAIT_BITSET:
6123 if (timeout) {
6124 pts = &ts;
6125 target_to_host_timespec(pts, timeout);
6126 } else {
6127 pts = NULL;
6129 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6130 pts, NULL, val3));
6131 case FUTEX_WAKE:
6132 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6133 case FUTEX_FD:
6134 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6135 case FUTEX_REQUEUE:
6136 case FUTEX_CMP_REQUEUE:
6137 case FUTEX_WAKE_OP:
6138 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6139 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6140 But the prototype takes a `struct timespec *'; insert casts
6141 to satisfy the compiler. We do not need to tswap TIMEOUT
6142 since it's not compared to guest memory. */
6143 pts = (struct timespec *)(uintptr_t) timeout;
6144 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6145 g2h(uaddr2),
6146 (base_op == FUTEX_CMP_REQUEUE
6147 ? tswap32(val3)
6148 : val3)));
6149 default:
6150 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest's handle_bytes, call the
 * host syscall with a host-side file_handle of that size, then copy the
 * (opaque) handle and mount id back to guest memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's (opaque) file_handle
 * into a host buffer, fix up the swapped header fields, and open it. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6241 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6243 /* signalfd siginfo conversion */
6245 static void
6246 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
6247 const struct signalfd_siginfo *info)
6249 int sig = host_to_target_signal(info->ssi_signo);
6251 /* linux/signalfd.h defines a ssi_addr_lsb
6252 * not defined in sys/signalfd.h but used by some kernels
6255 #ifdef BUS_MCEERR_AO
6256 if (tinfo->ssi_signo == SIGBUS &&
6257 (tinfo->ssi_code == BUS_MCEERR_AR ||
6258 tinfo->ssi_code == BUS_MCEERR_AO)) {
6259 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
6260 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
6261 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
6263 #endif
6265 tinfo->ssi_signo = tswap32(sig);
6266 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
6267 tinfo->ssi_code = tswap32(info->ssi_code);
6268 tinfo->ssi_pid = tswap32(info->ssi_pid);
6269 tinfo->ssi_uid = tswap32(info->ssi_uid);
6270 tinfo->ssi_fd = tswap32(info->ssi_fd);
6271 tinfo->ssi_tid = tswap32(info->ssi_tid);
6272 tinfo->ssi_band = tswap32(info->ssi_band);
6273 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
6274 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
6275 tinfo->ssi_status = tswap32(info->ssi_status);
6276 tinfo->ssi_int = tswap32(info->ssi_int);
6277 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
6278 tinfo->ssi_utime = tswap64(info->ssi_utime);
6279 tinfo->ssi_stime = tswap64(info->ssi_stime);
6280 tinfo->ssi_addr = tswap64(info->ssi_addr);
/* Byteswap, in place, every signalfd_siginfo in a buffer just read from
 * a signalfd.  len is in bytes; returns len. */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}

/* fd translator registered for file descriptors created by signalfd(). */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Emulate signalfd4(2) (and signalfd(2) with flags == 0): convert the
 * guest sigset and flag bits to host values, create the host signalfd,
 * and register the siginfo byteswapping translator on the new fd. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; remap just those. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
/* Emulate reads of /proc/self/cmdline: copy the real cmdline to fd,
 * minus the first NUL-terminated word (the qemu binary path), so the
 * guest sees its own argv.  Returns 0 on success, -1 with errno set on
 * failure (cpu_env is unused). */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Only scan the bytes actually read; searching sizeof(buf)
             * would examine uninitialized tail bytes and a stray NUL
             * there could truncate or corrupt the copied cmdline. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6391 static int open_self_maps(void *cpu_env, int fd)
6393 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6394 TaskState *ts = cpu->opaque;
6395 FILE *fp;
6396 char *line = NULL;
6397 size_t len = 0;
6398 ssize_t read;
6400 fp = fopen("/proc/self/maps", "r");
6401 if (fp == NULL) {
6402 return -1;
6405 while ((read = getline(&line, &len, fp)) != -1) {
6406 int fields, dev_maj, dev_min, inode;
6407 uint64_t min, max, offset;
6408 char flag_r, flag_w, flag_x, flag_p;
6409 char path[512] = "";
6410 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6411 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6412 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6414 if ((fields < 10) || (fields > 11)) {
6415 continue;
6417 if (h2g_valid(min)) {
6418 int flags = page_get_flags(h2g(min));
6419 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6420 if (page_check_range(h2g(min), max - min, flags) == -1) {
6421 continue;
6423 if (h2g(min) == ts->info->stack_limit) {
6424 pstrcpy(path, sizeof(path), " [stack]");
6426 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6427 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6428 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6429 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6430 path[0] ? " " : "", path);
6434 free(line);
6435 fclose(fp);
6437 return 0;
/* Emulate reads of /proc/self/stat: emit the guest's pid, program name
 * and stack bottom in the right columns and zeros everywhere else (44
 * space-separated fields, newline after the last). */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Emulate reads of /proc/self/auxv: copy the auxiliary vector saved on
 * the guest stack at startup out to fd, then rewind fd to the start. */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): len has been decremented and ptr advanced by the
         * loop, so this unlock uses the remaining (usually 0) length and
         * the advanced pointer -- harmless for a read-only lock, but
         * worth confirming against unlock_user()'s contract. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if filename names the given /proc entry for this process --
 * either "/proc/self/<entry>" or "/proc/<our-pid>/<entry>" -- else 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(p, myself, strlen(myself)) != 0) {
            return 0;
        }
        p += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used for absolute-path fake_open entries. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Emulate reads of /proc/net/route when host and guest endianness
 * differ: byteswap the address/mask columns, pass the rest through. */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
6575 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6577 struct fake_open {
6578 const char *filename;
6579 int (*fill)(void *cpu_env, int fd);
6580 int (*cmp)(const char *s1, const char *s2);
6582 const struct fake_open *fake_open;
6583 static const struct fake_open fakes[] = {
6584 { "maps", open_self_maps, is_proc_myself },
6585 { "stat", open_self_stat, is_proc_myself },
6586 { "auxv", open_self_auxv, is_proc_myself },
6587 { "cmdline", open_self_cmdline, is_proc_myself },
6588 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6589 { "/proc/net/route", open_net_route, is_proc },
6590 #endif
6591 { NULL, NULL, NULL }
6594 if (is_proc_myself(pathname, "exe")) {
6595 int execfd = qemu_getauxval(AT_EXECFD);
6596 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6599 for (fake_open = fakes; fake_open->filename; fake_open++) {
6600 if (fake_open->cmp(pathname, fake_open->filename)) {
6601 break;
6605 if (fake_open->filename) {
6606 const char *tmpdir;
6607 char filename[PATH_MAX];
6608 int fd, r;
6610 /* create temporary file to map stat to */
6611 tmpdir = getenv("TMPDIR");
6612 if (!tmpdir)
6613 tmpdir = "/tmp";
6614 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6615 fd = mkstemp(filename);
6616 if (fd < 0) {
6617 return fd;
6619 unlink(filename);
6621 if ((r = fake_open->fill(cpu_env, fd))) {
6622 int e = errno;
6623 close(fd);
6624 errno = e;
6625 return r;
6627 lseek(fd, 0, SEEK_SET);
6629 return fd;
6632 return safe_openat(dirfd, path(pathname), flags, mode);
6635 #define TIMER_MAGIC 0x0caf0000
6636 #define TIMER_MAGIC_MASK 0xffff0000
6638 /* Convert QEMU provided timer ID back to internal 16bit index format */
6639 static target_timer_t get_timer_id(abi_long arg)
6641 target_timer_t timerid = arg;
6643 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6644 return -TARGET_EINVAL;
6647 timerid &= 0xffff;
6649 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6650 return -TARGET_EINVAL;
6653 return timerid;
6656 /* do_syscall() should always have a single exit point at the end so
6657 that actions, such as logging of syscall results, can be performed.
6658 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6659 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6660 abi_long arg2, abi_long arg3, abi_long arg4,
6661 abi_long arg5, abi_long arg6, abi_long arg7,
6662 abi_long arg8)
6664 CPUState *cpu = ENV_GET_CPU(cpu_env);
6665 abi_long ret;
6666 struct stat st;
6667 struct statfs stfs;
6668 void *p;
6670 #if defined(DEBUG_ERESTARTSYS)
6671 /* Debug-only code for exercising the syscall-restart code paths
6672 * in the per-architecture cpu main loops: restart every syscall
6673 * the guest makes once before letting it through.
6676 static int flag;
6678 flag = !flag;
6679 if (flag) {
6680 return -TARGET_ERESTARTSYS;
6683 #endif
6685 #ifdef DEBUG
6686 gemu_log("syscall %d", num);
6687 #endif
6688 if(do_strace)
6689 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6691 switch(num) {
6692 case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread. */
6698 if (block_signals()) {
6699 ret = -TARGET_ERESTARTSYS;
6700 break;
6703 if (CPU_NEXT(first_cpu)) {
6704 TaskState *ts;
6706 cpu_list_lock();
6707 /* Remove the CPU from the list. */
6708 QTAILQ_REMOVE(&cpus, cpu, node);
6709 cpu_list_unlock();
6710 ts = cpu->opaque;
6711 if (ts->child_tidptr) {
6712 put_user_u32(0, ts->child_tidptr);
6713 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6714 NULL, NULL, 0);
6716 thread_cpu = NULL;
6717 object_unref(OBJECT(cpu));
6718 g_free(ts);
6719 rcu_unregister_thread();
6720 pthread_exit(NULL);
6722 #ifdef TARGET_GPROF
6723 _mcleanup();
6724 #endif
6725 gdb_exit(cpu_env, arg1);
6726 _exit(arg1);
6727 ret = 0; /* avoid warning */
6728 break;
6729 case TARGET_NR_read:
6730 if (arg3 == 0)
6731 ret = 0;
6732 else {
6733 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6734 goto efault;
6735 ret = get_errno(safe_read(arg1, p, arg3));
6736 if (ret >= 0 &&
6737 fd_trans_host_to_target_data(arg1)) {
6738 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6740 unlock_user(p, arg2, ret);
6742 break;
6743 case TARGET_NR_write:
6744 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6745 goto efault;
6746 ret = get_errno(safe_write(arg1, p, arg3));
6747 unlock_user(p, arg2, 0);
6748 break;
6749 #ifdef TARGET_NR_open
6750 case TARGET_NR_open:
6751 if (!(p = lock_user_string(arg1)))
6752 goto efault;
6753 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6754 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6755 arg3));
6756 fd_trans_unregister(ret);
6757 unlock_user(p, arg1, 0);
6758 break;
6759 #endif
6760 case TARGET_NR_openat:
6761 if (!(p = lock_user_string(arg2)))
6762 goto efault;
6763 ret = get_errno(do_openat(cpu_env, arg1, p,
6764 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6765 arg4));
6766 fd_trans_unregister(ret);
6767 unlock_user(p, arg2, 0);
6768 break;
6769 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6770 case TARGET_NR_name_to_handle_at:
6771 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6772 break;
6773 #endif
6774 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6775 case TARGET_NR_open_by_handle_at:
6776 ret = do_open_by_handle_at(arg1, arg2, arg3);
6777 fd_trans_unregister(ret);
6778 break;
6779 #endif
6780 case TARGET_NR_close:
6781 fd_trans_unregister(arg1);
6782 ret = get_errno(close(arg1));
6783 break;
6784 case TARGET_NR_brk:
6785 ret = do_brk(arg1);
6786 break;
6787 #ifdef TARGET_NR_fork
6788 case TARGET_NR_fork:
6789 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6790 break;
6791 #endif
6792 #ifdef TARGET_NR_waitpid
6793 case TARGET_NR_waitpid:
6795 int status;
6796 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6797 if (!is_error(ret) && arg2 && ret
6798 && put_user_s32(host_to_target_waitstatus(status), arg2))
6799 goto efault;
6801 break;
6802 #endif
6803 #ifdef TARGET_NR_waitid
6804 case TARGET_NR_waitid:
6806 siginfo_t info;
6807 info.si_pid = 0;
6808 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6809 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6810 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6811 goto efault;
6812 host_to_target_siginfo(p, &info);
6813 unlock_user(p, arg3, sizeof(target_siginfo_t));
6816 break;
6817 #endif
6818 #ifdef TARGET_NR_creat /* not on alpha */
6819 case TARGET_NR_creat:
6820 if (!(p = lock_user_string(arg1)))
6821 goto efault;
6822 ret = get_errno(creat(p, arg2));
6823 fd_trans_unregister(ret);
6824 unlock_user(p, arg1, 0);
6825 break;
6826 #endif
6827 #ifdef TARGET_NR_link
6828 case TARGET_NR_link:
6830 void * p2;
6831 p = lock_user_string(arg1);
6832 p2 = lock_user_string(arg2);
6833 if (!p || !p2)
6834 ret = -TARGET_EFAULT;
6835 else
6836 ret = get_errno(link(p, p2));
6837 unlock_user(p2, arg2, 0);
6838 unlock_user(p, arg1, 0);
6840 break;
6841 #endif
6842 #if defined(TARGET_NR_linkat)
6843 case TARGET_NR_linkat:
6845 void * p2 = NULL;
6846 if (!arg2 || !arg4)
6847 goto efault;
6848 p = lock_user_string(arg2);
6849 p2 = lock_user_string(arg4);
6850 if (!p || !p2)
6851 ret = -TARGET_EFAULT;
6852 else
6853 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6854 unlock_user(p, arg2, 0);
6855 unlock_user(p2, arg4, 0);
6857 break;
6858 #endif
6859 #ifdef TARGET_NR_unlink
6860 case TARGET_NR_unlink:
6861 if (!(p = lock_user_string(arg1)))
6862 goto efault;
6863 ret = get_errno(unlink(p));
6864 unlock_user(p, arg1, 0);
6865 break;
6866 #endif
6867 #if defined(TARGET_NR_unlinkat)
6868 case TARGET_NR_unlinkat:
6869 if (!(p = lock_user_string(arg2)))
6870 goto efault;
6871 ret = get_errno(unlinkat(arg1, p, arg3));
6872 unlock_user(p, arg2, 0);
6873 break;
6874 #endif
6875 case TARGET_NR_execve:
6877 char **argp, **envp;
6878 int argc, envc;
6879 abi_ulong gp;
6880 abi_ulong guest_argp;
6881 abi_ulong guest_envp;
6882 abi_ulong addr;
6883 char **q;
6884 int total_size = 0;
6886 argc = 0;
6887 guest_argp = arg2;
6888 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6889 if (get_user_ual(addr, gp))
6890 goto efault;
6891 if (!addr)
6892 break;
6893 argc++;
6895 envc = 0;
6896 guest_envp = arg3;
6897 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6898 if (get_user_ual(addr, gp))
6899 goto efault;
6900 if (!addr)
6901 break;
6902 envc++;
6905 argp = alloca((argc + 1) * sizeof(void *));
6906 envp = alloca((envc + 1) * sizeof(void *));
6908 for (gp = guest_argp, q = argp; gp;
6909 gp += sizeof(abi_ulong), q++) {
6910 if (get_user_ual(addr, gp))
6911 goto execve_efault;
6912 if (!addr)
6913 break;
6914 if (!(*q = lock_user_string(addr)))
6915 goto execve_efault;
6916 total_size += strlen(*q) + 1;
6918 *q = NULL;
6920 for (gp = guest_envp, q = envp; gp;
6921 gp += sizeof(abi_ulong), q++) {
6922 if (get_user_ual(addr, gp))
6923 goto execve_efault;
6924 if (!addr)
6925 break;
6926 if (!(*q = lock_user_string(addr)))
6927 goto execve_efault;
6928 total_size += strlen(*q) + 1;
6930 *q = NULL;
6932 if (!(p = lock_user_string(arg1)))
6933 goto execve_efault;
6934 /* Although execve() is not an interruptible syscall it is
6935 * a special case where we must use the safe_syscall wrapper:
6936 * if we allow a signal to happen before we make the host
6937 * syscall then we will 'lose' it, because at the point of
6938 * execve the process leaves QEMU's control. So we use the
6939 * safe syscall wrapper to ensure that we either take the
6940 * signal as a guest signal, or else it does not happen
6941 * before the execve completes and makes it the other
6942 * program's problem.
6944 ret = get_errno(safe_execve(p, argp, envp));
6945 unlock_user(p, arg1, 0);
6947 goto execve_end;
6949 execve_efault:
6950 ret = -TARGET_EFAULT;
6952 execve_end:
6953 for (gp = guest_argp, q = argp; *q;
6954 gp += sizeof(abi_ulong), q++) {
6955 if (get_user_ual(addr, gp)
6956 || !addr)
6957 break;
6958 unlock_user(*q, addr, 0);
6960 for (gp = guest_envp, q = envp; *q;
6961 gp += sizeof(abi_ulong), q++) {
6962 if (get_user_ual(addr, gp)
6963 || !addr)
6964 break;
6965 unlock_user(*q, addr, 0);
6968 break;
6969 case TARGET_NR_chdir:
6970 if (!(p = lock_user_string(arg1)))
6971 goto efault;
6972 ret = get_errno(chdir(p));
6973 unlock_user(p, arg1, 0);
6974 break;
6975 #ifdef TARGET_NR_time
6976 case TARGET_NR_time:
6978 time_t host_time;
6979 ret = get_errno(time(&host_time));
6980 if (!is_error(ret)
6981 && arg1
6982 && put_user_sal(host_time, arg1))
6983 goto efault;
6985 break;
6986 #endif
6987 #ifdef TARGET_NR_mknod
6988 case TARGET_NR_mknod:
6989 if (!(p = lock_user_string(arg1)))
6990 goto efault;
6991 ret = get_errno(mknod(p, arg2, arg3));
6992 unlock_user(p, arg1, 0);
6993 break;
6994 #endif
6995 #if defined(TARGET_NR_mknodat)
6996 case TARGET_NR_mknodat:
6997 if (!(p = lock_user_string(arg2)))
6998 goto efault;
6999 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7000 unlock_user(p, arg2, 0);
7001 break;
7002 #endif
7003 #ifdef TARGET_NR_chmod
7004 case TARGET_NR_chmod:
7005 if (!(p = lock_user_string(arg1)))
7006 goto efault;
7007 ret = get_errno(chmod(p, arg2));
7008 unlock_user(p, arg1, 0);
7009 break;
7010 #endif
7011 #ifdef TARGET_NR_break
7012 case TARGET_NR_break:
7013 goto unimplemented;
7014 #endif
7015 #ifdef TARGET_NR_oldstat
7016 case TARGET_NR_oldstat:
7017 goto unimplemented;
7018 #endif
7019 case TARGET_NR_lseek:
7020 ret = get_errno(lseek(arg1, arg2, arg3));
7021 break;
7022 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7023 /* Alpha specific */
7024 case TARGET_NR_getxpid:
7025 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7026 ret = get_errno(getpid());
7027 break;
7028 #endif
7029 #ifdef TARGET_NR_getpid
7030 case TARGET_NR_getpid:
7031 ret = get_errno(getpid());
7032 break;
7033 #endif
7034 case TARGET_NR_mount:
7036 /* need to look at the data field */
7037 void *p2, *p3;
7039 if (arg1) {
7040 p = lock_user_string(arg1);
7041 if (!p) {
7042 goto efault;
7044 } else {
7045 p = NULL;
7048 p2 = lock_user_string(arg2);
7049 if (!p2) {
7050 if (arg1) {
7051 unlock_user(p, arg1, 0);
7053 goto efault;
7056 if (arg3) {
7057 p3 = lock_user_string(arg3);
7058 if (!p3) {
7059 if (arg1) {
7060 unlock_user(p, arg1, 0);
7062 unlock_user(p2, arg2, 0);
7063 goto efault;
7065 } else {
7066 p3 = NULL;
7069 /* FIXME - arg5 should be locked, but it isn't clear how to
7070 * do that since it's not guaranteed to be a NULL-terminated
7071 * string.
7073 if (!arg5) {
7074 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7075 } else {
7076 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7078 ret = get_errno(ret);
7080 if (arg1) {
7081 unlock_user(p, arg1, 0);
7083 unlock_user(p2, arg2, 0);
7084 if (arg3) {
7085 unlock_user(p3, arg3, 0);
7088 break;
7089 #ifdef TARGET_NR_umount
7090 case TARGET_NR_umount:
7091 if (!(p = lock_user_string(arg1)))
7092 goto efault;
7093 ret = get_errno(umount(p));
7094 unlock_user(p, arg1, 0);
7095 break;
7096 #endif
7097 #ifdef TARGET_NR_stime /* not on alpha */
7098 case TARGET_NR_stime:
7100 time_t host_time;
7101 if (get_user_sal(host_time, arg1))
7102 goto efault;
7103 ret = get_errno(stime(&host_time));
7105 break;
7106 #endif
7107 case TARGET_NR_ptrace:
7108 goto unimplemented;
7109 #ifdef TARGET_NR_alarm /* not on alpha */
7110 case TARGET_NR_alarm:
7111 ret = alarm(arg1);
7112 break;
7113 #endif
7114 #ifdef TARGET_NR_oldfstat
7115 case TARGET_NR_oldfstat:
7116 goto unimplemented;
7117 #endif
7118 #ifdef TARGET_NR_pause /* not on alpha */
7119 case TARGET_NR_pause:
7120 if (!block_signals()) {
7121 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7123 ret = -TARGET_EINTR;
7124 break;
7125 #endif
7126 #ifdef TARGET_NR_utime
7127 case TARGET_NR_utime:
7129 struct utimbuf tbuf, *host_tbuf;
7130 struct target_utimbuf *target_tbuf;
7131 if (arg2) {
7132 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7133 goto efault;
7134 tbuf.actime = tswapal(target_tbuf->actime);
7135 tbuf.modtime = tswapal(target_tbuf->modtime);
7136 unlock_user_struct(target_tbuf, arg2, 0);
7137 host_tbuf = &tbuf;
7138 } else {
7139 host_tbuf = NULL;
7141 if (!(p = lock_user_string(arg1)))
7142 goto efault;
7143 ret = get_errno(utime(p, host_tbuf));
7144 unlock_user(p, arg1, 0);
7146 break;
7147 #endif
7148 #ifdef TARGET_NR_utimes
7149 case TARGET_NR_utimes:
7151 struct timeval *tvp, tv[2];
7152 if (arg2) {
7153 if (copy_from_user_timeval(&tv[0], arg2)
7154 || copy_from_user_timeval(&tv[1],
7155 arg2 + sizeof(struct target_timeval)))
7156 goto efault;
7157 tvp = tv;
7158 } else {
7159 tvp = NULL;
7161 if (!(p = lock_user_string(arg1)))
7162 goto efault;
7163 ret = get_errno(utimes(p, tvp));
7164 unlock_user(p, arg1, 0);
7166 break;
7167 #endif
7168 #if defined(TARGET_NR_futimesat)
7169 case TARGET_NR_futimesat:
7171 struct timeval *tvp, tv[2];
7172 if (arg3) {
7173 if (copy_from_user_timeval(&tv[0], arg3)
7174 || copy_from_user_timeval(&tv[1],
7175 arg3 + sizeof(struct target_timeval)))
7176 goto efault;
7177 tvp = tv;
7178 } else {
7179 tvp = NULL;
7181 if (!(p = lock_user_string(arg2)))
7182 goto efault;
7183 ret = get_errno(futimesat(arg1, path(p), tvp));
7184 unlock_user(p, arg2, 0);
7186 break;
7187 #endif
7188 #ifdef TARGET_NR_stty
7189 case TARGET_NR_stty:
7190 goto unimplemented;
7191 #endif
7192 #ifdef TARGET_NR_gtty
7193 case TARGET_NR_gtty:
7194 goto unimplemented;
7195 #endif
7196 #ifdef TARGET_NR_access
7197 case TARGET_NR_access:
7198 if (!(p = lock_user_string(arg1)))
7199 goto efault;
7200 ret = get_errno(access(path(p), arg2));
7201 unlock_user(p, arg1, 0);
7202 break;
7203 #endif
7204 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7205 case TARGET_NR_faccessat:
7206 if (!(p = lock_user_string(arg2)))
7207 goto efault;
7208 ret = get_errno(faccessat(arg1, p, arg3, 0));
7209 unlock_user(p, arg2, 0);
7210 break;
7211 #endif
7212 #ifdef TARGET_NR_nice /* not on alpha */
7213 case TARGET_NR_nice:
7214 ret = get_errno(nice(arg1));
7215 break;
7216 #endif
7217 #ifdef TARGET_NR_ftime
7218 case TARGET_NR_ftime:
7219 goto unimplemented;
7220 #endif
7221 case TARGET_NR_sync:
7222 sync();
7223 ret = 0;
7224 break;
7225 case TARGET_NR_kill:
7226 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7227 break;
7228 #ifdef TARGET_NR_rename
7229 case TARGET_NR_rename:
7231 void *p2;
7232 p = lock_user_string(arg1);
7233 p2 = lock_user_string(arg2);
7234 if (!p || !p2)
7235 ret = -TARGET_EFAULT;
7236 else
7237 ret = get_errno(rename(p, p2));
7238 unlock_user(p2, arg2, 0);
7239 unlock_user(p, arg1, 0);
7241 break;
7242 #endif
7243 #if defined(TARGET_NR_renameat)
7244 case TARGET_NR_renameat:
7246 void *p2;
7247 p = lock_user_string(arg2);
7248 p2 = lock_user_string(arg4);
7249 if (!p || !p2)
7250 ret = -TARGET_EFAULT;
7251 else
7252 ret = get_errno(renameat(arg1, p, arg3, p2));
7253 unlock_user(p2, arg4, 0);
7254 unlock_user(p, arg2, 0);
7256 break;
7257 #endif
7258 #ifdef TARGET_NR_mkdir
7259 case TARGET_NR_mkdir:
7260 if (!(p = lock_user_string(arg1)))
7261 goto efault;
7262 ret = get_errno(mkdir(p, arg2));
7263 unlock_user(p, arg1, 0);
7264 break;
7265 #endif
7266 #if defined(TARGET_NR_mkdirat)
7267 case TARGET_NR_mkdirat:
7268 if (!(p = lock_user_string(arg2)))
7269 goto efault;
7270 ret = get_errno(mkdirat(arg1, p, arg3));
7271 unlock_user(p, arg2, 0);
7272 break;
7273 #endif
7274 #ifdef TARGET_NR_rmdir
7275 case TARGET_NR_rmdir:
7276 if (!(p = lock_user_string(arg1)))
7277 goto efault;
7278 ret = get_errno(rmdir(p));
7279 unlock_user(p, arg1, 0);
7280 break;
7281 #endif
7282 case TARGET_NR_dup:
7283 ret = get_errno(dup(arg1));
7284 if (ret >= 0) {
7285 fd_trans_dup(arg1, ret);
7287 break;
7288 #ifdef TARGET_NR_pipe
7289 case TARGET_NR_pipe:
7290 ret = do_pipe(cpu_env, arg1, 0, 0);
7291 break;
7292 #endif
7293 #ifdef TARGET_NR_pipe2
7294 case TARGET_NR_pipe2:
7295 ret = do_pipe(cpu_env, arg1,
7296 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7297 break;
7298 #endif
7299 case TARGET_NR_times:
7301 struct target_tms *tmsp;
7302 struct tms tms;
7303 ret = get_errno(times(&tms));
7304 if (arg1) {
7305 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7306 if (!tmsp)
7307 goto efault;
7308 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7309 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7310 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7311 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7313 if (!is_error(ret))
7314 ret = host_to_target_clock_t(ret);
7316 break;
7317 #ifdef TARGET_NR_prof
7318 case TARGET_NR_prof:
7319 goto unimplemented;
7320 #endif
7321 #ifdef TARGET_NR_signal
7322 case TARGET_NR_signal:
7323 goto unimplemented;
7324 #endif
7325 case TARGET_NR_acct:
7326 if (arg1 == 0) {
7327 ret = get_errno(acct(NULL));
7328 } else {
7329 if (!(p = lock_user_string(arg1)))
7330 goto efault;
7331 ret = get_errno(acct(path(p)));
7332 unlock_user(p, arg1, 0);
7334 break;
7335 #ifdef TARGET_NR_umount2
7336 case TARGET_NR_umount2:
7337 if (!(p = lock_user_string(arg1)))
7338 goto efault;
7339 ret = get_errno(umount2(p, arg2));
7340 unlock_user(p, arg1, 0);
7341 break;
7342 #endif
7343 #ifdef TARGET_NR_lock
7344 case TARGET_NR_lock:
7345 goto unimplemented;
7346 #endif
7347 case TARGET_NR_ioctl:
7348 ret = do_ioctl(arg1, arg2, arg3);
7349 break;
7350 case TARGET_NR_fcntl:
7351 ret = do_fcntl(arg1, arg2, arg3);
7352 break;
7353 #ifdef TARGET_NR_mpx
7354 case TARGET_NR_mpx:
7355 goto unimplemented;
7356 #endif
7357 case TARGET_NR_setpgid:
7358 ret = get_errno(setpgid(arg1, arg2));
7359 break;
7360 #ifdef TARGET_NR_ulimit
7361 case TARGET_NR_ulimit:
7362 goto unimplemented;
7363 #endif
7364 #ifdef TARGET_NR_oldolduname
7365 case TARGET_NR_oldolduname:
7366 goto unimplemented;
7367 #endif
7368 case TARGET_NR_umask:
7369 ret = get_errno(umask(arg1));
7370 break;
7371 case TARGET_NR_chroot:
7372 if (!(p = lock_user_string(arg1)))
7373 goto efault;
7374 ret = get_errno(chroot(p));
7375 unlock_user(p, arg1, 0);
7376 break;
7377 #ifdef TARGET_NR_ustat
7378 case TARGET_NR_ustat:
7379 goto unimplemented;
7380 #endif
7381 #ifdef TARGET_NR_dup2
7382 case TARGET_NR_dup2:
7383 ret = get_errno(dup2(arg1, arg2));
7384 if (ret >= 0) {
7385 fd_trans_dup(arg1, arg2);
7387 break;
7388 #endif
7389 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7390 case TARGET_NR_dup3:
7391 ret = get_errno(dup3(arg1, arg2, arg3));
7392 if (ret >= 0) {
7393 fd_trans_dup(arg1, arg2);
7395 break;
7396 #endif
7397 #ifdef TARGET_NR_getppid /* not on alpha */
7398 case TARGET_NR_getppid:
7399 ret = get_errno(getppid());
7400 break;
7401 #endif
7402 #ifdef TARGET_NR_getpgrp
7403 case TARGET_NR_getpgrp:
7404 ret = get_errno(getpgrp());
7405 break;
7406 #endif
7407 case TARGET_NR_setsid:
7408 ret = get_errno(setsid());
7409 break;
7410 #ifdef TARGET_NR_sigaction
7411 case TARGET_NR_sigaction:
7413 #if defined(TARGET_ALPHA)
7414 struct target_sigaction act, oact, *pact = 0;
7415 struct target_old_sigaction *old_act;
7416 if (arg2) {
7417 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7418 goto efault;
7419 act._sa_handler = old_act->_sa_handler;
7420 target_siginitset(&act.sa_mask, old_act->sa_mask);
7421 act.sa_flags = old_act->sa_flags;
7422 act.sa_restorer = 0;
7423 unlock_user_struct(old_act, arg2, 0);
7424 pact = &act;
7426 ret = get_errno(do_sigaction(arg1, pact, &oact));
7427 if (!is_error(ret) && arg3) {
7428 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7429 goto efault;
7430 old_act->_sa_handler = oact._sa_handler;
7431 old_act->sa_mask = oact.sa_mask.sig[0];
7432 old_act->sa_flags = oact.sa_flags;
7433 unlock_user_struct(old_act, arg3, 1);
7435 #elif defined(TARGET_MIPS)
7436 struct target_sigaction act, oact, *pact, *old_act;
7438 if (arg2) {
7439 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7440 goto efault;
7441 act._sa_handler = old_act->_sa_handler;
7442 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7443 act.sa_flags = old_act->sa_flags;
7444 unlock_user_struct(old_act, arg2, 0);
7445 pact = &act;
7446 } else {
7447 pact = NULL;
7450 ret = get_errno(do_sigaction(arg1, pact, &oact));
7452 if (!is_error(ret) && arg3) {
7453 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7454 goto efault;
7455 old_act->_sa_handler = oact._sa_handler;
7456 old_act->sa_flags = oact.sa_flags;
7457 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7458 old_act->sa_mask.sig[1] = 0;
7459 old_act->sa_mask.sig[2] = 0;
7460 old_act->sa_mask.sig[3] = 0;
7461 unlock_user_struct(old_act, arg3, 1);
7463 #else
7464 struct target_old_sigaction *old_act;
7465 struct target_sigaction act, oact, *pact;
7466 if (arg2) {
7467 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7468 goto efault;
7469 act._sa_handler = old_act->_sa_handler;
7470 target_siginitset(&act.sa_mask, old_act->sa_mask);
7471 act.sa_flags = old_act->sa_flags;
7472 act.sa_restorer = old_act->sa_restorer;
7473 unlock_user_struct(old_act, arg2, 0);
7474 pact = &act;
7475 } else {
7476 pact = NULL;
7478 ret = get_errno(do_sigaction(arg1, pact, &oact));
7479 if (!is_error(ret) && arg3) {
7480 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7481 goto efault;
7482 old_act->_sa_handler = oact._sa_handler;
7483 old_act->sa_mask = oact.sa_mask.sig[0];
7484 old_act->sa_flags = oact.sa_flags;
7485 old_act->sa_restorer = oact.sa_restorer;
7486 unlock_user_struct(old_act, arg3, 1);
7488 #endif
7490 break;
7491 #endif
7492 case TARGET_NR_rt_sigaction:
7494 #if defined(TARGET_ALPHA)
7495 struct target_sigaction act, oact, *pact = 0;
7496 struct target_rt_sigaction *rt_act;
7497 /* ??? arg4 == sizeof(sigset_t). */
7498 if (arg2) {
7499 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7500 goto efault;
7501 act._sa_handler = rt_act->_sa_handler;
7502 act.sa_mask = rt_act->sa_mask;
7503 act.sa_flags = rt_act->sa_flags;
7504 act.sa_restorer = arg5;
7505 unlock_user_struct(rt_act, arg2, 0);
7506 pact = &act;
7508 ret = get_errno(do_sigaction(arg1, pact, &oact));
7509 if (!is_error(ret) && arg3) {
7510 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7511 goto efault;
7512 rt_act->_sa_handler = oact._sa_handler;
7513 rt_act->sa_mask = oact.sa_mask;
7514 rt_act->sa_flags = oact.sa_flags;
7515 unlock_user_struct(rt_act, arg3, 1);
7517 #else
7518 struct target_sigaction *act;
7519 struct target_sigaction *oact;
7521 if (arg2) {
7522 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7523 goto efault;
7524 } else
7525 act = NULL;
7526 if (arg3) {
7527 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7528 ret = -TARGET_EFAULT;
7529 goto rt_sigaction_fail;
7531 } else
7532 oact = NULL;
7533 ret = get_errno(do_sigaction(arg1, act, oact));
7534 rt_sigaction_fail:
7535 if (act)
7536 unlock_user_struct(act, arg2, 0);
7537 if (oact)
7538 unlock_user_struct(oact, arg3, 1);
7539 #endif
7541 break;
7542 #ifdef TARGET_NR_sgetmask /* not on alpha */
7543 case TARGET_NR_sgetmask:
7545 sigset_t cur_set;
7546 abi_ulong target_set;
7547 ret = do_sigprocmask(0, NULL, &cur_set);
7548 if (!ret) {
7549 host_to_target_old_sigset(&target_set, &cur_set);
7550 ret = target_set;
7553 break;
7554 #endif
7555 #ifdef TARGET_NR_ssetmask /* not on alpha */
7556 case TARGET_NR_ssetmask:
7558 sigset_t set, oset, cur_set;
7559 abi_ulong target_set = arg1;
7560 /* We only have one word of the new mask so we must read
7561 * the rest of it with do_sigprocmask() and OR in this word.
7562 * We are guaranteed that a do_sigprocmask() that only queries
7563 * the signal mask will not fail.
7565 ret = do_sigprocmask(0, NULL, &cur_set);
7566 assert(!ret);
7567 target_to_host_old_sigset(&set, &target_set);
7568 sigorset(&set, &set, &cur_set);
7569 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7570 if (!ret) {
7571 host_to_target_old_sigset(&target_set, &oset);
7572 ret = target_set;
7575 break;
7576 #endif
7577 #ifdef TARGET_NR_sigprocmask
7578 case TARGET_NR_sigprocmask:
7580 #if defined(TARGET_ALPHA)
7581 sigset_t set, oldset;
7582 abi_ulong mask;
7583 int how;
7585 switch (arg1) {
7586 case TARGET_SIG_BLOCK:
7587 how = SIG_BLOCK;
7588 break;
7589 case TARGET_SIG_UNBLOCK:
7590 how = SIG_UNBLOCK;
7591 break;
7592 case TARGET_SIG_SETMASK:
7593 how = SIG_SETMASK;
7594 break;
7595 default:
7596 ret = -TARGET_EINVAL;
7597 goto fail;
7599 mask = arg2;
7600 target_to_host_old_sigset(&set, &mask);
7602 ret = do_sigprocmask(how, &set, &oldset);
7603 if (!is_error(ret)) {
7604 host_to_target_old_sigset(&mask, &oldset);
7605 ret = mask;
7606 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7608 #else
7609 sigset_t set, oldset, *set_ptr;
7610 int how;
7612 if (arg2) {
7613 switch (arg1) {
7614 case TARGET_SIG_BLOCK:
7615 how = SIG_BLOCK;
7616 break;
7617 case TARGET_SIG_UNBLOCK:
7618 how = SIG_UNBLOCK;
7619 break;
7620 case TARGET_SIG_SETMASK:
7621 how = SIG_SETMASK;
7622 break;
7623 default:
7624 ret = -TARGET_EINVAL;
7625 goto fail;
7627 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7628 goto efault;
7629 target_to_host_old_sigset(&set, p);
7630 unlock_user(p, arg2, 0);
7631 set_ptr = &set;
7632 } else {
7633 how = 0;
7634 set_ptr = NULL;
7636 ret = do_sigprocmask(how, set_ptr, &oldset);
7637 if (!is_error(ret) && arg3) {
7638 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7639 goto efault;
7640 host_to_target_old_sigset(p, &oldset);
7641 unlock_user(p, arg3, sizeof(target_sigset_t));
7643 #endif
7645 break;
7646 #endif
7647 case TARGET_NR_rt_sigprocmask:
7649 int how = arg1;
7650 sigset_t set, oldset, *set_ptr;
7652 if (arg2) {
7653 switch(how) {
7654 case TARGET_SIG_BLOCK:
7655 how = SIG_BLOCK;
7656 break;
7657 case TARGET_SIG_UNBLOCK:
7658 how = SIG_UNBLOCK;
7659 break;
7660 case TARGET_SIG_SETMASK:
7661 how = SIG_SETMASK;
7662 break;
7663 default:
7664 ret = -TARGET_EINVAL;
7665 goto fail;
7667 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7668 goto efault;
7669 target_to_host_sigset(&set, p);
7670 unlock_user(p, arg2, 0);
7671 set_ptr = &set;
7672 } else {
7673 how = 0;
7674 set_ptr = NULL;
7676 ret = do_sigprocmask(how, set_ptr, &oldset);
7677 if (!is_error(ret) && arg3) {
7678 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7679 goto efault;
7680 host_to_target_sigset(p, &oldset);
7681 unlock_user(p, arg3, sizeof(target_sigset_t));
7684 break;
7685 #ifdef TARGET_NR_sigpending
7686 case TARGET_NR_sigpending:
7688 sigset_t set;
7689 ret = get_errno(sigpending(&set));
7690 if (!is_error(ret)) {
7691 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7692 goto efault;
7693 host_to_target_old_sigset(p, &set);
7694 unlock_user(p, arg1, sizeof(target_sigset_t));
7697 break;
7698 #endif
7699 case TARGET_NR_rt_sigpending:
7701 sigset_t set;
7702 ret = get_errno(sigpending(&set));
7703 if (!is_error(ret)) {
7704 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7705 goto efault;
7706 host_to_target_sigset(p, &set);
7707 unlock_user(p, arg1, sizeof(target_sigset_t));
7710 break;
7711 #ifdef TARGET_NR_sigsuspend
7712 case TARGET_NR_sigsuspend:
7714 TaskState *ts = cpu->opaque;
7715 #if defined(TARGET_ALPHA)
7716 abi_ulong mask = arg1;
7717 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7718 #else
7719 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7720 goto efault;
7721 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7722 unlock_user(p, arg1, 0);
7723 #endif
7724 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7725 SIGSET_T_SIZE));
7726 if (ret != -TARGET_ERESTARTSYS) {
7727 ts->in_sigsuspend = 1;
7730 break;
7731 #endif
7732 case TARGET_NR_rt_sigsuspend:
7734 TaskState *ts = cpu->opaque;
7735 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7736 goto efault;
7737 target_to_host_sigset(&ts->sigsuspend_mask, p);
7738 unlock_user(p, arg1, 0);
7739 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7740 SIGSET_T_SIZE));
7741 if (ret != -TARGET_ERESTARTSYS) {
7742 ts->in_sigsuspend = 1;
7745 break;
7746 case TARGET_NR_rt_sigtimedwait:
7748 sigset_t set;
7749 struct timespec uts, *puts;
7750 siginfo_t uinfo;
7752 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7753 goto efault;
7754 target_to_host_sigset(&set, p);
7755 unlock_user(p, arg1, 0);
7756 if (arg3) {
7757 puts = &uts;
7758 target_to_host_timespec(puts, arg3);
7759 } else {
7760 puts = NULL;
7762 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7763 SIGSET_T_SIZE));
7764 if (!is_error(ret)) {
7765 if (arg2) {
7766 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7768 if (!p) {
7769 goto efault;
7771 host_to_target_siginfo(p, &uinfo);
7772 unlock_user(p, arg2, sizeof(target_siginfo_t));
7774 ret = host_to_target_signal(ret);
7777 break;
7778 case TARGET_NR_rt_sigqueueinfo:
7780 siginfo_t uinfo;
7781 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7782 goto efault;
7783 target_to_host_siginfo(&uinfo, p);
7784 unlock_user(p, arg1, 0);
7785 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7787 break;
7788 #ifdef TARGET_NR_sigreturn
7789 case TARGET_NR_sigreturn:
7790 if (block_signals()) {
7791 ret = -TARGET_ERESTARTSYS;
7792 } else {
7793 ret = do_sigreturn(cpu_env);
7795 break;
7796 #endif
7797 case TARGET_NR_rt_sigreturn:
7798 if (block_signals()) {
7799 ret = -TARGET_ERESTARTSYS;
7800 } else {
7801 ret = do_rt_sigreturn(cpu_env);
7803 break;
7804 case TARGET_NR_sethostname:
7805 if (!(p = lock_user_string(arg1)))
7806 goto efault;
7807 ret = get_errno(sethostname(p, arg2));
7808 unlock_user(p, arg1, 0);
7809 break;
7810 case TARGET_NR_setrlimit:
7812 int resource = target_to_host_resource(arg1);
7813 struct target_rlimit *target_rlim;
7814 struct rlimit rlim;
7815 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7816 goto efault;
7817 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7818 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7819 unlock_user_struct(target_rlim, arg2, 0);
7820 ret = get_errno(setrlimit(resource, &rlim));
7822 break;
7823 case TARGET_NR_getrlimit:
7825 int resource = target_to_host_resource(arg1);
7826 struct target_rlimit *target_rlim;
7827 struct rlimit rlim;
7829 ret = get_errno(getrlimit(resource, &rlim));
7830 if (!is_error(ret)) {
7831 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7832 goto efault;
7833 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7834 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7835 unlock_user_struct(target_rlim, arg2, 1);
7838 break;
7839 case TARGET_NR_getrusage:
7841 struct rusage rusage;
7842 ret = get_errno(getrusage(arg1, &rusage));
7843 if (!is_error(ret)) {
7844 ret = host_to_target_rusage(arg2, &rusage);
7847 break;
7848 case TARGET_NR_gettimeofday:
7850 struct timeval tv;
7851 ret = get_errno(gettimeofday(&tv, NULL));
7852 if (!is_error(ret)) {
7853 if (copy_to_user_timeval(arg1, &tv))
7854 goto efault;
7857 break;
7858 case TARGET_NR_settimeofday:
7860 struct timeval tv, *ptv = NULL;
7861 struct timezone tz, *ptz = NULL;
7863 if (arg1) {
7864 if (copy_from_user_timeval(&tv, arg1)) {
7865 goto efault;
7867 ptv = &tv;
7870 if (arg2) {
7871 if (copy_from_user_timezone(&tz, arg2)) {
7872 goto efault;
7874 ptz = &tz;
7877 ret = get_errno(settimeofday(ptv, ptz));
7879 break;
7880 #if defined(TARGET_NR_select)
7881 case TARGET_NR_select:
7882 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7883 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7884 #else
7886 struct target_sel_arg_struct *sel;
7887 abi_ulong inp, outp, exp, tvp;
7888 long nsel;
7890 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7891 goto efault;
7892 nsel = tswapal(sel->n);
7893 inp = tswapal(sel->inp);
7894 outp = tswapal(sel->outp);
7895 exp = tswapal(sel->exp);
7896 tvp = tswapal(sel->tvp);
7897 unlock_user_struct(sel, arg1, 0);
7898 ret = do_select(nsel, inp, outp, exp, tvp);
7900 #endif
7901 break;
7902 #endif
7903 #ifdef TARGET_NR_pselect6
7904 case TARGET_NR_pselect6:
7906 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7907 fd_set rfds, wfds, efds;
7908 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7909 struct timespec ts, *ts_ptr;
7912 * The 6th arg is actually two args smashed together,
7913 * so we cannot use the C library.
7915 sigset_t set;
7916 struct {
7917 sigset_t *set;
7918 size_t size;
7919 } sig, *sig_ptr;
7921 abi_ulong arg_sigset, arg_sigsize, *arg7;
7922 target_sigset_t *target_sigset;
7924 n = arg1;
7925 rfd_addr = arg2;
7926 wfd_addr = arg3;
7927 efd_addr = arg4;
7928 ts_addr = arg5;
7930 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7931 if (ret) {
7932 goto fail;
7934 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7935 if (ret) {
7936 goto fail;
7938 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7939 if (ret) {
7940 goto fail;
7944 * This takes a timespec, and not a timeval, so we cannot
7945 * use the do_select() helper ...
7947 if (ts_addr) {
7948 if (target_to_host_timespec(&ts, ts_addr)) {
7949 goto efault;
7951 ts_ptr = &ts;
7952 } else {
7953 ts_ptr = NULL;
7956 /* Extract the two packed args for the sigset */
7957 if (arg6) {
7958 sig_ptr = &sig;
7959 sig.size = SIGSET_T_SIZE;
7961 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7962 if (!arg7) {
7963 goto efault;
7965 arg_sigset = tswapal(arg7[0]);
7966 arg_sigsize = tswapal(arg7[1]);
7967 unlock_user(arg7, arg6, 0);
7969 if (arg_sigset) {
7970 sig.set = &set;
7971 if (arg_sigsize != sizeof(*target_sigset)) {
7972 /* Like the kernel, we enforce correct size sigsets */
7973 ret = -TARGET_EINVAL;
7974 goto fail;
7976 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7977 sizeof(*target_sigset), 1);
7978 if (!target_sigset) {
7979 goto efault;
7981 target_to_host_sigset(&set, target_sigset);
7982 unlock_user(target_sigset, arg_sigset, 0);
7983 } else {
7984 sig.set = NULL;
7986 } else {
7987 sig_ptr = NULL;
7990 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7991 ts_ptr, sig_ptr));
7993 if (!is_error(ret)) {
7994 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7995 goto efault;
7996 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7997 goto efault;
7998 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7999 goto efault;
8001 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8002 goto efault;
8005 break;
8006 #endif
8007 #ifdef TARGET_NR_symlink
8008 case TARGET_NR_symlink:
8010 void *p2;
8011 p = lock_user_string(arg1);
8012 p2 = lock_user_string(arg2);
8013 if (!p || !p2)
8014 ret = -TARGET_EFAULT;
8015 else
8016 ret = get_errno(symlink(p, p2));
8017 unlock_user(p2, arg2, 0);
8018 unlock_user(p, arg1, 0);
8020 break;
8021 #endif
8022 #if defined(TARGET_NR_symlinkat)
8023 case TARGET_NR_symlinkat:
8025 void *p2;
8026 p = lock_user_string(arg1);
8027 p2 = lock_user_string(arg3);
8028 if (!p || !p2)
8029 ret = -TARGET_EFAULT;
8030 else
8031 ret = get_errno(symlinkat(p, arg2, p2));
8032 unlock_user(p2, arg3, 0);
8033 unlock_user(p, arg1, 0);
8035 break;
8036 #endif
8037 #ifdef TARGET_NR_oldlstat
8038 case TARGET_NR_oldlstat:
8039 goto unimplemented;
8040 #endif
8041 #ifdef TARGET_NR_readlink
8042 case TARGET_NR_readlink:
8044 void *p2;
8045 p = lock_user_string(arg1);
8046 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8047 if (!p || !p2) {
8048 ret = -TARGET_EFAULT;
8049 } else if (!arg3) {
8050 /* Short circuit this for the magic exe check. */
8051 ret = -TARGET_EINVAL;
8052 } else if (is_proc_myself((const char *)p, "exe")) {
8053 char real[PATH_MAX], *temp;
8054 temp = realpath(exec_path, real);
8055 /* Return value is # of bytes that we wrote to the buffer. */
8056 if (temp == NULL) {
8057 ret = get_errno(-1);
8058 } else {
8059 /* Don't worry about sign mismatch as earlier mapping
8060 * logic would have thrown a bad address error. */
8061 ret = MIN(strlen(real), arg3);
8062 /* We cannot NUL terminate the string. */
8063 memcpy(p2, real, ret);
8065 } else {
8066 ret = get_errno(readlink(path(p), p2, arg3));
8068 unlock_user(p2, arg2, ret);
8069 unlock_user(p, arg1, 0);
8071 break;
8072 #endif
8073 #if defined(TARGET_NR_readlinkat)
8074 case TARGET_NR_readlinkat:
8076 void *p2;
8077 p = lock_user_string(arg2);
8078 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8079 if (!p || !p2) {
8080 ret = -TARGET_EFAULT;
8081 } else if (is_proc_myself((const char *)p, "exe")) {
8082 char real[PATH_MAX], *temp;
8083 temp = realpath(exec_path, real);
8084 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8085 snprintf((char *)p2, arg4, "%s", real);
8086 } else {
8087 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8089 unlock_user(p2, arg3, ret);
8090 unlock_user(p, arg2, 0);
8092 break;
8093 #endif
8094 #ifdef TARGET_NR_uselib
8095 case TARGET_NR_uselib:
8096 goto unimplemented;
8097 #endif
8098 #ifdef TARGET_NR_swapon
8099 case TARGET_NR_swapon:
8100 if (!(p = lock_user_string(arg1)))
8101 goto efault;
8102 ret = get_errno(swapon(p, arg2));
8103 unlock_user(p, arg1, 0);
8104 break;
8105 #endif
8106 case TARGET_NR_reboot:
8107 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8108 /* arg4 must be ignored in all other cases */
8109 p = lock_user_string(arg4);
8110 if (!p) {
8111 goto efault;
8113 ret = get_errno(reboot(arg1, arg2, arg3, p));
8114 unlock_user(p, arg4, 0);
8115 } else {
8116 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8118 break;
8119 #ifdef TARGET_NR_readdir
8120 case TARGET_NR_readdir:
8121 goto unimplemented;
8122 #endif
8123 #ifdef TARGET_NR_mmap
8124 case TARGET_NR_mmap:
8125 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8126 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8127 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8128 || defined(TARGET_S390X)
8130 abi_ulong *v;
8131 abi_ulong v1, v2, v3, v4, v5, v6;
8132 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8133 goto efault;
8134 v1 = tswapal(v[0]);
8135 v2 = tswapal(v[1]);
8136 v3 = tswapal(v[2]);
8137 v4 = tswapal(v[3]);
8138 v5 = tswapal(v[4]);
8139 v6 = tswapal(v[5]);
8140 unlock_user(v, arg1, 0);
8141 ret = get_errno(target_mmap(v1, v2, v3,
8142 target_to_host_bitmask(v4, mmap_flags_tbl),
8143 v5, v6));
8145 #else
8146 ret = get_errno(target_mmap(arg1, arg2, arg3,
8147 target_to_host_bitmask(arg4, mmap_flags_tbl),
8148 arg5,
8149 arg6));
8150 #endif
8151 break;
8152 #endif
8153 #ifdef TARGET_NR_mmap2
8154 case TARGET_NR_mmap2:
8155 #ifndef MMAP_SHIFT
8156 #define MMAP_SHIFT 12
8157 #endif
8158 ret = get_errno(target_mmap(arg1, arg2, arg3,
8159 target_to_host_bitmask(arg4, mmap_flags_tbl),
8160 arg5,
8161 arg6 << MMAP_SHIFT));
8162 break;
8163 #endif
8164 case TARGET_NR_munmap:
8165 ret = get_errno(target_munmap(arg1, arg2));
8166 break;
8167 case TARGET_NR_mprotect:
8169 TaskState *ts = cpu->opaque;
8170 /* Special hack to detect libc making the stack executable. */
8171 if ((arg3 & PROT_GROWSDOWN)
8172 && arg1 >= ts->info->stack_limit
8173 && arg1 <= ts->info->start_stack) {
8174 arg3 &= ~PROT_GROWSDOWN;
8175 arg2 = arg2 + arg1 - ts->info->stack_limit;
8176 arg1 = ts->info->stack_limit;
8179 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8180 break;
8181 #ifdef TARGET_NR_mremap
8182 case TARGET_NR_mremap:
8183 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8184 break;
8185 #endif
8186 /* ??? msync/mlock/munlock are broken for softmmu. */
8187 #ifdef TARGET_NR_msync
8188 case TARGET_NR_msync:
8189 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8190 break;
8191 #endif
8192 #ifdef TARGET_NR_mlock
8193 case TARGET_NR_mlock:
8194 ret = get_errno(mlock(g2h(arg1), arg2));
8195 break;
8196 #endif
8197 #ifdef TARGET_NR_munlock
8198 case TARGET_NR_munlock:
8199 ret = get_errno(munlock(g2h(arg1), arg2));
8200 break;
8201 #endif
8202 #ifdef TARGET_NR_mlockall
8203 case TARGET_NR_mlockall:
8204 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8205 break;
8206 #endif
8207 #ifdef TARGET_NR_munlockall
8208 case TARGET_NR_munlockall:
8209 ret = get_errno(munlockall());
8210 break;
8211 #endif
8212 case TARGET_NR_truncate:
8213 if (!(p = lock_user_string(arg1)))
8214 goto efault;
8215 ret = get_errno(truncate(p, arg2));
8216 unlock_user(p, arg1, 0);
8217 break;
8218 case TARGET_NR_ftruncate:
8219 ret = get_errno(ftruncate(arg1, arg2));
8220 break;
8221 case TARGET_NR_fchmod:
8222 ret = get_errno(fchmod(arg1, arg2));
8223 break;
8224 #if defined(TARGET_NR_fchmodat)
8225 case TARGET_NR_fchmodat:
8226 if (!(p = lock_user_string(arg2)))
8227 goto efault;
8228 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8229 unlock_user(p, arg2, 0);
8230 break;
8231 #endif
8232 case TARGET_NR_getpriority:
8233 /* Note that negative values are valid for getpriority, so we must
8234 differentiate based on errno settings. */
8235 errno = 0;
8236 ret = getpriority(arg1, arg2);
8237 if (ret == -1 && errno != 0) {
8238 ret = -host_to_target_errno(errno);
8239 break;
8241 #ifdef TARGET_ALPHA
8242 /* Return value is the unbiased priority. Signal no error. */
8243 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8244 #else
8245 /* Return value is a biased priority to avoid negative numbers. */
8246 ret = 20 - ret;
8247 #endif
8248 break;
8249 case TARGET_NR_setpriority:
8250 ret = get_errno(setpriority(arg1, arg2, arg3));
8251 break;
8252 #ifdef TARGET_NR_profil
8253 case TARGET_NR_profil:
8254 goto unimplemented;
8255 #endif
8256 case TARGET_NR_statfs:
8257 if (!(p = lock_user_string(arg1)))
8258 goto efault;
8259 ret = get_errno(statfs(path(p), &stfs));
8260 unlock_user(p, arg1, 0);
8261 convert_statfs:
8262 if (!is_error(ret)) {
8263 struct target_statfs *target_stfs;
8265 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8266 goto efault;
8267 __put_user(stfs.f_type, &target_stfs->f_type);
8268 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8269 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8270 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8271 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8272 __put_user(stfs.f_files, &target_stfs->f_files);
8273 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8274 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8275 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8276 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8277 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8278 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8279 unlock_user_struct(target_stfs, arg2, 1);
8281 break;
8282 case TARGET_NR_fstatfs:
8283 ret = get_errno(fstatfs(arg1, &stfs));
8284 goto convert_statfs;
8285 #ifdef TARGET_NR_statfs64
8286 case TARGET_NR_statfs64:
8287 if (!(p = lock_user_string(arg1)))
8288 goto efault;
8289 ret = get_errno(statfs(path(p), &stfs));
8290 unlock_user(p, arg1, 0);
8291 convert_statfs64:
8292 if (!is_error(ret)) {
8293 struct target_statfs64 *target_stfs;
8295 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8296 goto efault;
8297 __put_user(stfs.f_type, &target_stfs->f_type);
8298 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8299 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8300 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8301 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8302 __put_user(stfs.f_files, &target_stfs->f_files);
8303 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8304 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8305 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8306 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8307 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8308 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8309 unlock_user_struct(target_stfs, arg3, 1);
8311 break;
8312 case TARGET_NR_fstatfs64:
8313 ret = get_errno(fstatfs(arg1, &stfs));
8314 goto convert_statfs64;
8315 #endif
8316 #ifdef TARGET_NR_ioperm
8317 case TARGET_NR_ioperm:
8318 goto unimplemented;
8319 #endif
8320 #ifdef TARGET_NR_socketcall
8321 case TARGET_NR_socketcall:
8322 ret = do_socketcall(arg1, arg2);
8323 break;
8324 #endif
8325 #ifdef TARGET_NR_accept
8326 case TARGET_NR_accept:
8327 ret = do_accept4(arg1, arg2, arg3, 0);
8328 break;
8329 #endif
8330 #ifdef TARGET_NR_accept4
8331 case TARGET_NR_accept4:
8332 #ifdef CONFIG_ACCEPT4
8333 ret = do_accept4(arg1, arg2, arg3, arg4);
8334 #else
8335 goto unimplemented;
8336 #endif
8337 break;
8338 #endif
8339 #ifdef TARGET_NR_bind
8340 case TARGET_NR_bind:
8341 ret = do_bind(arg1, arg2, arg3);
8342 break;
8343 #endif
8344 #ifdef TARGET_NR_connect
8345 case TARGET_NR_connect:
8346 ret = do_connect(arg1, arg2, arg3);
8347 break;
8348 #endif
8349 #ifdef TARGET_NR_getpeername
8350 case TARGET_NR_getpeername:
8351 ret = do_getpeername(arg1, arg2, arg3);
8352 break;
8353 #endif
8354 #ifdef TARGET_NR_getsockname
8355 case TARGET_NR_getsockname:
8356 ret = do_getsockname(arg1, arg2, arg3);
8357 break;
8358 #endif
8359 #ifdef TARGET_NR_getsockopt
8360 case TARGET_NR_getsockopt:
8361 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8362 break;
8363 #endif
8364 #ifdef TARGET_NR_listen
8365 case TARGET_NR_listen:
8366 ret = get_errno(listen(arg1, arg2));
8367 break;
8368 #endif
8369 #ifdef TARGET_NR_recv
8370 case TARGET_NR_recv:
8371 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8372 break;
8373 #endif
8374 #ifdef TARGET_NR_recvfrom
8375 case TARGET_NR_recvfrom:
8376 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8377 break;
8378 #endif
8379 #ifdef TARGET_NR_recvmsg
8380 case TARGET_NR_recvmsg:
8381 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8382 break;
8383 #endif
8384 #ifdef TARGET_NR_send
8385 case TARGET_NR_send:
8386 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8387 break;
8388 #endif
8389 #ifdef TARGET_NR_sendmsg
8390 case TARGET_NR_sendmsg:
8391 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8392 break;
8393 #endif
8394 #ifdef TARGET_NR_sendmmsg
8395 case TARGET_NR_sendmmsg:
8396 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8397 break;
8398 case TARGET_NR_recvmmsg:
8399 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8400 break;
8401 #endif
8402 #ifdef TARGET_NR_sendto
8403 case TARGET_NR_sendto:
8404 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8405 break;
8406 #endif
8407 #ifdef TARGET_NR_shutdown
8408 case TARGET_NR_shutdown:
8409 ret = get_errno(shutdown(arg1, arg2));
8410 break;
8411 #endif
8412 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8413 case TARGET_NR_getrandom:
8414 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8415 if (!p) {
8416 goto efault;
8418 ret = get_errno(getrandom(p, arg2, arg3));
8419 unlock_user(p, arg1, ret);
8420 break;
8421 #endif
8422 #ifdef TARGET_NR_socket
8423 case TARGET_NR_socket:
8424 ret = do_socket(arg1, arg2, arg3);
8425 fd_trans_unregister(ret);
8426 break;
8427 #endif
8428 #ifdef TARGET_NR_socketpair
8429 case TARGET_NR_socketpair:
8430 ret = do_socketpair(arg1, arg2, arg3, arg4);
8431 break;
8432 #endif
8433 #ifdef TARGET_NR_setsockopt
8434 case TARGET_NR_setsockopt:
8435 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8436 break;
8437 #endif
8439 case TARGET_NR_syslog:
8440 if (!(p = lock_user_string(arg2)))
8441 goto efault;
8442 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8443 unlock_user(p, arg2, 0);
8444 break;
8446 case TARGET_NR_setitimer:
8448 struct itimerval value, ovalue, *pvalue;
8450 if (arg2) {
8451 pvalue = &value;
8452 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8453 || copy_from_user_timeval(&pvalue->it_value,
8454 arg2 + sizeof(struct target_timeval)))
8455 goto efault;
8456 } else {
8457 pvalue = NULL;
8459 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8460 if (!is_error(ret) && arg3) {
8461 if (copy_to_user_timeval(arg3,
8462 &ovalue.it_interval)
8463 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8464 &ovalue.it_value))
8465 goto efault;
8468 break;
8469 case TARGET_NR_getitimer:
8471 struct itimerval value;
8473 ret = get_errno(getitimer(arg1, &value));
8474 if (!is_error(ret) && arg2) {
8475 if (copy_to_user_timeval(arg2,
8476 &value.it_interval)
8477 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8478 &value.it_value))
8479 goto efault;
8482 break;
8483 #ifdef TARGET_NR_stat
8484 case TARGET_NR_stat:
8485 if (!(p = lock_user_string(arg1)))
8486 goto efault;
8487 ret = get_errno(stat(path(p), &st));
8488 unlock_user(p, arg1, 0);
8489 goto do_stat;
8490 #endif
8491 #ifdef TARGET_NR_lstat
8492 case TARGET_NR_lstat:
8493 if (!(p = lock_user_string(arg1)))
8494 goto efault;
8495 ret = get_errno(lstat(path(p), &st));
8496 unlock_user(p, arg1, 0);
8497 goto do_stat;
8498 #endif
8499 case TARGET_NR_fstat:
8501 ret = get_errno(fstat(arg1, &st));
8502 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8503 do_stat:
8504 #endif
8505 if (!is_error(ret)) {
8506 struct target_stat *target_st;
8508 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8509 goto efault;
8510 memset(target_st, 0, sizeof(*target_st));
8511 __put_user(st.st_dev, &target_st->st_dev);
8512 __put_user(st.st_ino, &target_st->st_ino);
8513 __put_user(st.st_mode, &target_st->st_mode);
8514 __put_user(st.st_uid, &target_st->st_uid);
8515 __put_user(st.st_gid, &target_st->st_gid);
8516 __put_user(st.st_nlink, &target_st->st_nlink);
8517 __put_user(st.st_rdev, &target_st->st_rdev);
8518 __put_user(st.st_size, &target_st->st_size);
8519 __put_user(st.st_blksize, &target_st->st_blksize);
8520 __put_user(st.st_blocks, &target_st->st_blocks);
8521 __put_user(st.st_atime, &target_st->target_st_atime);
8522 __put_user(st.st_mtime, &target_st->target_st_mtime);
8523 __put_user(st.st_ctime, &target_st->target_st_ctime);
8524 unlock_user_struct(target_st, arg2, 1);
8527 break;
8528 #ifdef TARGET_NR_olduname
8529 case TARGET_NR_olduname:
8530 goto unimplemented;
8531 #endif
8532 #ifdef TARGET_NR_iopl
8533 case TARGET_NR_iopl:
8534 goto unimplemented;
8535 #endif
8536 case TARGET_NR_vhangup:
8537 ret = get_errno(vhangup());
8538 break;
8539 #ifdef TARGET_NR_idle
8540 case TARGET_NR_idle:
8541 goto unimplemented;
8542 #endif
8543 #ifdef TARGET_NR_syscall
8544 case TARGET_NR_syscall:
8545 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8546 arg6, arg7, arg8, 0);
8547 break;
8548 #endif
8549 case TARGET_NR_wait4:
8551 int status;
8552 abi_long status_ptr = arg2;
8553 struct rusage rusage, *rusage_ptr;
8554 abi_ulong target_rusage = arg4;
8555 abi_long rusage_err;
8556 if (target_rusage)
8557 rusage_ptr = &rusage;
8558 else
8559 rusage_ptr = NULL;
8560 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8561 if (!is_error(ret)) {
8562 if (status_ptr && ret) {
8563 status = host_to_target_waitstatus(status);
8564 if (put_user_s32(status, status_ptr))
8565 goto efault;
8567 if (target_rusage) {
8568 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8569 if (rusage_err) {
8570 ret = rusage_err;
8575 break;
8576 #ifdef TARGET_NR_swapoff
8577 case TARGET_NR_swapoff:
8578 if (!(p = lock_user_string(arg1)))
8579 goto efault;
8580 ret = get_errno(swapoff(p));
8581 unlock_user(p, arg1, 0);
8582 break;
8583 #endif
8584 case TARGET_NR_sysinfo:
8586 struct target_sysinfo *target_value;
8587 struct sysinfo value;
8588 ret = get_errno(sysinfo(&value));
8589 if (!is_error(ret) && arg1)
8591 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8592 goto efault;
8593 __put_user(value.uptime, &target_value->uptime);
8594 __put_user(value.loads[0], &target_value->loads[0]);
8595 __put_user(value.loads[1], &target_value->loads[1]);
8596 __put_user(value.loads[2], &target_value->loads[2]);
8597 __put_user(value.totalram, &target_value->totalram);
8598 __put_user(value.freeram, &target_value->freeram);
8599 __put_user(value.sharedram, &target_value->sharedram);
8600 __put_user(value.bufferram, &target_value->bufferram);
8601 __put_user(value.totalswap, &target_value->totalswap);
8602 __put_user(value.freeswap, &target_value->freeswap);
8603 __put_user(value.procs, &target_value->procs);
8604 __put_user(value.totalhigh, &target_value->totalhigh);
8605 __put_user(value.freehigh, &target_value->freehigh);
8606 __put_user(value.mem_unit, &target_value->mem_unit);
8607 unlock_user_struct(target_value, arg1, 1);
8610 break;
8611 #ifdef TARGET_NR_ipc
8612 case TARGET_NR_ipc:
8613 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8614 break;
8615 #endif
8616 #ifdef TARGET_NR_semget
8617 case TARGET_NR_semget:
8618 ret = get_errno(semget(arg1, arg2, arg3));
8619 break;
8620 #endif
8621 #ifdef TARGET_NR_semop
8622 case TARGET_NR_semop:
8623 ret = do_semop(arg1, arg2, arg3);
8624 break;
8625 #endif
8626 #ifdef TARGET_NR_semctl
8627 case TARGET_NR_semctl:
8628 ret = do_semctl(arg1, arg2, arg3, arg4);
8629 break;
8630 #endif
8631 #ifdef TARGET_NR_msgctl
8632 case TARGET_NR_msgctl:
8633 ret = do_msgctl(arg1, arg2, arg3);
8634 break;
8635 #endif
8636 #ifdef TARGET_NR_msgget
8637 case TARGET_NR_msgget:
8638 ret = get_errno(msgget(arg1, arg2));
8639 break;
8640 #endif
8641 #ifdef TARGET_NR_msgrcv
8642 case TARGET_NR_msgrcv:
8643 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8644 break;
8645 #endif
8646 #ifdef TARGET_NR_msgsnd
8647 case TARGET_NR_msgsnd:
8648 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8649 break;
8650 #endif
8651 #ifdef TARGET_NR_shmget
8652 case TARGET_NR_shmget:
8653 ret = get_errno(shmget(arg1, arg2, arg3));
8654 break;
8655 #endif
8656 #ifdef TARGET_NR_shmctl
8657 case TARGET_NR_shmctl:
8658 ret = do_shmctl(arg1, arg2, arg3);
8659 break;
8660 #endif
8661 #ifdef TARGET_NR_shmat
8662 case TARGET_NR_shmat:
8663 ret = do_shmat(arg1, arg2, arg3);
8664 break;
8665 #endif
8666 #ifdef TARGET_NR_shmdt
8667 case TARGET_NR_shmdt:
8668 ret = do_shmdt(arg1);
8669 break;
8670 #endif
8671 case TARGET_NR_fsync:
8672 ret = get_errno(fsync(arg1));
8673 break;
8674 case TARGET_NR_clone:
8675 /* Linux manages to have three different orderings for its
8676 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8677 * match the kernel's CONFIG_CLONE_* settings.
8678 * Microblaze is further special in that it uses a sixth
8679 * implicit argument to clone for the TLS pointer.
8681 #if defined(TARGET_MICROBLAZE)
8682 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8683 #elif defined(TARGET_CLONE_BACKWARDS)
8684 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8685 #elif defined(TARGET_CLONE_BACKWARDS2)
8686 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8687 #else
8688 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8689 #endif
8690 break;
8691 #ifdef __NR_exit_group
8692 /* new thread calls */
8693 case TARGET_NR_exit_group:
8694 #ifdef TARGET_GPROF
8695 _mcleanup();
8696 #endif
8697 gdb_exit(cpu_env, arg1);
8698 ret = get_errno(exit_group(arg1));
8699 break;
8700 #endif
8701 case TARGET_NR_setdomainname:
8702 if (!(p = lock_user_string(arg1)))
8703 goto efault;
8704 ret = get_errno(setdomainname(p, arg2));
8705 unlock_user(p, arg1, 0);
8706 break;
8707 case TARGET_NR_uname:
8708 /* no need to transcode because we use the linux syscall */
8710 struct new_utsname * buf;
8712 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8713 goto efault;
8714 ret = get_errno(sys_uname(buf));
8715 if (!is_error(ret)) {
8716 /* Overrite the native machine name with whatever is being
8717 emulated. */
8718 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8719 /* Allow the user to override the reported release. */
8720 if (qemu_uname_release && *qemu_uname_release)
8721 strcpy (buf->release, qemu_uname_release);
8723 unlock_user_struct(buf, arg1, 1);
8725 break;
8726 #ifdef TARGET_I386
8727 case TARGET_NR_modify_ldt:
8728 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8729 break;
8730 #if !defined(TARGET_X86_64)
8731 case TARGET_NR_vm86old:
8732 goto unimplemented;
8733 case TARGET_NR_vm86:
8734 ret = do_vm86(cpu_env, arg1, arg2);
8735 break;
8736 #endif
8737 #endif
8738 case TARGET_NR_adjtimex:
8739 goto unimplemented;
8740 #ifdef TARGET_NR_create_module
8741 case TARGET_NR_create_module:
8742 #endif
8743 case TARGET_NR_init_module:
8744 case TARGET_NR_delete_module:
8745 #ifdef TARGET_NR_get_kernel_syms
8746 case TARGET_NR_get_kernel_syms:
8747 #endif
8748 goto unimplemented;
8749 case TARGET_NR_quotactl:
8750 goto unimplemented;
8751 case TARGET_NR_getpgid:
8752 ret = get_errno(getpgid(arg1));
8753 break;
8754 case TARGET_NR_fchdir:
8755 ret = get_errno(fchdir(arg1));
8756 break;
8757 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8758 case TARGET_NR_bdflush:
8759 goto unimplemented;
8760 #endif
8761 #ifdef TARGET_NR_sysfs
8762 case TARGET_NR_sysfs:
8763 goto unimplemented;
8764 #endif
8765 case TARGET_NR_personality:
8766 ret = get_errno(personality(arg1));
8767 break;
8768 #ifdef TARGET_NR_afs_syscall
8769 case TARGET_NR_afs_syscall:
8770 goto unimplemented;
8771 #endif
8772 #ifdef TARGET_NR__llseek /* Not on alpha */
8773 case TARGET_NR__llseek:
8775 int64_t res;
8776 #if !defined(__NR_llseek)
8777 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8778 if (res == -1) {
8779 ret = get_errno(res);
8780 } else {
8781 ret = 0;
8783 #else
8784 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8785 #endif
8786 if ((ret == 0) && put_user_s64(res, arg4)) {
8787 goto efault;
8790 break;
8791 #endif
8792 #ifdef TARGET_NR_getdents
8793 case TARGET_NR_getdents:
8794 #ifdef __NR_getdents
8795 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8797 struct target_dirent *target_dirp;
8798 struct linux_dirent *dirp;
8799 abi_long count = arg3;
8801 dirp = g_try_malloc(count);
8802 if (!dirp) {
8803 ret = -TARGET_ENOMEM;
8804 goto fail;
8807 ret = get_errno(sys_getdents(arg1, dirp, count));
8808 if (!is_error(ret)) {
8809 struct linux_dirent *de;
8810 struct target_dirent *tde;
8811 int len = ret;
8812 int reclen, treclen;
8813 int count1, tnamelen;
8815 count1 = 0;
8816 de = dirp;
8817 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8818 goto efault;
8819 tde = target_dirp;
8820 while (len > 0) {
8821 reclen = de->d_reclen;
8822 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8823 assert(tnamelen >= 0);
8824 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8825 assert(count1 + treclen <= count);
8826 tde->d_reclen = tswap16(treclen);
8827 tde->d_ino = tswapal(de->d_ino);
8828 tde->d_off = tswapal(de->d_off);
8829 memcpy(tde->d_name, de->d_name, tnamelen);
8830 de = (struct linux_dirent *)((char *)de + reclen);
8831 len -= reclen;
8832 tde = (struct target_dirent *)((char *)tde + treclen);
8833 count1 += treclen;
8835 ret = count1;
8836 unlock_user(target_dirp, arg2, ret);
8838 g_free(dirp);
8840 #else
8842 struct linux_dirent *dirp;
8843 abi_long count = arg3;
8845 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8846 goto efault;
8847 ret = get_errno(sys_getdents(arg1, dirp, count));
8848 if (!is_error(ret)) {
8849 struct linux_dirent *de;
8850 int len = ret;
8851 int reclen;
8852 de = dirp;
8853 while (len > 0) {
8854 reclen = de->d_reclen;
8855 if (reclen > len)
8856 break;
8857 de->d_reclen = tswap16(reclen);
8858 tswapls(&de->d_ino);
8859 tswapls(&de->d_off);
8860 de = (struct linux_dirent *)((char *)de + reclen);
8861 len -= reclen;
8864 unlock_user(dirp, arg2, ret);
8866 #endif
8867 #else
8868 /* Implement getdents in terms of getdents64 */
8870 struct linux_dirent64 *dirp;
8871 abi_long count = arg3;
8873 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8874 if (!dirp) {
8875 goto efault;
8877 ret = get_errno(sys_getdents64(arg1, dirp, count));
8878 if (!is_error(ret)) {
8879 /* Convert the dirent64 structs to target dirent. We do this
8880 * in-place, since we can guarantee that a target_dirent is no
8881 * larger than a dirent64; however this means we have to be
8882 * careful to read everything before writing in the new format.
8884 struct linux_dirent64 *de;
8885 struct target_dirent *tde;
8886 int len = ret;
8887 int tlen = 0;
8889 de = dirp;
8890 tde = (struct target_dirent *)dirp;
8891 while (len > 0) {
8892 int namelen, treclen;
8893 int reclen = de->d_reclen;
8894 uint64_t ino = de->d_ino;
8895 int64_t off = de->d_off;
8896 uint8_t type = de->d_type;
8898 namelen = strlen(de->d_name);
8899 treclen = offsetof(struct target_dirent, d_name)
8900 + namelen + 2;
8901 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8903 memmove(tde->d_name, de->d_name, namelen + 1);
8904 tde->d_ino = tswapal(ino);
8905 tde->d_off = tswapal(off);
8906 tde->d_reclen = tswap16(treclen);
8907 /* The target_dirent type is in what was formerly a padding
8908 * byte at the end of the structure:
8910 *(((char *)tde) + treclen - 1) = type;
8912 de = (struct linux_dirent64 *)((char *)de + reclen);
8913 tde = (struct target_dirent *)((char *)tde + treclen);
8914 len -= reclen;
8915 tlen += treclen;
8917 ret = tlen;
8919 unlock_user(dirp, arg2, ret);
8921 #endif
8922 break;
8923 #endif /* TARGET_NR_getdents */
8924 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8925 case TARGET_NR_getdents64:
8927 struct linux_dirent64 *dirp;
8928 abi_long count = arg3;
8929 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8930 goto efault;
8931 ret = get_errno(sys_getdents64(arg1, dirp, count));
8932 if (!is_error(ret)) {
8933 struct linux_dirent64 *de;
8934 int len = ret;
8935 int reclen;
8936 de = dirp;
8937 while (len > 0) {
8938 reclen = de->d_reclen;
8939 if (reclen > len)
8940 break;
8941 de->d_reclen = tswap16(reclen);
8942 tswap64s((uint64_t *)&de->d_ino);
8943 tswap64s((uint64_t *)&de->d_off);
8944 de = (struct linux_dirent64 *)((char *)de + reclen);
8945 len -= reclen;
8948 unlock_user(dirp, arg2, ret);
8950 break;
8951 #endif /* TARGET_NR_getdents64 */
8952 #if defined(TARGET_NR__newselect)
8953 case TARGET_NR__newselect:
8954 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8955 break;
8956 #endif
8957 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8958 # ifdef TARGET_NR_poll
8959 case TARGET_NR_poll:
8960 # endif
8961 # ifdef TARGET_NR_ppoll
8962 case TARGET_NR_ppoll:
8963 # endif
8965 struct target_pollfd *target_pfd;
8966 unsigned int nfds = arg2;
8967 int timeout = arg3;
8968 struct pollfd *pfd;
8969 unsigned int i;
8971 pfd = NULL;
8972 target_pfd = NULL;
8973 if (nfds) {
8974 target_pfd = lock_user(VERIFY_WRITE, arg1,
8975 sizeof(struct target_pollfd) * nfds, 1);
8976 if (!target_pfd) {
8977 goto efault;
8980 pfd = alloca(sizeof(struct pollfd) * nfds);
8981 for (i = 0; i < nfds; i++) {
8982 pfd[i].fd = tswap32(target_pfd[i].fd);
8983 pfd[i].events = tswap16(target_pfd[i].events);
8987 # ifdef TARGET_NR_ppoll
8988 if (num == TARGET_NR_ppoll) {
8989 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8990 target_sigset_t *target_set;
8991 sigset_t _set, *set = &_set;
8993 if (arg3) {
8994 if (target_to_host_timespec(timeout_ts, arg3)) {
8995 unlock_user(target_pfd, arg1, 0);
8996 goto efault;
8998 } else {
8999 timeout_ts = NULL;
9002 if (arg4) {
9003 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9004 if (!target_set) {
9005 unlock_user(target_pfd, arg1, 0);
9006 goto efault;
9008 target_to_host_sigset(set, target_set);
9009 } else {
9010 set = NULL;
9013 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts,
9014 set, SIGSET_T_SIZE));
9016 if (!is_error(ret) && arg3) {
9017 host_to_target_timespec(arg3, timeout_ts);
9019 if (arg4) {
9020 unlock_user(target_set, arg4, 0);
9022 } else
9023 # endif
9024 ret = get_errno(poll(pfd, nfds, timeout));
9026 if (!is_error(ret)) {
9027 for(i = 0; i < nfds; i++) {
9028 target_pfd[i].revents = tswap16(pfd[i].revents);
9031 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9033 break;
9034 #endif
9035 case TARGET_NR_flock:
9036 /* NOTE: the flock constant seems to be the same for every
9037 Linux platform */
9038 ret = get_errno(safe_flock(arg1, arg2));
9039 break;
9040 case TARGET_NR_readv:
9042 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9043 if (vec != NULL) {
9044 ret = get_errno(safe_readv(arg1, vec, arg3));
9045 unlock_iovec(vec, arg2, arg3, 1);
9046 } else {
9047 ret = -host_to_target_errno(errno);
9050 break;
9051 case TARGET_NR_writev:
9053 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9054 if (vec != NULL) {
9055 ret = get_errno(safe_writev(arg1, vec, arg3));
9056 unlock_iovec(vec, arg2, arg3, 0);
9057 } else {
9058 ret = -host_to_target_errno(errno);
9061 break;
9062 case TARGET_NR_getsid:
9063 ret = get_errno(getsid(arg1));
9064 break;
9065 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9066 case TARGET_NR_fdatasync:
9067 ret = get_errno(fdatasync(arg1));
9068 break;
9069 #endif
9070 #ifdef TARGET_NR__sysctl
9071 case TARGET_NR__sysctl:
9072 /* We don't implement this, but ENOTDIR is always a safe
9073 return value. */
9074 ret = -TARGET_ENOTDIR;
9075 break;
9076 #endif
9077 case TARGET_NR_sched_getaffinity:
9079 unsigned int mask_size;
9080 unsigned long *mask;
9083 * sched_getaffinity needs multiples of ulong, so need to take
9084 * care of mismatches between target ulong and host ulong sizes.
9086 if (arg2 & (sizeof(abi_ulong) - 1)) {
9087 ret = -TARGET_EINVAL;
9088 break;
9090 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9092 mask = alloca(mask_size);
9093 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9095 if (!is_error(ret)) {
9096 if (ret > arg2) {
9097 /* More data returned than the caller's buffer will fit.
9098 * This only happens if sizeof(abi_long) < sizeof(long)
9099 * and the caller passed us a buffer holding an odd number
9100 * of abi_longs. If the host kernel is actually using the
9101 * extra 4 bytes then fail EINVAL; otherwise we can just
9102 * ignore them and only copy the interesting part.
9104 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9105 if (numcpus > arg2 * 8) {
9106 ret = -TARGET_EINVAL;
9107 break;
9109 ret = arg2;
9112 if (copy_to_user(arg3, mask, ret)) {
9113 goto efault;
9117 break;
9118 case TARGET_NR_sched_setaffinity:
9120 unsigned int mask_size;
9121 unsigned long *mask;
9124 * sched_setaffinity needs multiples of ulong, so need to take
9125 * care of mismatches between target ulong and host ulong sizes.
9127 if (arg2 & (sizeof(abi_ulong) - 1)) {
9128 ret = -TARGET_EINVAL;
9129 break;
9131 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9133 mask = alloca(mask_size);
9134 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9135 goto efault;
9137 memcpy(mask, p, arg2);
9138 unlock_user_struct(p, arg2, 0);
9140 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9142 break;
9143 case TARGET_NR_sched_setparam:
9145 struct sched_param *target_schp;
9146 struct sched_param schp;
9148 if (arg2 == 0) {
9149 return -TARGET_EINVAL;
9151 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9152 goto efault;
9153 schp.sched_priority = tswap32(target_schp->sched_priority);
9154 unlock_user_struct(target_schp, arg2, 0);
9155 ret = get_errno(sched_setparam(arg1, &schp));
9157 break;
9158 case TARGET_NR_sched_getparam:
9160 struct sched_param *target_schp;
9161 struct sched_param schp;
9163 if (arg2 == 0) {
9164 return -TARGET_EINVAL;
9166 ret = get_errno(sched_getparam(arg1, &schp));
9167 if (!is_error(ret)) {
9168 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9169 goto efault;
9170 target_schp->sched_priority = tswap32(schp.sched_priority);
9171 unlock_user_struct(target_schp, arg2, 1);
9174 break;
9175 case TARGET_NR_sched_setscheduler:
9177 struct sched_param *target_schp;
9178 struct sched_param schp;
9179 if (arg3 == 0) {
9180 return -TARGET_EINVAL;
9182 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9183 goto efault;
9184 schp.sched_priority = tswap32(target_schp->sched_priority);
9185 unlock_user_struct(target_schp, arg3, 0);
9186 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9188 break;
9189 case TARGET_NR_sched_getscheduler:
9190 ret = get_errno(sched_getscheduler(arg1));
9191 break;
9192 case TARGET_NR_sched_yield:
9193 ret = get_errno(sched_yield());
9194 break;
9195 case TARGET_NR_sched_get_priority_max:
9196 ret = get_errno(sched_get_priority_max(arg1));
9197 break;
9198 case TARGET_NR_sched_get_priority_min:
9199 ret = get_errno(sched_get_priority_min(arg1));
9200 break;
9201 case TARGET_NR_sched_rr_get_interval:
9203 struct timespec ts;
9204 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9205 if (!is_error(ret)) {
9206 ret = host_to_target_timespec(arg2, &ts);
9209 break;
9210 case TARGET_NR_nanosleep:
9212 struct timespec req, rem;
9213 target_to_host_timespec(&req, arg1);
9214 ret = get_errno(safe_nanosleep(&req, &rem));
9215 if (is_error(ret) && arg2) {
9216 host_to_target_timespec(arg2, &rem);
9219 break;
9220 #ifdef TARGET_NR_query_module
9221 case TARGET_NR_query_module:
9222 goto unimplemented;
9223 #endif
9224 #ifdef TARGET_NR_nfsservctl
9225 case TARGET_NR_nfsservctl:
9226 goto unimplemented;
9227 #endif
9228 case TARGET_NR_prctl:
9229 switch (arg1) {
9230 case PR_GET_PDEATHSIG:
9232 int deathsig;
9233 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9234 if (!is_error(ret) && arg2
9235 && put_user_ual(deathsig, arg2)) {
9236 goto efault;
9238 break;
9240 #ifdef PR_GET_NAME
9241 case PR_GET_NAME:
9243 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9244 if (!name) {
9245 goto efault;
9247 ret = get_errno(prctl(arg1, (unsigned long)name,
9248 arg3, arg4, arg5));
9249 unlock_user(name, arg2, 16);
9250 break;
9252 case PR_SET_NAME:
9254 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9255 if (!name) {
9256 goto efault;
9258 ret = get_errno(prctl(arg1, (unsigned long)name,
9259 arg3, arg4, arg5));
9260 unlock_user(name, arg2, 0);
9261 break;
9263 #endif
9264 default:
9265 /* Most prctl options have no pointer arguments */
9266 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9267 break;
9269 break;
9270 #ifdef TARGET_NR_arch_prctl
9271 case TARGET_NR_arch_prctl:
9272 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9273 ret = do_arch_prctl(cpu_env, arg1, arg2);
9274 break;
9275 #else
9276 goto unimplemented;
9277 #endif
9278 #endif
9279 #ifdef TARGET_NR_pread64
9280 case TARGET_NR_pread64:
9281 if (regpairs_aligned(cpu_env)) {
9282 arg4 = arg5;
9283 arg5 = arg6;
9285 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9286 goto efault;
9287 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9288 unlock_user(p, arg2, ret);
9289 break;
9290 case TARGET_NR_pwrite64:
9291 if (regpairs_aligned(cpu_env)) {
9292 arg4 = arg5;
9293 arg5 = arg6;
9295 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9296 goto efault;
9297 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9298 unlock_user(p, arg2, 0);
9299 break;
9300 #endif
9301 case TARGET_NR_getcwd:
9302 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9303 goto efault;
9304 ret = get_errno(sys_getcwd1(p, arg2));
9305 unlock_user(p, arg1, ret);
9306 break;
9307 case TARGET_NR_capget:
9308 case TARGET_NR_capset:
9310 struct target_user_cap_header *target_header;
9311 struct target_user_cap_data *target_data = NULL;
9312 struct __user_cap_header_struct header;
9313 struct __user_cap_data_struct data[2];
9314 struct __user_cap_data_struct *dataptr = NULL;
9315 int i, target_datalen;
9316 int data_items = 1;
9318 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9319 goto efault;
9321 header.version = tswap32(target_header->version);
9322 header.pid = tswap32(target_header->pid);
9324 if (header.version != _LINUX_CAPABILITY_VERSION) {
9325 /* Version 2 and up takes pointer to two user_data structs */
9326 data_items = 2;
9329 target_datalen = sizeof(*target_data) * data_items;
9331 if (arg2) {
9332 if (num == TARGET_NR_capget) {
9333 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9334 } else {
9335 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9337 if (!target_data) {
9338 unlock_user_struct(target_header, arg1, 0);
9339 goto efault;
9342 if (num == TARGET_NR_capset) {
9343 for (i = 0; i < data_items; i++) {
9344 data[i].effective = tswap32(target_data[i].effective);
9345 data[i].permitted = tswap32(target_data[i].permitted);
9346 data[i].inheritable = tswap32(target_data[i].inheritable);
9350 dataptr = data;
9353 if (num == TARGET_NR_capget) {
9354 ret = get_errno(capget(&header, dataptr));
9355 } else {
9356 ret = get_errno(capset(&header, dataptr));
9359 /* The kernel always updates version for both capget and capset */
9360 target_header->version = tswap32(header.version);
9361 unlock_user_struct(target_header, arg1, 1);
9363 if (arg2) {
9364 if (num == TARGET_NR_capget) {
9365 for (i = 0; i < data_items; i++) {
9366 target_data[i].effective = tswap32(data[i].effective);
9367 target_data[i].permitted = tswap32(data[i].permitted);
9368 target_data[i].inheritable = tswap32(data[i].inheritable);
9370 unlock_user(target_data, arg2, target_datalen);
9371 } else {
9372 unlock_user(target_data, arg2, 0);
9375 break;
9377 case TARGET_NR_sigaltstack:
9378 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9379 break;
9381 #ifdef CONFIG_SENDFILE
9382 case TARGET_NR_sendfile:
9384 off_t *offp = NULL;
9385 off_t off;
9386 if (arg3) {
9387 ret = get_user_sal(off, arg3);
9388 if (is_error(ret)) {
9389 break;
9391 offp = &off;
9393 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9394 if (!is_error(ret) && arg3) {
9395 abi_long ret2 = put_user_sal(off, arg3);
9396 if (is_error(ret2)) {
9397 ret = ret2;
9400 break;
9402 #ifdef TARGET_NR_sendfile64
9403 case TARGET_NR_sendfile64:
9405 off_t *offp = NULL;
9406 off_t off;
9407 if (arg3) {
9408 ret = get_user_s64(off, arg3);
9409 if (is_error(ret)) {
9410 break;
9412 offp = &off;
9414 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9415 if (!is_error(ret) && arg3) {
9416 abi_long ret2 = put_user_s64(off, arg3);
9417 if (is_error(ret2)) {
9418 ret = ret2;
9421 break;
9423 #endif
9424 #else
9425 case TARGET_NR_sendfile:
9426 #ifdef TARGET_NR_sendfile64
9427 case TARGET_NR_sendfile64:
9428 #endif
9429 goto unimplemented;
9430 #endif
9432 #ifdef TARGET_NR_getpmsg
9433 case TARGET_NR_getpmsg:
9434 goto unimplemented;
9435 #endif
9436 #ifdef TARGET_NR_putpmsg
9437 case TARGET_NR_putpmsg:
9438 goto unimplemented;
9439 #endif
9440 #ifdef TARGET_NR_vfork
9441 case TARGET_NR_vfork:
9442 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9443 0, 0, 0, 0));
9444 break;
9445 #endif
9446 #ifdef TARGET_NR_ugetrlimit
9447 case TARGET_NR_ugetrlimit:
9449 struct rlimit rlim;
9450 int resource = target_to_host_resource(arg1);
9451 ret = get_errno(getrlimit(resource, &rlim));
9452 if (!is_error(ret)) {
9453 struct target_rlimit *target_rlim;
9454 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9455 goto efault;
9456 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9457 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9458 unlock_user_struct(target_rlim, arg2, 1);
9460 break;
9462 #endif
9463 #ifdef TARGET_NR_truncate64
9464 case TARGET_NR_truncate64:
9465 if (!(p = lock_user_string(arg1)))
9466 goto efault;
9467 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9468 unlock_user(p, arg1, 0);
9469 break;
9470 #endif
9471 #ifdef TARGET_NR_ftruncate64
9472 case TARGET_NR_ftruncate64:
9473 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9474 break;
9475 #endif
9476 #ifdef TARGET_NR_stat64
9477 case TARGET_NR_stat64:
9478 if (!(p = lock_user_string(arg1)))
9479 goto efault;
9480 ret = get_errno(stat(path(p), &st));
9481 unlock_user(p, arg1, 0);
9482 if (!is_error(ret))
9483 ret = host_to_target_stat64(cpu_env, arg2, &st);
9484 break;
9485 #endif
9486 #ifdef TARGET_NR_lstat64
9487 case TARGET_NR_lstat64:
9488 if (!(p = lock_user_string(arg1)))
9489 goto efault;
9490 ret = get_errno(lstat(path(p), &st));
9491 unlock_user(p, arg1, 0);
9492 if (!is_error(ret))
9493 ret = host_to_target_stat64(cpu_env, arg2, &st);
9494 break;
9495 #endif
9496 #ifdef TARGET_NR_fstat64
9497 case TARGET_NR_fstat64:
9498 ret = get_errno(fstat(arg1, &st));
9499 if (!is_error(ret))
9500 ret = host_to_target_stat64(cpu_env, arg2, &st);
9501 break;
9502 #endif
9503 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9504 #ifdef TARGET_NR_fstatat64
9505 case TARGET_NR_fstatat64:
9506 #endif
9507 #ifdef TARGET_NR_newfstatat
9508 case TARGET_NR_newfstatat:
9509 #endif
9510 if (!(p = lock_user_string(arg2)))
9511 goto efault;
9512 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9513 if (!is_error(ret))
9514 ret = host_to_target_stat64(cpu_env, arg3, &st);
9515 break;
9516 #endif
9517 #ifdef TARGET_NR_lchown
9518 case TARGET_NR_lchown:
9519 if (!(p = lock_user_string(arg1)))
9520 goto efault;
9521 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9522 unlock_user(p, arg1, 0);
9523 break;
9524 #endif
9525 #ifdef TARGET_NR_getuid
9526 case TARGET_NR_getuid:
9527 ret = get_errno(high2lowuid(getuid()));
9528 break;
9529 #endif
9530 #ifdef TARGET_NR_getgid
9531 case TARGET_NR_getgid:
9532 ret = get_errno(high2lowgid(getgid()));
9533 break;
9534 #endif
9535 #ifdef TARGET_NR_geteuid
9536 case TARGET_NR_geteuid:
9537 ret = get_errno(high2lowuid(geteuid()));
9538 break;
9539 #endif
9540 #ifdef TARGET_NR_getegid
9541 case TARGET_NR_getegid:
9542 ret = get_errno(high2lowgid(getegid()));
9543 break;
9544 #endif
9545 case TARGET_NR_setreuid:
9546 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9547 break;
9548 case TARGET_NR_setregid:
9549 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9550 break;
9551 case TARGET_NR_getgroups:
9553 int gidsetsize = arg1;
9554 target_id *target_grouplist;
9555 gid_t *grouplist;
9556 int i;
9558 grouplist = alloca(gidsetsize * sizeof(gid_t));
9559 ret = get_errno(getgroups(gidsetsize, grouplist));
9560 if (gidsetsize == 0)
9561 break;
9562 if (!is_error(ret)) {
9563 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9564 if (!target_grouplist)
9565 goto efault;
9566 for(i = 0;i < ret; i++)
9567 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9568 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9571 break;
9572 case TARGET_NR_setgroups:
9574 int gidsetsize = arg1;
9575 target_id *target_grouplist;
9576 gid_t *grouplist = NULL;
9577 int i;
9578 if (gidsetsize) {
9579 grouplist = alloca(gidsetsize * sizeof(gid_t));
9580 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9581 if (!target_grouplist) {
9582 ret = -TARGET_EFAULT;
9583 goto fail;
9585 for (i = 0; i < gidsetsize; i++) {
9586 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9588 unlock_user(target_grouplist, arg2, 0);
9590 ret = get_errno(setgroups(gidsetsize, grouplist));
9592 break;
9593 case TARGET_NR_fchown:
9594 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9595 break;
9596 #if defined(TARGET_NR_fchownat)
9597 case TARGET_NR_fchownat:
9598 if (!(p = lock_user_string(arg2)))
9599 goto efault;
9600 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9601 low2highgid(arg4), arg5));
9602 unlock_user(p, arg2, 0);
9603 break;
9604 #endif
9605 #ifdef TARGET_NR_setresuid
9606 case TARGET_NR_setresuid:
9607 ret = get_errno(sys_setresuid(low2highuid(arg1),
9608 low2highuid(arg2),
9609 low2highuid(arg3)));
9610 break;
9611 #endif
9612 #ifdef TARGET_NR_getresuid
9613 case TARGET_NR_getresuid:
9615 uid_t ruid, euid, suid;
9616 ret = get_errno(getresuid(&ruid, &euid, &suid));
9617 if (!is_error(ret)) {
9618 if (put_user_id(high2lowuid(ruid), arg1)
9619 || put_user_id(high2lowuid(euid), arg2)
9620 || put_user_id(high2lowuid(suid), arg3))
9621 goto efault;
9624 break;
9625 #endif
9626 #ifdef TARGET_NR_getresgid
9627 case TARGET_NR_setresgid:
9628 ret = get_errno(sys_setresgid(low2highgid(arg1),
9629 low2highgid(arg2),
9630 low2highgid(arg3)));
9631 break;
9632 #endif
9633 #ifdef TARGET_NR_getresgid
9634 case TARGET_NR_getresgid:
9636 gid_t rgid, egid, sgid;
9637 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9638 if (!is_error(ret)) {
9639 if (put_user_id(high2lowgid(rgid), arg1)
9640 || put_user_id(high2lowgid(egid), arg2)
9641 || put_user_id(high2lowgid(sgid), arg3))
9642 goto efault;
9645 break;
9646 #endif
9647 #ifdef TARGET_NR_chown
9648 case TARGET_NR_chown:
9649 if (!(p = lock_user_string(arg1)))
9650 goto efault;
9651 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9652 unlock_user(p, arg1, 0);
9653 break;
9654 #endif
9655 case TARGET_NR_setuid:
9656 ret = get_errno(sys_setuid(low2highuid(arg1)));
9657 break;
9658 case TARGET_NR_setgid:
9659 ret = get_errno(sys_setgid(low2highgid(arg1)));
9660 break;
9661 case TARGET_NR_setfsuid:
9662 ret = get_errno(setfsuid(arg1));
9663 break;
9664 case TARGET_NR_setfsgid:
9665 ret = get_errno(setfsgid(arg1));
9666 break;
9668 #ifdef TARGET_NR_lchown32
9669 case TARGET_NR_lchown32:
9670 if (!(p = lock_user_string(arg1)))
9671 goto efault;
9672 ret = get_errno(lchown(p, arg2, arg3));
9673 unlock_user(p, arg1, 0);
9674 break;
9675 #endif
9676 #ifdef TARGET_NR_getuid32
9677 case TARGET_NR_getuid32:
9678 ret = get_errno(getuid());
9679 break;
9680 #endif
9682 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9683 /* Alpha specific */
9684 case TARGET_NR_getxuid:
9686 uid_t euid;
9687 euid=geteuid();
9688 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9690 ret = get_errno(getuid());
9691 break;
9692 #endif
9693 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9694 /* Alpha specific */
9695 case TARGET_NR_getxgid:
9697 uid_t egid;
9698 egid=getegid();
9699 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
9701 ret = get_errno(getgid());
9702 break;
9703 #endif
9704 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9705 /* Alpha specific */
9706 case TARGET_NR_osf_getsysinfo:
9707 ret = -TARGET_EOPNOTSUPP;
9708 switch (arg1) {
9709 case TARGET_GSI_IEEE_FP_CONTROL:
9711 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9713 /* Copied from linux ieee_fpcr_to_swcr. */
9714 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9715 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9716 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9717 | SWCR_TRAP_ENABLE_DZE
9718 | SWCR_TRAP_ENABLE_OVF);
9719 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9720 | SWCR_TRAP_ENABLE_INE);
9721 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9722 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9724 if (put_user_u64 (swcr, arg2))
9725 goto efault;
9726 ret = 0;
9728 break;
9730 /* case GSI_IEEE_STATE_AT_SIGNAL:
9731 -- Not implemented in linux kernel.
9732 case GSI_UACPROC:
9733 -- Retrieves current unaligned access state; not much used.
9734 case GSI_PROC_TYPE:
9735 -- Retrieves implver information; surely not used.
9736 case GSI_GET_HWRPB:
9737 -- Grabs a copy of the HWRPB; surely not used.
9740 break;
9741 #endif
9742 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9743 /* Alpha specific */
9744 case TARGET_NR_osf_setsysinfo:
9745 ret = -TARGET_EOPNOTSUPP;
9746 switch (arg1) {
9747 case TARGET_SSI_IEEE_FP_CONTROL:
9749 uint64_t swcr, fpcr, orig_fpcr;
9751 if (get_user_u64 (swcr, arg2)) {
9752 goto efault;
9754 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9755 fpcr = orig_fpcr & FPCR_DYN_MASK;
9757 /* Copied from linux ieee_swcr_to_fpcr. */
9758 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9759 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9760 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9761 | SWCR_TRAP_ENABLE_DZE
9762 | SWCR_TRAP_ENABLE_OVF)) << 48;
9763 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9764 | SWCR_TRAP_ENABLE_INE)) << 57;
9765 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9766 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9768 cpu_alpha_store_fpcr(cpu_env, fpcr);
9769 ret = 0;
9771 break;
9773 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9775 uint64_t exc, fpcr, orig_fpcr;
9776 int si_code;
9778 if (get_user_u64(exc, arg2)) {
9779 goto efault;
9782 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9784 /* We only add to the exception status here. */
9785 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9787 cpu_alpha_store_fpcr(cpu_env, fpcr);
9788 ret = 0;
9790 /* Old exceptions are not signaled. */
9791 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9793 /* If any exceptions set by this call,
9794 and are unmasked, send a signal. */
9795 si_code = 0;
9796 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9797 si_code = TARGET_FPE_FLTRES;
9799 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9800 si_code = TARGET_FPE_FLTUND;
9802 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9803 si_code = TARGET_FPE_FLTOVF;
9805 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9806 si_code = TARGET_FPE_FLTDIV;
9808 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9809 si_code = TARGET_FPE_FLTINV;
9811 if (si_code != 0) {
9812 target_siginfo_t info;
9813 info.si_signo = SIGFPE;
9814 info.si_errno = 0;
9815 info.si_code = si_code;
9816 info._sifields._sigfault._addr
9817 = ((CPUArchState *)cpu_env)->pc;
9818 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9821 break;
9823 /* case SSI_NVPAIRS:
9824 -- Used with SSIN_UACPROC to enable unaligned accesses.
9825 case SSI_IEEE_STATE_AT_SIGNAL:
9826 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9827 -- Not implemented in linux kernel
9830 break;
9831 #endif
9832 #ifdef TARGET_NR_osf_sigprocmask
9833 /* Alpha specific. */
9834 case TARGET_NR_osf_sigprocmask:
9836 abi_ulong mask;
9837 int how;
9838 sigset_t set, oldset;
9840 switch(arg1) {
9841 case TARGET_SIG_BLOCK:
9842 how = SIG_BLOCK;
9843 break;
9844 case TARGET_SIG_UNBLOCK:
9845 how = SIG_UNBLOCK;
9846 break;
9847 case TARGET_SIG_SETMASK:
9848 how = SIG_SETMASK;
9849 break;
9850 default:
9851 ret = -TARGET_EINVAL;
9852 goto fail;
9854 mask = arg2;
9855 target_to_host_old_sigset(&set, &mask);
9856 ret = do_sigprocmask(how, &set, &oldset);
9857 if (!ret) {
9858 host_to_target_old_sigset(&mask, &oldset);
9859 ret = mask;
9862 break;
9863 #endif
9865 #ifdef TARGET_NR_getgid32
9866 case TARGET_NR_getgid32:
9867 ret = get_errno(getgid());
9868 break;
9869 #endif
9870 #ifdef TARGET_NR_geteuid32
9871 case TARGET_NR_geteuid32:
9872 ret = get_errno(geteuid());
9873 break;
9874 #endif
9875 #ifdef TARGET_NR_getegid32
9876 case TARGET_NR_getegid32:
9877 ret = get_errno(getegid());
9878 break;
9879 #endif
9880 #ifdef TARGET_NR_setreuid32
9881 case TARGET_NR_setreuid32:
9882 ret = get_errno(setreuid(arg1, arg2));
9883 break;
9884 #endif
9885 #ifdef TARGET_NR_setregid32
9886 case TARGET_NR_setregid32:
9887 ret = get_errno(setregid(arg1, arg2));
9888 break;
9889 #endif
9890 #ifdef TARGET_NR_getgroups32
9891 case TARGET_NR_getgroups32:
9893 int gidsetsize = arg1;
9894 uint32_t *target_grouplist;
9895 gid_t *grouplist;
9896 int i;
9898 grouplist = alloca(gidsetsize * sizeof(gid_t));
9899 ret = get_errno(getgroups(gidsetsize, grouplist));
9900 if (gidsetsize == 0)
9901 break;
9902 if (!is_error(ret)) {
9903 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9904 if (!target_grouplist) {
9905 ret = -TARGET_EFAULT;
9906 goto fail;
9908 for(i = 0;i < ret; i++)
9909 target_grouplist[i] = tswap32(grouplist[i]);
9910 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9913 break;
9914 #endif
9915 #ifdef TARGET_NR_setgroups32
9916 case TARGET_NR_setgroups32:
9918 int gidsetsize = arg1;
9919 uint32_t *target_grouplist;
9920 gid_t *grouplist;
9921 int i;
9923 grouplist = alloca(gidsetsize * sizeof(gid_t));
9924 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9925 if (!target_grouplist) {
9926 ret = -TARGET_EFAULT;
9927 goto fail;
9929 for(i = 0;i < gidsetsize; i++)
9930 grouplist[i] = tswap32(target_grouplist[i]);
9931 unlock_user(target_grouplist, arg2, 0);
9932 ret = get_errno(setgroups(gidsetsize, grouplist));
9934 break;
9935 #endif
9936 #ifdef TARGET_NR_fchown32
9937 case TARGET_NR_fchown32:
9938 ret = get_errno(fchown(arg1, arg2, arg3));
9939 break;
9940 #endif
9941 #ifdef TARGET_NR_setresuid32
9942 case TARGET_NR_setresuid32:
9943 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
9944 break;
9945 #endif
9946 #ifdef TARGET_NR_getresuid32
9947 case TARGET_NR_getresuid32:
9949 uid_t ruid, euid, suid;
9950 ret = get_errno(getresuid(&ruid, &euid, &suid));
9951 if (!is_error(ret)) {
9952 if (put_user_u32(ruid, arg1)
9953 || put_user_u32(euid, arg2)
9954 || put_user_u32(suid, arg3))
9955 goto efault;
9958 break;
9959 #endif
9960 #ifdef TARGET_NR_setresgid32
9961 case TARGET_NR_setresgid32:
9962 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
9963 break;
9964 #endif
9965 #ifdef TARGET_NR_getresgid32
9966 case TARGET_NR_getresgid32:
9968 gid_t rgid, egid, sgid;
9969 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9970 if (!is_error(ret)) {
9971 if (put_user_u32(rgid, arg1)
9972 || put_user_u32(egid, arg2)
9973 || put_user_u32(sgid, arg3))
9974 goto efault;
9977 break;
9978 #endif
9979 #ifdef TARGET_NR_chown32
9980 case TARGET_NR_chown32:
9981 if (!(p = lock_user_string(arg1)))
9982 goto efault;
9983 ret = get_errno(chown(p, arg2, arg3));
9984 unlock_user(p, arg1, 0);
9985 break;
9986 #endif
9987 #ifdef TARGET_NR_setuid32
9988 case TARGET_NR_setuid32:
9989 ret = get_errno(sys_setuid(arg1));
9990 break;
9991 #endif
9992 #ifdef TARGET_NR_setgid32
9993 case TARGET_NR_setgid32:
9994 ret = get_errno(sys_setgid(arg1));
9995 break;
9996 #endif
9997 #ifdef TARGET_NR_setfsuid32
9998 case TARGET_NR_setfsuid32:
9999 ret = get_errno(setfsuid(arg1));
10000 break;
10001 #endif
10002 #ifdef TARGET_NR_setfsgid32
10003 case TARGET_NR_setfsgid32:
10004 ret = get_errno(setfsgid(arg1));
10005 break;
10006 #endif
10008 case TARGET_NR_pivot_root:
10009 goto unimplemented;
10010 #ifdef TARGET_NR_mincore
10011 case TARGET_NR_mincore:
10013 void *a;
10014 ret = -TARGET_EFAULT;
10015 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10016 goto efault;
10017 if (!(p = lock_user_string(arg3)))
10018 goto mincore_fail;
10019 ret = get_errno(mincore(a, arg2, p));
10020 unlock_user(p, arg3, ret);
10021 mincore_fail:
10022 unlock_user(a, arg1, 0);
10024 break;
10025 #endif
10026 #ifdef TARGET_NR_arm_fadvise64_64
10027 case TARGET_NR_arm_fadvise64_64:
10028 /* arm_fadvise64_64 looks like fadvise64_64 but
10029 * with different argument order: fd, advice, offset, len
10030 * rather than the usual fd, offset, len, advice.
10031 * Note that offset and len are both 64-bit so appear as
10032 * pairs of 32-bit registers.
10034 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10035 target_offset64(arg5, arg6), arg2);
10036 ret = -host_to_target_errno(ret);
10037 break;
10038 #endif
10040 #if TARGET_ABI_BITS == 32
10042 #ifdef TARGET_NR_fadvise64_64
10043 case TARGET_NR_fadvise64_64:
10044 /* 6 args: fd, offset (high, low), len (high, low), advice */
10045 if (regpairs_aligned(cpu_env)) {
10046 /* offset is in (3,4), len in (5,6) and advice in 7 */
10047 arg2 = arg3;
10048 arg3 = arg4;
10049 arg4 = arg5;
10050 arg5 = arg6;
10051 arg6 = arg7;
10053 ret = -host_to_target_errno(posix_fadvise(arg1,
10054 target_offset64(arg2, arg3),
10055 target_offset64(arg4, arg5),
10056 arg6));
10057 break;
10058 #endif
10060 #ifdef TARGET_NR_fadvise64
10061 case TARGET_NR_fadvise64:
10062 /* 5 args: fd, offset (high, low), len, advice */
10063 if (regpairs_aligned(cpu_env)) {
10064 /* offset is in (3,4), len in 5 and advice in 6 */
10065 arg2 = arg3;
10066 arg3 = arg4;
10067 arg4 = arg5;
10068 arg5 = arg6;
10070 ret = -host_to_target_errno(posix_fadvise(arg1,
10071 target_offset64(arg2, arg3),
10072 arg4, arg5));
10073 break;
10074 #endif
10076 #else /* not a 32-bit ABI */
10077 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10078 #ifdef TARGET_NR_fadvise64_64
10079 case TARGET_NR_fadvise64_64:
10080 #endif
10081 #ifdef TARGET_NR_fadvise64
10082 case TARGET_NR_fadvise64:
10083 #endif
10084 #ifdef TARGET_S390X
10085 switch (arg4) {
10086 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10087 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10088 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10089 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10090 default: break;
10092 #endif
10093 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10094 break;
10095 #endif
10096 #endif /* end of 64-bit ABI fadvise handling */
10098 #ifdef TARGET_NR_madvise
10099 case TARGET_NR_madvise:
10100 /* A straight passthrough may not be safe because qemu sometimes
10101 turns private file-backed mappings into anonymous mappings.
10102 This will break MADV_DONTNEED.
10103 This is a hint, so ignoring and returning success is ok. */
10104 ret = get_errno(0);
10105 break;
10106 #endif
10107 #if TARGET_ABI_BITS == 32
10108 case TARGET_NR_fcntl64:
10110 int cmd;
10111 struct flock64 fl;
10112 struct target_flock64 *target_fl;
10113 #ifdef TARGET_ARM
10114 struct target_eabi_flock64 *target_efl;
10115 #endif
10117 cmd = target_to_host_fcntl_cmd(arg2);
10118 if (cmd == -TARGET_EINVAL) {
10119 ret = cmd;
10120 break;
10123 switch(arg2) {
10124 case TARGET_F_GETLK64:
10125 #ifdef TARGET_ARM
10126 if (((CPUARMState *)cpu_env)->eabi) {
10127 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10128 goto efault;
10129 fl.l_type = tswap16(target_efl->l_type);
10130 fl.l_whence = tswap16(target_efl->l_whence);
10131 fl.l_start = tswap64(target_efl->l_start);
10132 fl.l_len = tswap64(target_efl->l_len);
10133 fl.l_pid = tswap32(target_efl->l_pid);
10134 unlock_user_struct(target_efl, arg3, 0);
10135 } else
10136 #endif
10138 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10139 goto efault;
10140 fl.l_type = tswap16(target_fl->l_type);
10141 fl.l_whence = tswap16(target_fl->l_whence);
10142 fl.l_start = tswap64(target_fl->l_start);
10143 fl.l_len = tswap64(target_fl->l_len);
10144 fl.l_pid = tswap32(target_fl->l_pid);
10145 unlock_user_struct(target_fl, arg3, 0);
10147 ret = get_errno(fcntl(arg1, cmd, &fl));
10148 if (ret == 0) {
10149 #ifdef TARGET_ARM
10150 if (((CPUARMState *)cpu_env)->eabi) {
10151 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
10152 goto efault;
10153 target_efl->l_type = tswap16(fl.l_type);
10154 target_efl->l_whence = tswap16(fl.l_whence);
10155 target_efl->l_start = tswap64(fl.l_start);
10156 target_efl->l_len = tswap64(fl.l_len);
10157 target_efl->l_pid = tswap32(fl.l_pid);
10158 unlock_user_struct(target_efl, arg3, 1);
10159 } else
10160 #endif
10162 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
10163 goto efault;
10164 target_fl->l_type = tswap16(fl.l_type);
10165 target_fl->l_whence = tswap16(fl.l_whence);
10166 target_fl->l_start = tswap64(fl.l_start);
10167 target_fl->l_len = tswap64(fl.l_len);
10168 target_fl->l_pid = tswap32(fl.l_pid);
10169 unlock_user_struct(target_fl, arg3, 1);
10172 break;
10174 case TARGET_F_SETLK64:
10175 case TARGET_F_SETLKW64:
10176 #ifdef TARGET_ARM
10177 if (((CPUARMState *)cpu_env)->eabi) {
10178 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10179 goto efault;
10180 fl.l_type = tswap16(target_efl->l_type);
10181 fl.l_whence = tswap16(target_efl->l_whence);
10182 fl.l_start = tswap64(target_efl->l_start);
10183 fl.l_len = tswap64(target_efl->l_len);
10184 fl.l_pid = tswap32(target_efl->l_pid);
10185 unlock_user_struct(target_efl, arg3, 0);
10186 } else
10187 #endif
10189 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10190 goto efault;
10191 fl.l_type = tswap16(target_fl->l_type);
10192 fl.l_whence = tswap16(target_fl->l_whence);
10193 fl.l_start = tswap64(target_fl->l_start);
10194 fl.l_len = tswap64(target_fl->l_len);
10195 fl.l_pid = tswap32(target_fl->l_pid);
10196 unlock_user_struct(target_fl, arg3, 0);
10198 ret = get_errno(fcntl(arg1, cmd, &fl));
10199 break;
10200 default:
10201 ret = do_fcntl(arg1, arg2, arg3);
10202 break;
10204 break;
10206 #endif
10207 #ifdef TARGET_NR_cacheflush
10208 case TARGET_NR_cacheflush:
10209 /* self-modifying code is handled automatically, so nothing needed */
10210 ret = 0;
10211 break;
10212 #endif
10213 #ifdef TARGET_NR_security
10214 case TARGET_NR_security:
10215 goto unimplemented;
10216 #endif
10217 #ifdef TARGET_NR_getpagesize
10218 case TARGET_NR_getpagesize:
10219 ret = TARGET_PAGE_SIZE;
10220 break;
10221 #endif
10222 case TARGET_NR_gettid:
10223 ret = get_errno(gettid());
10224 break;
10225 #ifdef TARGET_NR_readahead
10226 case TARGET_NR_readahead:
10227 #if TARGET_ABI_BITS == 32
10228 if (regpairs_aligned(cpu_env)) {
10229 arg2 = arg3;
10230 arg3 = arg4;
10231 arg4 = arg5;
10233 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10234 #else
10235 ret = get_errno(readahead(arg1, arg2, arg3));
10236 #endif
10237 break;
10238 #endif
10239 #ifdef CONFIG_ATTR
10240 #ifdef TARGET_NR_setxattr
10241 case TARGET_NR_listxattr:
10242 case TARGET_NR_llistxattr:
10244 void *p, *b = 0;
10245 if (arg2) {
10246 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10247 if (!b) {
10248 ret = -TARGET_EFAULT;
10249 break;
10252 p = lock_user_string(arg1);
10253 if (p) {
10254 if (num == TARGET_NR_listxattr) {
10255 ret = get_errno(listxattr(p, b, arg3));
10256 } else {
10257 ret = get_errno(llistxattr(p, b, arg3));
10259 } else {
10260 ret = -TARGET_EFAULT;
10262 unlock_user(p, arg1, 0);
10263 unlock_user(b, arg2, arg3);
10264 break;
10266 case TARGET_NR_flistxattr:
10268 void *b = 0;
10269 if (arg2) {
10270 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10271 if (!b) {
10272 ret = -TARGET_EFAULT;
10273 break;
10276 ret = get_errno(flistxattr(arg1, b, arg3));
10277 unlock_user(b, arg2, arg3);
10278 break;
10280 case TARGET_NR_setxattr:
10281 case TARGET_NR_lsetxattr:
10283 void *p, *n, *v = 0;
10284 if (arg3) {
10285 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10286 if (!v) {
10287 ret = -TARGET_EFAULT;
10288 break;
10291 p = lock_user_string(arg1);
10292 n = lock_user_string(arg2);
10293 if (p && n) {
10294 if (num == TARGET_NR_setxattr) {
10295 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10296 } else {
10297 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10299 } else {
10300 ret = -TARGET_EFAULT;
10302 unlock_user(p, arg1, 0);
10303 unlock_user(n, arg2, 0);
10304 unlock_user(v, arg3, 0);
10306 break;
10307 case TARGET_NR_fsetxattr:
10309 void *n, *v = 0;
10310 if (arg3) {
10311 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10312 if (!v) {
10313 ret = -TARGET_EFAULT;
10314 break;
10317 n = lock_user_string(arg2);
10318 if (n) {
10319 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10320 } else {
10321 ret = -TARGET_EFAULT;
10323 unlock_user(n, arg2, 0);
10324 unlock_user(v, arg3, 0);
10326 break;
10327 case TARGET_NR_getxattr:
10328 case TARGET_NR_lgetxattr:
10330 void *p, *n, *v = 0;
10331 if (arg3) {
10332 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10333 if (!v) {
10334 ret = -TARGET_EFAULT;
10335 break;
10338 p = lock_user_string(arg1);
10339 n = lock_user_string(arg2);
10340 if (p && n) {
10341 if (num == TARGET_NR_getxattr) {
10342 ret = get_errno(getxattr(p, n, v, arg4));
10343 } else {
10344 ret = get_errno(lgetxattr(p, n, v, arg4));
10346 } else {
10347 ret = -TARGET_EFAULT;
10349 unlock_user(p, arg1, 0);
10350 unlock_user(n, arg2, 0);
10351 unlock_user(v, arg3, arg4);
10353 break;
10354 case TARGET_NR_fgetxattr:
10356 void *n, *v = 0;
10357 if (arg3) {
10358 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10359 if (!v) {
10360 ret = -TARGET_EFAULT;
10361 break;
10364 n = lock_user_string(arg2);
10365 if (n) {
10366 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10367 } else {
10368 ret = -TARGET_EFAULT;
10370 unlock_user(n, arg2, 0);
10371 unlock_user(v, arg3, arg4);
10373 break;
10374 case TARGET_NR_removexattr:
10375 case TARGET_NR_lremovexattr:
10377 void *p, *n;
10378 p = lock_user_string(arg1);
10379 n = lock_user_string(arg2);
10380 if (p && n) {
10381 if (num == TARGET_NR_removexattr) {
10382 ret = get_errno(removexattr(p, n));
10383 } else {
10384 ret = get_errno(lremovexattr(p, n));
10386 } else {
10387 ret = -TARGET_EFAULT;
10389 unlock_user(p, arg1, 0);
10390 unlock_user(n, arg2, 0);
10392 break;
10393 case TARGET_NR_fremovexattr:
10395 void *n;
10396 n = lock_user_string(arg2);
10397 if (n) {
10398 ret = get_errno(fremovexattr(arg1, n));
10399 } else {
10400 ret = -TARGET_EFAULT;
10402 unlock_user(n, arg2, 0);
10404 break;
10405 #endif
10406 #endif /* CONFIG_ATTR */
10407 #ifdef TARGET_NR_set_thread_area
10408 case TARGET_NR_set_thread_area:
10409 #if defined(TARGET_MIPS)
10410 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10411 ret = 0;
10412 break;
10413 #elif defined(TARGET_CRIS)
10414 if (arg1 & 0xff)
10415 ret = -TARGET_EINVAL;
10416 else {
10417 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10418 ret = 0;
10420 break;
10421 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10422 ret = do_set_thread_area(cpu_env, arg1);
10423 break;
10424 #elif defined(TARGET_M68K)
10426 TaskState *ts = cpu->opaque;
10427 ts->tp_value = arg1;
10428 ret = 0;
10429 break;
10431 #else
10432 goto unimplemented_nowarn;
10433 #endif
10434 #endif
10435 #ifdef TARGET_NR_get_thread_area
10436 case TARGET_NR_get_thread_area:
10437 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10438 ret = do_get_thread_area(cpu_env, arg1);
10439 break;
10440 #elif defined(TARGET_M68K)
10442 TaskState *ts = cpu->opaque;
10443 ret = ts->tp_value;
10444 break;
10446 #else
10447 goto unimplemented_nowarn;
10448 #endif
10449 #endif
10450 #ifdef TARGET_NR_getdomainname
10451 case TARGET_NR_getdomainname:
10452 goto unimplemented_nowarn;
10453 #endif
10455 #ifdef TARGET_NR_clock_gettime
10456 case TARGET_NR_clock_gettime:
10458 struct timespec ts;
10459 ret = get_errno(clock_gettime(arg1, &ts));
10460 if (!is_error(ret)) {
10461 host_to_target_timespec(arg2, &ts);
10463 break;
10465 #endif
10466 #ifdef TARGET_NR_clock_getres
10467 case TARGET_NR_clock_getres:
10469 struct timespec ts;
10470 ret = get_errno(clock_getres(arg1, &ts));
10471 if (!is_error(ret)) {
10472 host_to_target_timespec(arg2, &ts);
10474 break;
10476 #endif
10477 #ifdef TARGET_NR_clock_nanosleep
10478 case TARGET_NR_clock_nanosleep:
10480 struct timespec ts;
10481 target_to_host_timespec(&ts, arg3);
10482 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10483 &ts, arg4 ? &ts : NULL));
10484 if (arg4)
10485 host_to_target_timespec(arg4, &ts);
10487 #if defined(TARGET_PPC)
10488 /* clock_nanosleep is odd in that it returns positive errno values.
10489 * On PPC, CR0 bit 3 should be set in such a situation. */
10490 if (ret && ret != -TARGET_ERESTARTSYS) {
10491 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10493 #endif
10494 break;
10496 #endif
10498 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10499 case TARGET_NR_set_tid_address:
10500 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10501 break;
10502 #endif
10504 case TARGET_NR_tkill:
10505 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10506 break;
10508 case TARGET_NR_tgkill:
10509 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
10510 target_to_host_signal(arg3)));
10511 break;
10513 #ifdef TARGET_NR_set_robust_list
10514 case TARGET_NR_set_robust_list:
10515 case TARGET_NR_get_robust_list:
10516 /* The ABI for supporting robust futexes has userspace pass
10517 * the kernel a pointer to a linked list which is updated by
10518 * userspace after the syscall; the list is walked by the kernel
10519 * when the thread exits. Since the linked list in QEMU guest
10520 * memory isn't a valid linked list for the host and we have
10521 * no way to reliably intercept the thread-death event, we can't
10522 * support these. Silently return ENOSYS so that guest userspace
10523 * falls back to a non-robust futex implementation (which should
10524 * be OK except in the corner case of the guest crashing while
10525 * holding a mutex that is shared with another process via
10526 * shared memory).
10528 goto unimplemented_nowarn;
10529 #endif
10531 #if defined(TARGET_NR_utimensat)
10532 case TARGET_NR_utimensat:
10534 struct timespec *tsp, ts[2];
10535 if (!arg3) {
10536 tsp = NULL;
10537 } else {
10538 target_to_host_timespec(ts, arg3);
10539 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10540 tsp = ts;
10542 if (!arg2)
10543 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10544 else {
10545 if (!(p = lock_user_string(arg2))) {
10546 ret = -TARGET_EFAULT;
10547 goto fail;
10549 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10550 unlock_user(p, arg2, 0);
10553 break;
10554 #endif
10555 case TARGET_NR_futex:
10556 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10557 break;
10558 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10559 case TARGET_NR_inotify_init:
10560 ret = get_errno(sys_inotify_init());
10561 break;
10562 #endif
10563 #ifdef CONFIG_INOTIFY1
10564 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10565 case TARGET_NR_inotify_init1:
10566 ret = get_errno(sys_inotify_init1(arg1));
10567 break;
10568 #endif
10569 #endif
10570 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10571 case TARGET_NR_inotify_add_watch:
10572 p = lock_user_string(arg2);
10573 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10574 unlock_user(p, arg2, 0);
10575 break;
10576 #endif
10577 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10578 case TARGET_NR_inotify_rm_watch:
10579 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10580 break;
10581 #endif
10583 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10584 case TARGET_NR_mq_open:
10586 struct mq_attr posix_mq_attr, *attrp;
10588 p = lock_user_string(arg1 - 1);
10589 if (arg4 != 0) {
10590 copy_from_user_mq_attr (&posix_mq_attr, arg4);
10591 attrp = &posix_mq_attr;
10592 } else {
10593 attrp = 0;
10595 ret = get_errno(mq_open(p, arg2, arg3, attrp));
10596 unlock_user (p, arg1, 0);
10598 break;
10600 case TARGET_NR_mq_unlink:
10601 p = lock_user_string(arg1 - 1);
10602 ret = get_errno(mq_unlink(p));
10603 unlock_user (p, arg1, 0);
10604 break;
10606 case TARGET_NR_mq_timedsend:
10608 struct timespec ts;
10610 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10611 if (arg5 != 0) {
10612 target_to_host_timespec(&ts, arg5);
10613 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10614 host_to_target_timespec(arg5, &ts);
10615 } else {
10616 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10618 unlock_user (p, arg2, arg3);
10620 break;
10622 case TARGET_NR_mq_timedreceive:
10624 struct timespec ts;
10625 unsigned int prio;
10627 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10628 if (arg5 != 0) {
10629 target_to_host_timespec(&ts, arg5);
10630 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10631 &prio, &ts));
10632 host_to_target_timespec(arg5, &ts);
10633 } else {
10634 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10635 &prio, NULL));
10637 unlock_user (p, arg2, arg3);
10638 if (arg4 != 0)
10639 put_user_u32(prio, arg4);
10641 break;
10643 /* Not implemented for now... */
10644 /* case TARGET_NR_mq_notify: */
10645 /* break; */
10647 case TARGET_NR_mq_getsetattr:
10649 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10650 ret = 0;
10651 if (arg3 != 0) {
10652 ret = mq_getattr(arg1, &posix_mq_attr_out);
10653 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10655 if (arg2 != 0) {
10656 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10657 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
10661 break;
10662 #endif
10664 #ifdef CONFIG_SPLICE
10665 #ifdef TARGET_NR_tee
10666 case TARGET_NR_tee:
10668 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10670 break;
10671 #endif
10672 #ifdef TARGET_NR_splice
10673 case TARGET_NR_splice:
10675 loff_t loff_in, loff_out;
10676 loff_t *ploff_in = NULL, *ploff_out = NULL;
10677 if (arg2) {
10678 if (get_user_u64(loff_in, arg2)) {
10679 goto efault;
10681 ploff_in = &loff_in;
10683 if (arg4) {
10684 if (get_user_u64(loff_out, arg4)) {
10685 goto efault;
10687 ploff_out = &loff_out;
10689 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10690 if (arg2) {
10691 if (put_user_u64(loff_in, arg2)) {
10692 goto efault;
10695 if (arg4) {
10696 if (put_user_u64(loff_out, arg4)) {
10697 goto efault;
10701 break;
10702 #endif
10703 #ifdef TARGET_NR_vmsplice
10704 case TARGET_NR_vmsplice:
10706 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10707 if (vec != NULL) {
10708 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10709 unlock_iovec(vec, arg2, arg3, 0);
10710 } else {
10711 ret = -host_to_target_errno(errno);
10714 break;
10715 #endif
10716 #endif /* CONFIG_SPLICE */
10717 #ifdef CONFIG_EVENTFD
10718 #if defined(TARGET_NR_eventfd)
10719 case TARGET_NR_eventfd:
10720 ret = get_errno(eventfd(arg1, 0));
10721 fd_trans_unregister(ret);
10722 break;
10723 #endif
10724 #if defined(TARGET_NR_eventfd2)
10725 case TARGET_NR_eventfd2:
10727 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10728 if (arg2 & TARGET_O_NONBLOCK) {
10729 host_flags |= O_NONBLOCK;
10731 if (arg2 & TARGET_O_CLOEXEC) {
10732 host_flags |= O_CLOEXEC;
10734 ret = get_errno(eventfd(arg1, host_flags));
10735 fd_trans_unregister(ret);
10736 break;
10738 #endif
10739 #endif /* CONFIG_EVENTFD */
10740 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10741 case TARGET_NR_fallocate:
10742 #if TARGET_ABI_BITS == 32
10743 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10744 target_offset64(arg5, arg6)));
10745 #else
10746 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10747 #endif
10748 break;
10749 #endif
10750 #if defined(CONFIG_SYNC_FILE_RANGE)
10751 #if defined(TARGET_NR_sync_file_range)
10752 case TARGET_NR_sync_file_range:
10753 #if TARGET_ABI_BITS == 32
10754 #if defined(TARGET_MIPS)
10755 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10756 target_offset64(arg5, arg6), arg7));
10757 #else
10758 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10759 target_offset64(arg4, arg5), arg6));
10760 #endif /* !TARGET_MIPS */
10761 #else
10762 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10763 #endif
10764 break;
10765 #endif
10766 #if defined(TARGET_NR_sync_file_range2)
10767 case TARGET_NR_sync_file_range2:
10768 /* This is like sync_file_range but the arguments are reordered */
10769 #if TARGET_ABI_BITS == 32
10770 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10771 target_offset64(arg5, arg6), arg2));
10772 #else
10773 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10774 #endif
10775 break;
10776 #endif
10777 #endif
10778 #if defined(TARGET_NR_signalfd4)
10779 case TARGET_NR_signalfd4:
10780 ret = do_signalfd4(arg1, arg2, arg4);
10781 break;
10782 #endif
10783 #if defined(TARGET_NR_signalfd)
10784 case TARGET_NR_signalfd:
10785 ret = do_signalfd4(arg1, arg2, 0);
10786 break;
10787 #endif
10788 #if defined(CONFIG_EPOLL)
10789 #if defined(TARGET_NR_epoll_create)
10790 case TARGET_NR_epoll_create:
10791 ret = get_errno(epoll_create(arg1));
10792 break;
10793 #endif
10794 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10795 case TARGET_NR_epoll_create1:
10796 ret = get_errno(epoll_create1(arg1));
10797 break;
10798 #endif
10799 #if defined(TARGET_NR_epoll_ctl)
10800 case TARGET_NR_epoll_ctl:
10802 struct epoll_event ep;
10803 struct epoll_event *epp = 0;
10804 if (arg4) {
10805 struct target_epoll_event *target_ep;
10806 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10807 goto efault;
10809 ep.events = tswap32(target_ep->events);
10810 /* The epoll_data_t union is just opaque data to the kernel,
10811 * so we transfer all 64 bits across and need not worry what
10812 * actual data type it is.
10814 ep.data.u64 = tswap64(target_ep->data.u64);
10815 unlock_user_struct(target_ep, arg4, 0);
10816 epp = &ep;
10818 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10819 break;
10821 #endif
10823 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10824 #define IMPLEMENT_EPOLL_PWAIT
10825 #endif
10826 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10827 #if defined(TARGET_NR_epoll_wait)
10828 case TARGET_NR_epoll_wait:
10829 #endif
10830 #if defined(IMPLEMENT_EPOLL_PWAIT)
10831 case TARGET_NR_epoll_pwait:
10832 #endif
10834 struct target_epoll_event *target_ep;
10835 struct epoll_event *ep;
10836 int epfd = arg1;
10837 int maxevents = arg3;
10838 int timeout = arg4;
10840 target_ep = lock_user(VERIFY_WRITE, arg2,
10841 maxevents * sizeof(struct target_epoll_event), 1);
10842 if (!target_ep) {
10843 goto efault;
10846 ep = alloca(maxevents * sizeof(struct epoll_event));
10848 switch (num) {
10849 #if defined(IMPLEMENT_EPOLL_PWAIT)
10850 case TARGET_NR_epoll_pwait:
10852 target_sigset_t *target_set;
10853 sigset_t _set, *set = &_set;
10855 if (arg5) {
10856 target_set = lock_user(VERIFY_READ, arg5,
10857 sizeof(target_sigset_t), 1);
10858 if (!target_set) {
10859 unlock_user(target_ep, arg2, 0);
10860 goto efault;
10862 target_to_host_sigset(set, target_set);
10863 unlock_user(target_set, arg5, 0);
10864 } else {
10865 set = NULL;
10868 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
10869 break;
10871 #endif
10872 #if defined(TARGET_NR_epoll_wait)
10873 case TARGET_NR_epoll_wait:
10874 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
10875 break;
10876 #endif
10877 default:
10878 ret = -TARGET_ENOSYS;
10880 if (!is_error(ret)) {
10881 int i;
10882 for (i = 0; i < ret; i++) {
10883 target_ep[i].events = tswap32(ep[i].events);
10884 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10887 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10888 break;
10890 #endif
10891 #endif
10892 #ifdef TARGET_NR_prlimit64
10893 case TARGET_NR_prlimit64:
10895 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10896 struct target_rlimit64 *target_rnew, *target_rold;
10897 struct host_rlimit64 rnew, rold, *rnewp = 0;
10898 int resource = target_to_host_resource(arg2);
10899 if (arg3) {
10900 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10901 goto efault;
10903 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10904 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10905 unlock_user_struct(target_rnew, arg3, 0);
10906 rnewp = &rnew;
10909 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10910 if (!is_error(ret) && arg4) {
10911 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10912 goto efault;
10914 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10915 target_rold->rlim_max = tswap64(rold.rlim_max);
10916 unlock_user_struct(target_rold, arg4, 1);
10918 break;
10920 #endif
10921 #ifdef TARGET_NR_gethostname
10922 case TARGET_NR_gethostname:
10924 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10925 if (name) {
10926 ret = get_errno(gethostname(name, arg2));
10927 unlock_user(name, arg1, arg2);
10928 } else {
10929 ret = -TARGET_EFAULT;
10931 break;
10933 #endif
10934 #ifdef TARGET_NR_atomic_cmpxchg_32
10935 case TARGET_NR_atomic_cmpxchg_32:
10937 /* should use start_exclusive from main.c */
10938 abi_ulong mem_value;
10939 if (get_user_u32(mem_value, arg6)) {
10940 target_siginfo_t info;
10941 info.si_signo = SIGSEGV;
10942 info.si_errno = 0;
10943 info.si_code = TARGET_SEGV_MAPERR;
10944 info._sifields._sigfault._addr = arg6;
10945 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10946 ret = 0xdeadbeef;
10949 if (mem_value == arg2)
10950 put_user_u32(arg1, arg6);
10951 ret = mem_value;
10952 break;
10954 #endif
10955 #ifdef TARGET_NR_atomic_barrier
10956 case TARGET_NR_atomic_barrier:
10958 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10959 ret = 0;
10960 break;
10962 #endif
10964 #ifdef TARGET_NR_timer_create
10965 case TARGET_NR_timer_create:
10967 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10969 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10971 int clkid = arg1;
10972 int timer_index = next_free_host_timer();
10974 if (timer_index < 0) {
10975 ret = -TARGET_EAGAIN;
10976 } else {
10977 timer_t *phtimer = g_posix_timers + timer_index;
10979 if (arg2) {
10980 phost_sevp = &host_sevp;
10981 ret = target_to_host_sigevent(phost_sevp, arg2);
10982 if (ret != 0) {
10983 break;
10987 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10988 if (ret) {
10989 phtimer = NULL;
10990 } else {
10991 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10992 goto efault;
10996 break;
10998 #endif
11000 #ifdef TARGET_NR_timer_settime
11001 case TARGET_NR_timer_settime:
11003 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11004 * struct itimerspec * old_value */
11005 target_timer_t timerid = get_timer_id(arg1);
11007 if (timerid < 0) {
11008 ret = timerid;
11009 } else if (arg3 == 0) {
11010 ret = -TARGET_EINVAL;
11011 } else {
11012 timer_t htimer = g_posix_timers[timerid];
11013 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11015 target_to_host_itimerspec(&hspec_new, arg3);
11016 ret = get_errno(
11017 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11018 host_to_target_itimerspec(arg2, &hspec_old);
11020 break;
11022 #endif
11024 #ifdef TARGET_NR_timer_gettime
11025 case TARGET_NR_timer_gettime:
11027 /* args: timer_t timerid, struct itimerspec *curr_value */
11028 target_timer_t timerid = get_timer_id(arg1);
11030 if (timerid < 0) {
11031 ret = timerid;
11032 } else if (!arg2) {
11033 ret = -TARGET_EFAULT;
11034 } else {
11035 timer_t htimer = g_posix_timers[timerid];
11036 struct itimerspec hspec;
11037 ret = get_errno(timer_gettime(htimer, &hspec));
11039 if (host_to_target_itimerspec(arg2, &hspec)) {
11040 ret = -TARGET_EFAULT;
11043 break;
11045 #endif
11047 #ifdef TARGET_NR_timer_getoverrun
11048 case TARGET_NR_timer_getoverrun:
11050 /* args: timer_t timerid */
11051 target_timer_t timerid = get_timer_id(arg1);
11053 if (timerid < 0) {
11054 ret = timerid;
11055 } else {
11056 timer_t htimer = g_posix_timers[timerid];
11057 ret = get_errno(timer_getoverrun(htimer));
11059 fd_trans_unregister(ret);
11060 break;
11062 #endif
11064 #ifdef TARGET_NR_timer_delete
11065 case TARGET_NR_timer_delete:
11067 /* args: timer_t timerid */
11068 target_timer_t timerid = get_timer_id(arg1);
11070 if (timerid < 0) {
11071 ret = timerid;
11072 } else {
11073 timer_t htimer = g_posix_timers[timerid];
11074 ret = get_errno(timer_delete(htimer));
11075 g_posix_timers[timerid] = 0;
11077 break;
11079 #endif
11081 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11082 case TARGET_NR_timerfd_create:
11083 ret = get_errno(timerfd_create(arg1,
11084 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11085 break;
11086 #endif
11088 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11089 case TARGET_NR_timerfd_gettime:
11091 struct itimerspec its_curr;
11093 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11095 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11096 goto efault;
11099 break;
11100 #endif
11102 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11103 case TARGET_NR_timerfd_settime:
11105 struct itimerspec its_new, its_old, *p_new;
11107 if (arg3) {
11108 if (target_to_host_itimerspec(&its_new, arg3)) {
11109 goto efault;
11111 p_new = &its_new;
11112 } else {
11113 p_new = NULL;
11116 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11118 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11119 goto efault;
11122 break;
11123 #endif
11125 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11126 case TARGET_NR_ioprio_get:
11127 ret = get_errno(ioprio_get(arg1, arg2));
11128 break;
11129 #endif
11131 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11132 case TARGET_NR_ioprio_set:
11133 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11134 break;
11135 #endif
11137 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11138 case TARGET_NR_setns:
11139 ret = get_errno(setns(arg1, arg2));
11140 break;
11141 #endif
11142 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11143 case TARGET_NR_unshare:
11144 ret = get_errno(unshare(arg1));
11145 break;
11146 #endif
11148 default:
11149 unimplemented:
11150 gemu_log("qemu: Unsupported syscall: %d\n", num);
11151 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11152 unimplemented_nowarn:
11153 #endif
11154 ret = -TARGET_ENOSYS;
11155 break;
11157 fail:
11158 #ifdef DEBUG
11159 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11160 #endif
11161 if(do_strace)
11162 print_syscall_ret(num, ret);
11163 return ret;
11164 efault:
11165 ret = -TARGET_EFAULT;
11166 goto fail;