trace: [*-user] Add events to trace guest syscalls in syscall emulation mode
[qemu/ar7.git] / linux-user / syscall.c
blobe59f16d607ce0043ad8324d82e9886108949b4cb
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <linux/netlink.h>
104 #ifdef CONFIG_RTNETLINK
105 #include <linux/rtnetlink.h>
106 #endif
107 #include <linux/audit.h>
108 #include "linux_loop.h"
109 #include "uname.h"
111 #include "qemu.h"
113 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
114 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
116 //#define DEBUG
117 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
118 * once. This exercises the codepaths for restart.
120 //#define DEBUG_ERESTARTSYS
122 //#include <linux/msdos_fs.h>
123 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
124 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
126 /* This is the size of the host kernel's sigset_t, needed where we make
127 * direct system calls that take a sigset_t pointer and a size.
129 #define SIGSET_T_SIZE (_NSIG / 8)
131 #undef _syscall0
132 #undef _syscall1
133 #undef _syscall2
134 #undef _syscall3
135 #undef _syscall4
136 #undef _syscall5
137 #undef _syscall6
139 #define _syscall0(type,name) \
140 static type name (void) \
142 return syscall(__NR_##name); \
145 #define _syscall1(type,name,type1,arg1) \
146 static type name (type1 arg1) \
148 return syscall(__NR_##name, arg1); \
151 #define _syscall2(type,name,type1,arg1,type2,arg2) \
152 static type name (type1 arg1,type2 arg2) \
154 return syscall(__NR_##name, arg1, arg2); \
157 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
158 static type name (type1 arg1,type2 arg2,type3 arg3) \
160 return syscall(__NR_##name, arg1, arg2, arg3); \
163 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
169 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
170 type5,arg5) \
171 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
173 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
177 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
178 type5,arg5,type6,arg6) \
179 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
180 type6 arg6) \
182 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
186 #define __NR_sys_uname __NR_uname
187 #define __NR_sys_getcwd1 __NR_getcwd
188 #define __NR_sys_getdents __NR_getdents
189 #define __NR_sys_getdents64 __NR_getdents64
190 #define __NR_sys_getpriority __NR_getpriority
191 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
192 #define __NR_sys_syslog __NR_syslog
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
199 defined(__s390x__)
200 #define __NR__llseek __NR_lseek
201 #endif
203 /* Newer kernel ports have llseek() instead of _llseek() */
204 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
205 #define TARGET_NR__llseek TARGET_NR_llseek
206 #endif
208 #ifdef __NR_gettid
209 _syscall0(int, gettid)
210 #else
211 /* This is a replacement for the host gettid() and must return a host
212 errno. */
213 static int gettid(void) {
214 return -ENOSYS;
216 #endif
217 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
218 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
219 #endif
220 #if !defined(__NR_getdents) || \
221 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
222 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
223 #endif
224 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
225 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
226 loff_t *, res, uint, wh);
227 #endif
228 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
229 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group,int,error_code)
232 #endif
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address,int *,tidptr)
235 #endif
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
238 const struct timespec *,timeout,int *,uaddr2,int,val3)
239 #endif
240 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
241 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
244 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
247 void *, arg);
248 _syscall2(int, capget, struct __user_cap_header_struct *, header,
249 struct __user_cap_data_struct *, data);
250 _syscall2(int, capset, struct __user_cap_header_struct *, header,
251 struct __user_cap_data_struct *, data);
252 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
253 _syscall2(int, ioprio_get, int, which, int, who)
254 #endif
255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
256 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
257 #endif
258 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
259 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
260 #endif
262 static bitmask_transtbl fcntl_flags_tbl[] = {
263 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
264 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
265 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
266 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
267 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
268 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
269 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
270 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
271 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
272 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
273 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
274 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
275 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
278 #endif
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
281 #endif
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
284 #endif
285 #if defined(O_PATH)
286 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
287 #endif
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
291 #endif
292 { 0, 0, 0, 0 }
295 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
296 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
297 typedef struct TargetFdTrans {
298 TargetFdDataFunc host_to_target_data;
299 TargetFdDataFunc target_to_host_data;
300 TargetFdAddrFunc target_to_host_addr;
301 } TargetFdTrans;
303 static TargetFdTrans **target_fd_trans;
305 static unsigned int target_fd_max;
307 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
309 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
310 return target_fd_trans[fd]->target_to_host_data;
312 return NULL;
315 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
317 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
318 return target_fd_trans[fd]->host_to_target_data;
320 return NULL;
323 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
325 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
326 return target_fd_trans[fd]->target_to_host_addr;
328 return NULL;
331 static void fd_trans_register(int fd, TargetFdTrans *trans)
333 unsigned int oldmax;
335 if (fd >= target_fd_max) {
336 oldmax = target_fd_max;
337 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
338 target_fd_trans = g_renew(TargetFdTrans *,
339 target_fd_trans, target_fd_max);
340 memset((void *)(target_fd_trans + oldmax), 0,
341 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
343 target_fd_trans[fd] = trans;
346 static void fd_trans_unregister(int fd)
348 if (fd >= 0 && fd < target_fd_max) {
349 target_fd_trans[fd] = NULL;
353 static void fd_trans_dup(int oldfd, int newfd)
355 fd_trans_unregister(newfd);
356 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
357 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper following the kernel syscall convention: on success
 * return the path length including the trailing NUL; on failure return -1
 * (getcwd() has already set errno).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        return -1;
    }
    return strlen(buf) + 1;
}
370 #ifdef TARGET_NR_utimensat
371 #ifdef CONFIG_UTIMENSAT
/* Host-libc implementation of utimensat: a NULL pathname means "operate
 * on dirfd itself", which maps to futimens(); otherwise call utimensat()
 * directly.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    return pathname ? utimensat(dirfd, pathname, times, flags)
                    : futimens(dirfd, times);
}
380 #elif defined(__NR_utimensat)
381 #define __NR_sys_utimensat __NR_utimensat
382 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
383 const struct timespec *,tsp,int,flags)
384 #else
385 static int sys_utimensat(int dirfd, const char *pathname,
386 const struct timespec times[2], int flags)
388 errno = ENOSYS;
389 return -1;
391 #endif
392 #endif /* TARGET_NR_utimensat */
394 #ifdef CONFIG_INOTIFY
395 #include <sys/inotify.h>
397 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatcher can call the libc inotify_init(). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
402 #endif
403 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around the libc inotify_add_watch(). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
408 #endif
409 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around the libc inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
414 #endif
415 #ifdef CONFIG_INOTIFY1
416 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around the libc inotify_init1(). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
421 #endif
422 #endif
423 #else
424 /* Userspace can usually survive runtime without inotify */
425 #undef TARGET_NR_inotify_init
426 #undef TARGET_NR_inotify_init1
427 #undef TARGET_NR_inotify_add_watch
428 #undef TARGET_NR_inotify_rm_watch
429 #endif /* CONFIG_INOTIFY */
431 #if defined(TARGET_NR_prlimit64)
432 #ifndef __NR_prlimit64
433 # define __NR_prlimit64 -1
434 #endif
435 #define __NR_sys_prlimit64 __NR_prlimit64
436 /* The glibc rlimit structure may not be that used by the underlying syscall */
437 struct host_rlimit64 {
438 uint64_t rlim_cur;
439 uint64_t rlim_max;
441 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
442 const struct host_rlimit64 *, new_limit,
443 struct host_rlimit64 *, old_limit)
444 #endif
447 #if defined(TARGET_NR_timer_create)
448 /* Maximum of 32 active POSIX timers allowed at any one time. */
449 static timer_t g_posix_timers[32] = { 0, } ;
451 static inline int next_free_host_timer(void)
453 int k ;
454 /* FIXME: Does finding the next free slot require a lock? */
455 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
456 if (g_posix_timers[k] == 0) {
457 g_posix_timers[k] = (timer_t) 1;
458 return k;
461 return -1;
463 #endif
465 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
466 #ifdef TARGET_ARM
467 static inline int regpairs_aligned(void *cpu_env) {
468 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
470 #elif defined(TARGET_MIPS)
471 static inline int regpairs_aligned(void *cpu_env) { return 1; }
472 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
473 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
474 * of registers which translates to the same as ARM/MIPS, because we start with
475 * r3 as arg1 */
476 static inline int regpairs_aligned(void *cpu_env) { return 1; }
477 #else
478 static inline int regpairs_aligned(void *cpu_env) { return 0; }
479 #endif
481 #define ERRNO_TABLE_SIZE 1200
483 /* target_to_host_errno_table[] is initialized from
484 * host_to_target_errno_table[] in syscall_init(). */
485 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
489 * This list is the union of errno values overridden in asm-<arch>/errno.h
490 * minus the errnos that are not actually generic to all archs.
492 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
493 [EAGAIN] = TARGET_EAGAIN,
494 [EIDRM] = TARGET_EIDRM,
495 [ECHRNG] = TARGET_ECHRNG,
496 [EL2NSYNC] = TARGET_EL2NSYNC,
497 [EL3HLT] = TARGET_EL3HLT,
498 [EL3RST] = TARGET_EL3RST,
499 [ELNRNG] = TARGET_ELNRNG,
500 [EUNATCH] = TARGET_EUNATCH,
501 [ENOCSI] = TARGET_ENOCSI,
502 [EL2HLT] = TARGET_EL2HLT,
503 [EDEADLK] = TARGET_EDEADLK,
504 [ENOLCK] = TARGET_ENOLCK,
505 [EBADE] = TARGET_EBADE,
506 [EBADR] = TARGET_EBADR,
507 [EXFULL] = TARGET_EXFULL,
508 [ENOANO] = TARGET_ENOANO,
509 [EBADRQC] = TARGET_EBADRQC,
510 [EBADSLT] = TARGET_EBADSLT,
511 [EBFONT] = TARGET_EBFONT,
512 [ENOSTR] = TARGET_ENOSTR,
513 [ENODATA] = TARGET_ENODATA,
514 [ETIME] = TARGET_ETIME,
515 [ENOSR] = TARGET_ENOSR,
516 [ENONET] = TARGET_ENONET,
517 [ENOPKG] = TARGET_ENOPKG,
518 [EREMOTE] = TARGET_EREMOTE,
519 [ENOLINK] = TARGET_ENOLINK,
520 [EADV] = TARGET_EADV,
521 [ESRMNT] = TARGET_ESRMNT,
522 [ECOMM] = TARGET_ECOMM,
523 [EPROTO] = TARGET_EPROTO,
524 [EDOTDOT] = TARGET_EDOTDOT,
525 [EMULTIHOP] = TARGET_EMULTIHOP,
526 [EBADMSG] = TARGET_EBADMSG,
527 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
528 [EOVERFLOW] = TARGET_EOVERFLOW,
529 [ENOTUNIQ] = TARGET_ENOTUNIQ,
530 [EBADFD] = TARGET_EBADFD,
531 [EREMCHG] = TARGET_EREMCHG,
532 [ELIBACC] = TARGET_ELIBACC,
533 [ELIBBAD] = TARGET_ELIBBAD,
534 [ELIBSCN] = TARGET_ELIBSCN,
535 [ELIBMAX] = TARGET_ELIBMAX,
536 [ELIBEXEC] = TARGET_ELIBEXEC,
537 [EILSEQ] = TARGET_EILSEQ,
538 [ENOSYS] = TARGET_ENOSYS,
539 [ELOOP] = TARGET_ELOOP,
540 [ERESTART] = TARGET_ERESTART,
541 [ESTRPIPE] = TARGET_ESTRPIPE,
542 [ENOTEMPTY] = TARGET_ENOTEMPTY,
543 [EUSERS] = TARGET_EUSERS,
544 [ENOTSOCK] = TARGET_ENOTSOCK,
545 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
546 [EMSGSIZE] = TARGET_EMSGSIZE,
547 [EPROTOTYPE] = TARGET_EPROTOTYPE,
548 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
549 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
550 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
551 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
552 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
553 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
554 [EADDRINUSE] = TARGET_EADDRINUSE,
555 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
556 [ENETDOWN] = TARGET_ENETDOWN,
557 [ENETUNREACH] = TARGET_ENETUNREACH,
558 [ENETRESET] = TARGET_ENETRESET,
559 [ECONNABORTED] = TARGET_ECONNABORTED,
560 [ECONNRESET] = TARGET_ECONNRESET,
561 [ENOBUFS] = TARGET_ENOBUFS,
562 [EISCONN] = TARGET_EISCONN,
563 [ENOTCONN] = TARGET_ENOTCONN,
564 [EUCLEAN] = TARGET_EUCLEAN,
565 [ENOTNAM] = TARGET_ENOTNAM,
566 [ENAVAIL] = TARGET_ENAVAIL,
567 [EISNAM] = TARGET_EISNAM,
568 [EREMOTEIO] = TARGET_EREMOTEIO,
569 [ESHUTDOWN] = TARGET_ESHUTDOWN,
570 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
571 [ETIMEDOUT] = TARGET_ETIMEDOUT,
572 [ECONNREFUSED] = TARGET_ECONNREFUSED,
573 [EHOSTDOWN] = TARGET_EHOSTDOWN,
574 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
575 [EALREADY] = TARGET_EALREADY,
576 [EINPROGRESS] = TARGET_EINPROGRESS,
577 [ESTALE] = TARGET_ESTALE,
578 [ECANCELED] = TARGET_ECANCELED,
579 [ENOMEDIUM] = TARGET_ENOMEDIUM,
580 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
581 #ifdef ENOKEY
582 [ENOKEY] = TARGET_ENOKEY,
583 #endif
584 #ifdef EKEYEXPIRED
585 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
586 #endif
587 #ifdef EKEYREVOKED
588 [EKEYREVOKED] = TARGET_EKEYREVOKED,
589 #endif
590 #ifdef EKEYREJECTED
591 [EKEYREJECTED] = TARGET_EKEYREJECTED,
592 #endif
593 #ifdef EOWNERDEAD
594 [EOWNERDEAD] = TARGET_EOWNERDEAD,
595 #endif
596 #ifdef ENOTRECOVERABLE
597 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
598 #endif
601 static inline int host_to_target_errno(int err)
603 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
604 host_to_target_errno_table[err]) {
605 return host_to_target_errno_table[err];
607 return err;
610 static inline int target_to_host_errno(int err)
612 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
613 target_to_host_errno_table[err]) {
614 return target_to_host_errno_table[err];
616 return err;
619 static inline abi_long get_errno(abi_long ret)
621 if (ret == -1)
622 return -host_to_target_errno(errno);
623 else
624 return ret;
627 static inline int is_error(abi_long ret)
629 return (abi_ulong)ret >= (abi_ulong)(-4096);
632 const char *target_strerror(int err)
634 if (err == TARGET_ERESTARTSYS) {
635 return "To be restarted";
637 if (err == TARGET_QEMU_ESIGRETURN) {
638 return "Successful exit from sigreturn";
641 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
642 return NULL;
644 return strerror(target_to_host_errno(err));
647 #define safe_syscall0(type, name) \
648 static type safe_##name(void) \
650 return safe_syscall(__NR_##name); \
653 #define safe_syscall1(type, name, type1, arg1) \
654 static type safe_##name(type1 arg1) \
656 return safe_syscall(__NR_##name, arg1); \
659 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
660 static type safe_##name(type1 arg1, type2 arg2) \
662 return safe_syscall(__NR_##name, arg1, arg2); \
665 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
666 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
668 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
671 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
672 type4, arg4) \
673 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
675 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
678 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
679 type4, arg4, type5, arg5) \
680 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
681 type5 arg5) \
683 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
686 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
687 type4, arg4, type5, arg5, type6, arg6) \
688 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
689 type5 arg5, type6 arg6) \
691 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
694 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
695 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
696 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
697 int, flags, mode_t, mode)
698 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
699 struct rusage *, rusage)
700 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
701 int, options, struct rusage *, rusage)
702 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
703 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
704 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
705 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
706 struct timespec *, tsp, const sigset_t *, sigmask,
707 size_t, sigsetsize)
708 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
709 int, maxevents, int, timeout, const sigset_t *, sigmask,
710 size_t, sigsetsize)
711 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
712 const struct timespec *,timeout,int *,uaddr2,int,val3)
713 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
714 safe_syscall2(int, kill, pid_t, pid, int, sig)
715 safe_syscall2(int, tkill, int, tid, int, sig)
716 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
717 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
718 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
719 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
720 socklen_t, addrlen)
721 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
722 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
723 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
724 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
725 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
726 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
727 safe_syscall2(int, flock, int, fd, int, operation)
728 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
729 const struct timespec *, uts, size_t, sigsetsize)
730 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
731 int, flags)
732 safe_syscall2(int, nanosleep, const struct timespec *, req,
733 struct timespec *, rem)
734 #ifdef TARGET_NR_clock_nanosleep
735 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
736 const struct timespec *, req, struct timespec *, rem)
737 #endif
738 #ifdef __NR_msgsnd
739 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
740 int, flags)
741 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
742 long, msgtype, int, flags)
743 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
744 unsigned, nsops, const struct timespec *, timeout)
745 #else
746 /* This host kernel architecture uses a single ipc syscall; fake up
747 * wrappers for the sub-operations to hide this implementation detail.
748 * Annoyingly we can't include linux/ipc.h to get the constant definitions
749 * for the call parameter because some structs in there conflict with the
750 * sys/ipc.h ones. So we just define them here, and rely on them being
751 * the same for all host architectures.
753 #define Q_SEMTIMEDOP 4
754 #define Q_MSGSND 11
755 #define Q_MSGRCV 12
756 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
758 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
759 void *, ptr, long, fifth)
760 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
762 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
764 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
766 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
768 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
769 const struct timespec *timeout)
771 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
772 (long)timeout);
774 #endif
775 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
776 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
777 size_t, len, unsigned, prio, const struct timespec *, timeout)
778 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
779 size_t, len, unsigned *, prio, const struct timespec *, timeout)
780 #endif
781 /* We do ioctl like this rather than via safe_syscall3 to preserve the
782 * "third argument might be integer or pointer or not present" behaviour of
783 * the libc function.
785 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
787 static inline int host_to_target_sock_type(int host_type)
789 int target_type;
791 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
792 case SOCK_DGRAM:
793 target_type = TARGET_SOCK_DGRAM;
794 break;
795 case SOCK_STREAM:
796 target_type = TARGET_SOCK_STREAM;
797 break;
798 default:
799 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
800 break;
803 #if defined(SOCK_CLOEXEC)
804 if (host_type & SOCK_CLOEXEC) {
805 target_type |= TARGET_SOCK_CLOEXEC;
807 #endif
809 #if defined(SOCK_NONBLOCK)
810 if (host_type & SOCK_NONBLOCK) {
811 target_type |= TARGET_SOCK_NONBLOCK;
813 #endif
815 return target_type;
818 static abi_ulong target_brk;
819 static abi_ulong target_original_brk;
820 static abi_ulong brk_page;
822 void target_set_brk(abi_ulong new_brk)
824 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
825 brk_page = HOST_PAGE_ALIGN(target_brk);
828 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
829 #define DEBUGF_BRK(message, args...)
831 /* do_brk() must return target values and target errnos. */
832 abi_long do_brk(abi_ulong new_brk)
834 abi_long mapped_addr;
835 int new_alloc_size;
837 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
839 if (!new_brk) {
840 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
841 return target_brk;
843 if (new_brk < target_original_brk) {
844 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
845 target_brk);
846 return target_brk;
849 /* If the new brk is less than the highest page reserved to the
850 * target heap allocation, set it and we're almost done... */
851 if (new_brk <= brk_page) {
852 /* Heap contents are initialized to zero, as for anonymous
853 * mapped pages. */
854 if (new_brk > target_brk) {
855 memset(g2h(target_brk), 0, new_brk - target_brk);
857 target_brk = new_brk;
858 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
859 return target_brk;
862 /* We need to allocate more memory after the brk... Note that
863 * we don't use MAP_FIXED because that will map over the top of
864 * any existing mapping (like the one with the host libc or qemu
865 * itself); instead we treat "mapped but at wrong address" as
866 * a failure and unmap again.
868 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
869 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
870 PROT_READ|PROT_WRITE,
871 MAP_ANON|MAP_PRIVATE, 0, 0));
873 if (mapped_addr == brk_page) {
874 /* Heap contents are initialized to zero, as for anonymous
875 * mapped pages. Technically the new pages are already
876 * initialized to zero since they *are* anonymous mapped
877 * pages, however we have to take care with the contents that
878 * come from the remaining part of the previous page: it may
879 * contains garbage data due to a previous heap usage (grown
880 * then shrunken). */
881 memset(g2h(target_brk), 0, brk_page - target_brk);
883 target_brk = new_brk;
884 brk_page = HOST_PAGE_ALIGN(target_brk);
885 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
886 target_brk);
887 return target_brk;
888 } else if (mapped_addr != -1) {
889 /* Mapped but at wrong address, meaning there wasn't actually
890 * enough space for this brk.
892 target_munmap(mapped_addr, new_alloc_size);
893 mapped_addr = -1;
894 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
896 else {
897 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
900 #if defined(TARGET_ALPHA)
901 /* We (partially) emulate OSF/1 on Alpha, which requires we
902 return a proper errno, not an unchanged brk value. */
903 return -TARGET_ENOMEM;
904 #endif
905 /* For everything else, return the previous break. */
906 return target_brk;
909 static inline abi_long copy_from_user_fdset(fd_set *fds,
910 abi_ulong target_fds_addr,
911 int n)
913 int i, nw, j, k;
914 abi_ulong b, *target_fds;
916 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
917 if (!(target_fds = lock_user(VERIFY_READ,
918 target_fds_addr,
919 sizeof(abi_ulong) * nw,
920 1)))
921 return -TARGET_EFAULT;
923 FD_ZERO(fds);
924 k = 0;
925 for (i = 0; i < nw; i++) {
926 /* grab the abi_ulong */
927 __get_user(b, &target_fds[i]);
928 for (j = 0; j < TARGET_ABI_BITS; j++) {
929 /* check the bit inside the abi_ulong */
930 if ((b >> j) & 1)
931 FD_SET(k, fds);
932 k++;
936 unlock_user(target_fds, target_fds_addr, 0);
938 return 0;
941 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
942 abi_ulong target_fds_addr,
943 int n)
945 if (target_fds_addr) {
946 if (copy_from_user_fdset(fds, target_fds_addr, n))
947 return -TARGET_EFAULT;
948 *fds_ptr = fds;
949 } else {
950 *fds_ptr = NULL;
952 return 0;
955 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
956 const fd_set *fds,
957 int n)
959 int i, nw, j, k;
960 abi_long v;
961 abi_ulong *target_fds;
963 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
964 if (!(target_fds = lock_user(VERIFY_WRITE,
965 target_fds_addr,
966 sizeof(abi_ulong) * nw,
967 0)))
968 return -TARGET_EFAULT;
970 k = 0;
971 for (i = 0; i < nw; i++) {
972 v = 0;
973 for (j = 0; j < TARGET_ABI_BITS; j++) {
974 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
975 k++;
977 __put_user(v, &target_fds[i]);
980 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
982 return 0;
985 #if defined(__alpha__)
986 #define HOST_HZ 1024
987 #else
988 #define HOST_HZ 100
989 #endif
991 static inline abi_long host_to_target_clock_t(long ticks)
993 #if HOST_HZ == TARGET_HZ
994 return ticks;
995 #else
996 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
997 #endif
/*
 * Copy a host struct rusage into the guest structure at target_addr,
 * swapping every field to guest byte order.  Returns 0 or
 * -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
/*
 * Convert a guest rlimit value to the host rlim_t.  The guest infinity
 * sentinel maps to RLIM_INFINITY, as does any value that does not
 * round-trip through rlim_t (i.e. would be truncated on this host).
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Truncation check: a value rlim_t cannot represent becomes infinity. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
/*
 * Convert a host rlim_t to the guest representation.  Host infinity, or
 * any value too large for the guest's abi_long, becomes the guest's
 * TARGET_RLIM_INFINITY sentinel; the result is swapped to guest order.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
/*
 * Map a guest RLIMIT_* resource code to the host's value.  Unknown
 * codes are passed through unchanged and left for the host syscall to
 * reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1098 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1099 abi_ulong target_tv_addr)
1101 struct target_timeval *target_tv;
1103 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1104 return -TARGET_EFAULT;
1106 __get_user(tv->tv_sec, &target_tv->tv_sec);
1107 __get_user(tv->tv_usec, &target_tv->tv_usec);
1109 unlock_user_struct(target_tv, target_tv_addr, 0);
1111 return 0;
1114 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1115 const struct timeval *tv)
1117 struct target_timeval *target_tv;
1119 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1120 return -TARGET_EFAULT;
1122 __put_user(tv->tv_sec, &target_tv->tv_sec);
1123 __put_user(tv->tv_usec, &target_tv->tv_usec);
1125 unlock_user_struct(target_tv, target_tv_addr, 1);
1127 return 0;
1130 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1131 abi_ulong target_tz_addr)
1133 struct target_timezone *target_tz;
1135 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1136 return -TARGET_EFAULT;
1139 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1140 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1142 unlock_user_struct(target_tz, target_tz_addr, 0);
1144 return 0;
1147 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1148 #include <mqueue.h>
1150 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1151 abi_ulong target_mq_attr_addr)
1153 struct target_mq_attr *target_mq_attr;
1155 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1156 target_mq_attr_addr, 1))
1157 return -TARGET_EFAULT;
1159 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1160 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1161 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1162 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1164 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1166 return 0;
1169 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1170 const struct mq_attr *attr)
1172 struct target_mq_attr *target_mq_attr;
1174 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1175 target_mq_attr_addr, 0))
1176 return -TARGET_EFAULT;
1178 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1179 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1180 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1181 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1183 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1185 return 0;
1187 #endif
1189 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements select(2)/_newselect for the guest: converts the three
 * optional fd_sets and the optional timeout in, runs the host via
 * safe_pselect6 (interruptible wrapper), and on success converts the
 * surviving fd_sets and the remaining timeout back out.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select's timeval must be widened to pselect6's timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux select() updates the timeout with the time not slept. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1246 #endif
/*
 * Thin wrapper for pipe2(2); hosts without it report -ENOSYS.
 * NOTE(review): returns a raw host errno, not a target errno —
 * the caller (do_pipe) runs the result through get_errno/is_error.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/*
 * Implement pipe(2)/pipe2(2) for the guest.  flags != 0 forces the
 * pipe2 path; is_pipe2 selects which guest ABI convention to use for
 * returning the descriptors.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* These targets return fd[0] in the result register and fd[1]
           in a second register rather than writing both to memory. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/*
 * Convert a guest ip_mreq/ip_mreqn (multicast membership request) to
 * the host struct.  The two in_addr fields stay in network byte order;
 * only the optional ifindex (present when the guest passed the larger
 * ip_mreqn form, detected via len) needs swapping.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/*
 * Convert a guest sockaddr at target_addr into the host *addr.  Per-fd
 * translators (fd_trans) take precedence.  AF_UNIX paths get a missing
 * NUL terminator re-counted into len; AF_NETLINK and AF_PACKET have
 * host-endian fields that need swapping.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* NOTE(review): cp[len] peeks one byte past the locked
             * region; presumably safe because lock_user rounds up —
             * confirm against lock_user's allocation granularity. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/*
 * Copy the host sockaddr *addr out to the guest at target_addr,
 * swapping sa_family and, for AF_NETLINK, the pid/groups fields.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    if (addr->sa_family == AF_NETLINK) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/*
 * Convert the guest control-message (cmsg) chain of target_msgh into
 * the pre-allocated host buffer described by msgh.  SCM_RIGHTS fds and
 * SCM_CREDENTIALS are converted field by field; anything else is
 * byte-copied with a warning.  On return msgh->msg_controllen is the
 * host space actually used.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length as declared by the guest header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptors: swap each int individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass the bytes through unswapped. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/*
 * Convert the host cmsg chain in msgh back into the guest buffer of
 * target_msgh.  Mirrors Linux put_cmsg(): headers are never split, but
 * payloads may be truncated, in which case MSG_CTRUNC is reported to
 * the guest.  On return target_msgh->msg_controllen is the guest space
 * used.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length for this cmsg. */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any growth. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/*
 * Byte-swap every field of a netlink message header in place.
 * The swap is symmetric, so this serves both conversion directions.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
/*
 * Walk a host-order netlink message stream of total size len, invoking
 * the callback on each payload-bearing message and swapping every
 * header to guest order.  Stops at NLMSG_DONE/NLMSG_ERROR or on a
 * malformed length.  Returns 0 or the callback's negative error.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Header is still host order here; save len before swapping. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/*
 * Walk a guest-order netlink message stream, swapping each header to
 * host order and invoking the callback on payload-bearing messages.
 * Returns 0 or the callback's negative error.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Length is guest order until tswap_nlmsghdr below. */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
        }
        /* NOTE(review): NLMSG_ERROR falls through to the callback here,
         * unlike host_to_target_for_each_nlmsg which returns — confirm
         * this asymmetry is intentional. */
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
1703 #ifdef CONFIG_RTNETLINK
/*
 * Walk a host-order rtattr chain of total size len: convert each
 * attribute's payload via the callback, then swap its header to guest
 * order.  Stops on a malformed length.  Returns 0 or the callback's
 * negative error.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Save the host-order length before the header is swapped. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/*
 * Byte-swap the payload of one IFLA_* link attribute from host to
 * guest order, according to the attribute's declared type.  Binary
 * blobs, strings and u8 values need no conversion.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;

    switch (rtattr->rta_type) {
    /* binary stream */
    case IFLA_ADDRESS:
    case IFLA_BROADCAST:
    /* string */
    case IFLA_IFNAME:
    case IFLA_QDISC:
        break;
    /* uin8_t */
    case IFLA_OPERSTATE:
    case IFLA_LINKMODE:
    case IFLA_CARRIER:
    case IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case IFLA_MTU:
    case IFLA_LINK:
    case IFLA_WEIGHT:
    case IFLA_TXQLEN:
    case IFLA_CARRIER_CHANGES:
    case IFLA_NUM_RX_QUEUES:
    case IFLA_NUM_TX_QUEUES:
    case IFLA_PROMISCUITY:
    case IFLA_EXT_MASK:
    case IFLA_LINK_NETNSID:
    case IFLA_GROUP:
    case IFLA_MASTER:
    case IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats */
    case IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64 */
    case IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case IFLA_AF_SPEC:
    case IFLA_LINKINFO:
        /* FIXME: implement nested type */
        gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
        break;
    default:
        gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap the payload of one IFA_* address attribute from host to
 * guest order, according to the attribute's declared type.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): IFA_BROADCAST is swapped as a u32 here although
         * for IPv4 it is an address in network byte order — confirm
         * against rtnetlink(7). */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap the payload of one RTA_* route attribute from host to
 * guest order, according to the attribute's declared type.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert a chain of link (IFLA_*) attributes from host to guest order. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Convert a chain of address (IFA_*) attributes from host to guest order. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Convert a chain of route (RTA_*) attributes from host to guest order. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/*
 * Convert the payload of one rtnetlink message from host to guest
 * order: swap the fixed family struct (ifinfomsg/ifaddrmsg/rtmsg) and
 * delegate the trailing attributes to the matching rtattr walker.
 * Unknown message types yield -TARGET_EINVAL.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* Header is still host order when the callback runs. */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        host_to_target_link_rtattr(IFLA_RTA(ifi),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        host_to_target_addr_rtattr(IFA_RTA(ifa),
                                   nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        host_to_target_route_rtattr(RTM_RTA(rtm),
                                    nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert an entire rtnetlink reply stream from host to guest order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
/*
 * Walk a guest-order rtattr chain: swap each header to host order,
 * then convert the payload via the callback.  Stops on a malformed
 * length.  Returns 0 or the callback's negative error.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* Length is guest order until the swap just below. */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
/*
 * Convert one guest IFLA_* link attribute payload to host order.
 * No guest-originated link attributes are handled yet; anything seen
 * is logged and passed through unchanged.
 */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Convert one guest IFA_* address attribute payload to host order.
 * Binary address payloads need no swapping; unknown types are logged
 * and passed through unchanged.
 */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Convert one guest RTA_* route attribute payload to host order.
 */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert a chain of link (IFLA_*) attributes from guest to host order. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Convert a chain of address (IFA_*) attributes from guest to host order. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Convert a chain of route (RTA_*) attributes from guest to host order. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/*
 * Convert the payload of one guest rtnetlink request to host order.
 * GET requests with dump semantics carry no fixed struct to swap;
 * NEW/DEL requests swap the family struct and then the attributes.
 * Unknown message types yield -TARGET_EOPNOTSUPP.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        ifi = NLMSG_DATA(nlh);
        ifi->ifi_type = tswap16(ifi->ifi_type);
        ifi->ifi_index = tswap32(ifi->ifi_index);
        ifi->ifi_flags = tswap32(ifi->ifi_flags);
        ifi->ifi_change = tswap32(ifi->ifi_change);
        target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifi)));
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        ifa = NLMSG_DATA(nlh);
        ifa->ifa_index = tswap32(ifa->ifa_index);
        target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                   NLMSG_LENGTH(sizeof(*ifa)));
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_flags = tswap32(rtm->rtm_flags);
        target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                    NLMSG_LENGTH(sizeof(*rtm)));
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert an entire guest rtnetlink request stream to host order. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2118 #endif /* CONFIG_RTNETLINK */
2120 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2122 switch (nlh->nlmsg_type) {
2123 default:
2124 gemu_log("Unknown host audit message type %d\n",
2125 nlh->nlmsg_type);
2126 return -TARGET_EINVAL;
2128 return 0;
2131 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2132 size_t len)
2134 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2137 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2139 switch (nlh->nlmsg_type) {
2140 case AUDIT_USER:
2141 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2142 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2143 break;
2144 default:
2145 gemu_log("Unknown target audit message type %d\n",
2146 nlh->nlmsg_type);
2147 return -TARGET_EINVAL;
2150 return 0;
2153 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2155 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2158 /* do_setsockopt() Must return target values and target errnos. */
2159 static abi_long do_setsockopt(int sockfd, int level, int optname,
2160 abi_ulong optval_addr, socklen_t optlen)
2162 abi_long ret;
2163 int val;
2164 struct ip_mreqn *ip_mreq;
2165 struct ip_mreq_source *ip_mreq_source;
2167 switch(level) {
2168 case SOL_TCP:
2169 /* TCP options all take an 'int' value. */
2170 if (optlen < sizeof(uint32_t))
2171 return -TARGET_EINVAL;
2173 if (get_user_u32(val, optval_addr))
2174 return -TARGET_EFAULT;
2175 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2176 break;
2177 case SOL_IP:
2178 switch(optname) {
2179 case IP_TOS:
2180 case IP_TTL:
2181 case IP_HDRINCL:
2182 case IP_ROUTER_ALERT:
2183 case IP_RECVOPTS:
2184 case IP_RETOPTS:
2185 case IP_PKTINFO:
2186 case IP_MTU_DISCOVER:
2187 case IP_RECVERR:
2188 case IP_RECVTOS:
2189 #ifdef IP_FREEBIND
2190 case IP_FREEBIND:
2191 #endif
2192 case IP_MULTICAST_TTL:
2193 case IP_MULTICAST_LOOP:
2194 val = 0;
2195 if (optlen >= sizeof(uint32_t)) {
2196 if (get_user_u32(val, optval_addr))
2197 return -TARGET_EFAULT;
2198 } else if (optlen >= 1) {
2199 if (get_user_u8(val, optval_addr))
2200 return -TARGET_EFAULT;
2202 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2203 break;
2204 case IP_ADD_MEMBERSHIP:
2205 case IP_DROP_MEMBERSHIP:
2206 if (optlen < sizeof (struct target_ip_mreq) ||
2207 optlen > sizeof (struct target_ip_mreqn))
2208 return -TARGET_EINVAL;
2210 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2211 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2212 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2213 break;
2215 case IP_BLOCK_SOURCE:
2216 case IP_UNBLOCK_SOURCE:
2217 case IP_ADD_SOURCE_MEMBERSHIP:
2218 case IP_DROP_SOURCE_MEMBERSHIP:
2219 if (optlen != sizeof (struct target_ip_mreq_source))
2220 return -TARGET_EINVAL;
2222 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2223 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2224 unlock_user (ip_mreq_source, optval_addr, 0);
2225 break;
2227 default:
2228 goto unimplemented;
2230 break;
2231 case SOL_IPV6:
2232 switch (optname) {
2233 case IPV6_MTU_DISCOVER:
2234 case IPV6_MTU:
2235 case IPV6_V6ONLY:
2236 case IPV6_RECVPKTINFO:
2237 val = 0;
2238 if (optlen < sizeof(uint32_t)) {
2239 return -TARGET_EINVAL;
2241 if (get_user_u32(val, optval_addr)) {
2242 return -TARGET_EFAULT;
2244 ret = get_errno(setsockopt(sockfd, level, optname,
2245 &val, sizeof(val)));
2246 break;
2247 default:
2248 goto unimplemented;
2250 break;
2251 case SOL_RAW:
2252 switch (optname) {
2253 case ICMP_FILTER:
2254 /* struct icmp_filter takes an u32 value */
2255 if (optlen < sizeof(uint32_t)) {
2256 return -TARGET_EINVAL;
2259 if (get_user_u32(val, optval_addr)) {
2260 return -TARGET_EFAULT;
2262 ret = get_errno(setsockopt(sockfd, level, optname,
2263 &val, sizeof(val)));
2264 break;
2266 default:
2267 goto unimplemented;
2269 break;
2270 case TARGET_SOL_SOCKET:
2271 switch (optname) {
2272 case TARGET_SO_RCVTIMEO:
2274 struct timeval tv;
2276 optname = SO_RCVTIMEO;
2278 set_timeout:
2279 if (optlen != sizeof(struct target_timeval)) {
2280 return -TARGET_EINVAL;
2283 if (copy_from_user_timeval(&tv, optval_addr)) {
2284 return -TARGET_EFAULT;
2287 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2288 &tv, sizeof(tv)));
2289 return ret;
2291 case TARGET_SO_SNDTIMEO:
2292 optname = SO_SNDTIMEO;
2293 goto set_timeout;
2294 case TARGET_SO_ATTACH_FILTER:
2296 struct target_sock_fprog *tfprog;
2297 struct target_sock_filter *tfilter;
2298 struct sock_fprog fprog;
2299 struct sock_filter *filter;
2300 int i;
2302 if (optlen != sizeof(*tfprog)) {
2303 return -TARGET_EINVAL;
2305 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2306 return -TARGET_EFAULT;
2308 if (!lock_user_struct(VERIFY_READ, tfilter,
2309 tswapal(tfprog->filter), 0)) {
2310 unlock_user_struct(tfprog, optval_addr, 1);
2311 return -TARGET_EFAULT;
2314 fprog.len = tswap16(tfprog->len);
2315 filter = g_try_new(struct sock_filter, fprog.len);
2316 if (filter == NULL) {
2317 unlock_user_struct(tfilter, tfprog->filter, 1);
2318 unlock_user_struct(tfprog, optval_addr, 1);
2319 return -TARGET_ENOMEM;
2321 for (i = 0; i < fprog.len; i++) {
2322 filter[i].code = tswap16(tfilter[i].code);
2323 filter[i].jt = tfilter[i].jt;
2324 filter[i].jf = tfilter[i].jf;
2325 filter[i].k = tswap32(tfilter[i].k);
2327 fprog.filter = filter;
2329 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2330 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2331 g_free(filter);
2333 unlock_user_struct(tfilter, tfprog->filter, 1);
2334 unlock_user_struct(tfprog, optval_addr, 1);
2335 return ret;
2337 case TARGET_SO_BINDTODEVICE:
2339 char *dev_ifname, *addr_ifname;
2341 if (optlen > IFNAMSIZ - 1) {
2342 optlen = IFNAMSIZ - 1;
2344 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2345 if (!dev_ifname) {
2346 return -TARGET_EFAULT;
2348 optname = SO_BINDTODEVICE;
2349 addr_ifname = alloca(IFNAMSIZ);
2350 memcpy(addr_ifname, dev_ifname, optlen);
2351 addr_ifname[optlen] = 0;
2352 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2353 addr_ifname, optlen));
2354 unlock_user (dev_ifname, optval_addr, 0);
2355 return ret;
2357 /* Options with 'int' argument. */
2358 case TARGET_SO_DEBUG:
2359 optname = SO_DEBUG;
2360 break;
2361 case TARGET_SO_REUSEADDR:
2362 optname = SO_REUSEADDR;
2363 break;
2364 case TARGET_SO_TYPE:
2365 optname = SO_TYPE;
2366 break;
2367 case TARGET_SO_ERROR:
2368 optname = SO_ERROR;
2369 break;
2370 case TARGET_SO_DONTROUTE:
2371 optname = SO_DONTROUTE;
2372 break;
2373 case TARGET_SO_BROADCAST:
2374 optname = SO_BROADCAST;
2375 break;
2376 case TARGET_SO_SNDBUF:
2377 optname = SO_SNDBUF;
2378 break;
2379 case TARGET_SO_SNDBUFFORCE:
2380 optname = SO_SNDBUFFORCE;
2381 break;
2382 case TARGET_SO_RCVBUF:
2383 optname = SO_RCVBUF;
2384 break;
2385 case TARGET_SO_RCVBUFFORCE:
2386 optname = SO_RCVBUFFORCE;
2387 break;
2388 case TARGET_SO_KEEPALIVE:
2389 optname = SO_KEEPALIVE;
2390 break;
2391 case TARGET_SO_OOBINLINE:
2392 optname = SO_OOBINLINE;
2393 break;
2394 case TARGET_SO_NO_CHECK:
2395 optname = SO_NO_CHECK;
2396 break;
2397 case TARGET_SO_PRIORITY:
2398 optname = SO_PRIORITY;
2399 break;
2400 #ifdef SO_BSDCOMPAT
2401 case TARGET_SO_BSDCOMPAT:
2402 optname = SO_BSDCOMPAT;
2403 break;
2404 #endif
2405 case TARGET_SO_PASSCRED:
2406 optname = SO_PASSCRED;
2407 break;
2408 case TARGET_SO_PASSSEC:
2409 optname = SO_PASSSEC;
2410 break;
2411 case TARGET_SO_TIMESTAMP:
2412 optname = SO_TIMESTAMP;
2413 break;
2414 case TARGET_SO_RCVLOWAT:
2415 optname = SO_RCVLOWAT;
2416 break;
2417 break;
2418 default:
2419 goto unimplemented;
2421 if (optlen < sizeof(uint32_t))
2422 return -TARGET_EINVAL;
2424 if (get_user_u32(val, optval_addr))
2425 return -TARGET_EFAULT;
2426 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2427 break;
2428 default:
2429 unimplemented:
2430 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2431 ret = -TARGET_ENOPROTOOPT;
2433 return ret;
2436 /* do_getsockopt() Must return target values and target errnos. */
2437 static abi_long do_getsockopt(int sockfd, int level, int optname,
2438 abi_ulong optval_addr, abi_ulong optlen)
2440 abi_long ret;
2441 int len, val;
2442 socklen_t lv;
2444 switch(level) {
2445 case TARGET_SOL_SOCKET:
2446 level = SOL_SOCKET;
2447 switch (optname) {
2448 /* These don't just return a single integer */
2449 case TARGET_SO_LINGER:
2450 case TARGET_SO_RCVTIMEO:
2451 case TARGET_SO_SNDTIMEO:
2452 case TARGET_SO_PEERNAME:
2453 goto unimplemented;
2454 case TARGET_SO_PEERCRED: {
2455 struct ucred cr;
2456 socklen_t crlen;
2457 struct target_ucred *tcr;
2459 if (get_user_u32(len, optlen)) {
2460 return -TARGET_EFAULT;
2462 if (len < 0) {
2463 return -TARGET_EINVAL;
2466 crlen = sizeof(cr);
2467 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2468 &cr, &crlen));
2469 if (ret < 0) {
2470 return ret;
2472 if (len > crlen) {
2473 len = crlen;
2475 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2476 return -TARGET_EFAULT;
2478 __put_user(cr.pid, &tcr->pid);
2479 __put_user(cr.uid, &tcr->uid);
2480 __put_user(cr.gid, &tcr->gid);
2481 unlock_user_struct(tcr, optval_addr, 1);
2482 if (put_user_u32(len, optlen)) {
2483 return -TARGET_EFAULT;
2485 break;
2487 /* Options with 'int' argument. */
2488 case TARGET_SO_DEBUG:
2489 optname = SO_DEBUG;
2490 goto int_case;
2491 case TARGET_SO_REUSEADDR:
2492 optname = SO_REUSEADDR;
2493 goto int_case;
2494 case TARGET_SO_TYPE:
2495 optname = SO_TYPE;
2496 goto int_case;
2497 case TARGET_SO_ERROR:
2498 optname = SO_ERROR;
2499 goto int_case;
2500 case TARGET_SO_DONTROUTE:
2501 optname = SO_DONTROUTE;
2502 goto int_case;
2503 case TARGET_SO_BROADCAST:
2504 optname = SO_BROADCAST;
2505 goto int_case;
2506 case TARGET_SO_SNDBUF:
2507 optname = SO_SNDBUF;
2508 goto int_case;
2509 case TARGET_SO_RCVBUF:
2510 optname = SO_RCVBUF;
2511 goto int_case;
2512 case TARGET_SO_KEEPALIVE:
2513 optname = SO_KEEPALIVE;
2514 goto int_case;
2515 case TARGET_SO_OOBINLINE:
2516 optname = SO_OOBINLINE;
2517 goto int_case;
2518 case TARGET_SO_NO_CHECK:
2519 optname = SO_NO_CHECK;
2520 goto int_case;
2521 case TARGET_SO_PRIORITY:
2522 optname = SO_PRIORITY;
2523 goto int_case;
2524 #ifdef SO_BSDCOMPAT
2525 case TARGET_SO_BSDCOMPAT:
2526 optname = SO_BSDCOMPAT;
2527 goto int_case;
2528 #endif
2529 case TARGET_SO_PASSCRED:
2530 optname = SO_PASSCRED;
2531 goto int_case;
2532 case TARGET_SO_TIMESTAMP:
2533 optname = SO_TIMESTAMP;
2534 goto int_case;
2535 case TARGET_SO_RCVLOWAT:
2536 optname = SO_RCVLOWAT;
2537 goto int_case;
2538 case TARGET_SO_ACCEPTCONN:
2539 optname = SO_ACCEPTCONN;
2540 goto int_case;
2541 default:
2542 goto int_case;
2544 break;
2545 case SOL_TCP:
2546 /* TCP options all take an 'int' value. */
2547 int_case:
2548 if (get_user_u32(len, optlen))
2549 return -TARGET_EFAULT;
2550 if (len < 0)
2551 return -TARGET_EINVAL;
2552 lv = sizeof(lv);
2553 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2554 if (ret < 0)
2555 return ret;
2556 if (optname == SO_TYPE) {
2557 val = host_to_target_sock_type(val);
2559 if (len > lv)
2560 len = lv;
2561 if (len == 4) {
2562 if (put_user_u32(val, optval_addr))
2563 return -TARGET_EFAULT;
2564 } else {
2565 if (put_user_u8(val, optval_addr))
2566 return -TARGET_EFAULT;
2568 if (put_user_u32(len, optlen))
2569 return -TARGET_EFAULT;
2570 break;
2571 case SOL_IP:
2572 switch(optname) {
2573 case IP_TOS:
2574 case IP_TTL:
2575 case IP_HDRINCL:
2576 case IP_ROUTER_ALERT:
2577 case IP_RECVOPTS:
2578 case IP_RETOPTS:
2579 case IP_PKTINFO:
2580 case IP_MTU_DISCOVER:
2581 case IP_RECVERR:
2582 case IP_RECVTOS:
2583 #ifdef IP_FREEBIND
2584 case IP_FREEBIND:
2585 #endif
2586 case IP_MULTICAST_TTL:
2587 case IP_MULTICAST_LOOP:
2588 if (get_user_u32(len, optlen))
2589 return -TARGET_EFAULT;
2590 if (len < 0)
2591 return -TARGET_EINVAL;
2592 lv = sizeof(lv);
2593 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2594 if (ret < 0)
2595 return ret;
2596 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2597 len = 1;
2598 if (put_user_u32(len, optlen)
2599 || put_user_u8(val, optval_addr))
2600 return -TARGET_EFAULT;
2601 } else {
2602 if (len > sizeof(int))
2603 len = sizeof(int);
2604 if (put_user_u32(len, optlen)
2605 || put_user_u32(val, optval_addr))
2606 return -TARGET_EFAULT;
2608 break;
2609 default:
2610 ret = -TARGET_ENOPROTOOPT;
2611 break;
2613 break;
2614 default:
2615 unimplemented:
2616 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2617 level, optname);
2618 ret = -TARGET_EOPNOTSUPP;
2619 break;
2621 return ret;
2624 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2625 int count, int copy)
2627 struct target_iovec *target_vec;
2628 struct iovec *vec;
2629 abi_ulong total_len, max_len;
2630 int i;
2631 int err = 0;
2632 bool bad_address = false;
2634 if (count == 0) {
2635 errno = 0;
2636 return NULL;
2638 if (count < 0 || count > IOV_MAX) {
2639 errno = EINVAL;
2640 return NULL;
2643 vec = g_try_new0(struct iovec, count);
2644 if (vec == NULL) {
2645 errno = ENOMEM;
2646 return NULL;
2649 target_vec = lock_user(VERIFY_READ, target_addr,
2650 count * sizeof(struct target_iovec), 1);
2651 if (target_vec == NULL) {
2652 err = EFAULT;
2653 goto fail2;
2656 /* ??? If host page size > target page size, this will result in a
2657 value larger than what we can actually support. */
2658 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2659 total_len = 0;
2661 for (i = 0; i < count; i++) {
2662 abi_ulong base = tswapal(target_vec[i].iov_base);
2663 abi_long len = tswapal(target_vec[i].iov_len);
2665 if (len < 0) {
2666 err = EINVAL;
2667 goto fail;
2668 } else if (len == 0) {
2669 /* Zero length pointer is ignored. */
2670 vec[i].iov_base = 0;
2671 } else {
2672 vec[i].iov_base = lock_user(type, base, len, copy);
2673 /* If the first buffer pointer is bad, this is a fault. But
2674 * subsequent bad buffers will result in a partial write; this
2675 * is realized by filling the vector with null pointers and
2676 * zero lengths. */
2677 if (!vec[i].iov_base) {
2678 if (i == 0) {
2679 err = EFAULT;
2680 goto fail;
2681 } else {
2682 bad_address = true;
2685 if (bad_address) {
2686 len = 0;
2688 if (len > max_len - total_len) {
2689 len = max_len - total_len;
2692 vec[i].iov_len = len;
2693 total_len += len;
2696 unlock_user(target_vec, target_addr, 0);
2697 return vec;
2699 fail:
2700 while (--i >= 0) {
2701 if (tswapal(target_vec[i].iov_len) > 0) {
2702 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2705 unlock_user(target_vec, target_addr, 0);
2706 fail2:
2707 g_free(vec);
2708 errno = err;
2709 return NULL;
2712 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2713 int count, int copy)
2715 struct target_iovec *target_vec;
2716 int i;
2718 target_vec = lock_user(VERIFY_READ, target_addr,
2719 count * sizeof(struct target_iovec), 1);
2720 if (target_vec) {
2721 for (i = 0; i < count; i++) {
2722 abi_ulong base = tswapal(target_vec[i].iov_base);
2723 abi_long len = tswapal(target_vec[i].iov_len);
2724 if (len < 0) {
2725 break;
2727 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2729 unlock_user(target_vec, target_addr, 0);
2732 g_free(vec);
2735 static inline int target_to_host_sock_type(int *type)
2737 int host_type = 0;
2738 int target_type = *type;
2740 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2741 case TARGET_SOCK_DGRAM:
2742 host_type = SOCK_DGRAM;
2743 break;
2744 case TARGET_SOCK_STREAM:
2745 host_type = SOCK_STREAM;
2746 break;
2747 default:
2748 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2749 break;
2751 if (target_type & TARGET_SOCK_CLOEXEC) {
2752 #if defined(SOCK_CLOEXEC)
2753 host_type |= SOCK_CLOEXEC;
2754 #else
2755 return -TARGET_EINVAL;
2756 #endif
2758 if (target_type & TARGET_SOCK_NONBLOCK) {
2759 #if defined(SOCK_NONBLOCK)
2760 host_type |= SOCK_NONBLOCK;
2761 #elif !defined(O_NONBLOCK)
2762 return -TARGET_EINVAL;
2763 #endif
2765 *type = host_type;
2766 return 0;
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, fall back to fcntl(O_NONBLOCK).
 * Fix over the previous version: the F_GETFL result is checked for
 * failure (-1) before being OR-ed into F_SETFL; previously a failing
 * F_GETFL would have set bogus flags.
 *
 * Returns fd on success; on failure the fd is closed and
 * -TARGET_EINVAL is returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2784 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2785 abi_ulong target_addr,
2786 socklen_t len)
2788 struct sockaddr *addr = host_addr;
2789 struct target_sockaddr *target_saddr;
2791 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2792 if (!target_saddr) {
2793 return -TARGET_EFAULT;
2796 memcpy(addr, target_saddr, len);
2797 addr->sa_family = tswap16(target_saddr->sa_family);
2798 /* spkt_protocol is big-endian */
2800 unlock_user(target_saddr, target_addr, 0);
2801 return 0;
2804 static TargetFdTrans target_packet_trans = {
2805 .target_to_host_addr = packet_target_to_host_sockaddr,
#ifdef CONFIG_RTNETLINK
/* fd translator hooks for NETLINK_ROUTE sockets: byte-swap message
 * payloads in both directions.
 */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    return target_to_host_nlmsg_route(buf, len);
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    return host_to_target_nlmsg_route(buf, len);
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
2825 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
2827 return target_to_host_nlmsg_audit(buf, len);
2830 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
2832 return host_to_target_nlmsg_audit(buf, len);
2835 static TargetFdTrans target_netlink_audit_trans = {
2836 .target_to_host_data = netlink_audit_target_to_host,
2837 .host_to_target_data = netlink_audit_host_to_target,
2840 /* do_socket() Must return target values and target errnos. */
2841 static abi_long do_socket(int domain, int type, int protocol)
2843 int target_type = type;
2844 int ret;
2846 ret = target_to_host_sock_type(&type);
2847 if (ret) {
2848 return ret;
2851 if (domain == PF_NETLINK && !(
2852 #ifdef CONFIG_RTNETLINK
2853 protocol == NETLINK_ROUTE ||
2854 #endif
2855 protocol == NETLINK_KOBJECT_UEVENT ||
2856 protocol == NETLINK_AUDIT)) {
2857 return -EPFNOSUPPORT;
2860 if (domain == AF_PACKET ||
2861 (domain == AF_INET && type == SOCK_PACKET)) {
2862 protocol = tswap16(protocol);
2865 ret = get_errno(socket(domain, type, protocol));
2866 if (ret >= 0) {
2867 ret = sock_flags_fixup(ret, target_type);
2868 if (type == SOCK_PACKET) {
2869 /* Manage an obsolete case :
2870 * if socket type is SOCK_PACKET, bind by name
2872 fd_trans_register(ret, &target_packet_trans);
2873 } else if (domain == PF_NETLINK) {
2874 switch (protocol) {
2875 #ifdef CONFIG_RTNETLINK
2876 case NETLINK_ROUTE:
2877 fd_trans_register(ret, &target_netlink_route_trans);
2878 break;
2879 #endif
2880 case NETLINK_KOBJECT_UEVENT:
2881 /* nothing to do: messages are strings */
2882 break;
2883 case NETLINK_AUDIT:
2884 fd_trans_register(ret, &target_netlink_audit_trans);
2885 break;
2886 default:
2887 g_assert_not_reached();
2891 return ret;
2894 /* do_bind() Must return target values and target errnos. */
2895 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2896 socklen_t addrlen)
2898 void *addr;
2899 abi_long ret;
2901 if ((int)addrlen < 0) {
2902 return -TARGET_EINVAL;
2905 addr = alloca(addrlen+1);
2907 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2908 if (ret)
2909 return ret;
2911 return get_errno(bind(sockfd, addr, addrlen));
2914 /* do_connect() Must return target values and target errnos. */
2915 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2916 socklen_t addrlen)
2918 void *addr;
2919 abi_long ret;
2921 if ((int)addrlen < 0) {
2922 return -TARGET_EINVAL;
2925 addr = alloca(addrlen+1);
2927 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2928 if (ret)
2929 return ret;
2931 return get_errno(safe_connect(sockfd, addr, addrlen));
2934 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2935 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2936 int flags, int send)
2938 abi_long ret, len;
2939 struct msghdr msg;
2940 int count;
2941 struct iovec *vec;
2942 abi_ulong target_vec;
2944 if (msgp->msg_name) {
2945 msg.msg_namelen = tswap32(msgp->msg_namelen);
2946 msg.msg_name = alloca(msg.msg_namelen+1);
2947 ret = target_to_host_sockaddr(fd, msg.msg_name,
2948 tswapal(msgp->msg_name),
2949 msg.msg_namelen);
2950 if (ret) {
2951 goto out2;
2953 } else {
2954 msg.msg_name = NULL;
2955 msg.msg_namelen = 0;
2957 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2958 msg.msg_control = alloca(msg.msg_controllen);
2959 msg.msg_flags = tswap32(msgp->msg_flags);
2961 count = tswapal(msgp->msg_iovlen);
2962 target_vec = tswapal(msgp->msg_iov);
2963 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2964 target_vec, count, send);
2965 if (vec == NULL) {
2966 ret = -host_to_target_errno(errno);
2967 goto out2;
2969 msg.msg_iovlen = count;
2970 msg.msg_iov = vec;
2972 if (send) {
2973 if (fd_trans_target_to_host_data(fd)) {
2974 ret = fd_trans_target_to_host_data(fd)(msg.msg_iov->iov_base,
2975 msg.msg_iov->iov_len);
2976 } else {
2977 ret = target_to_host_cmsg(&msg, msgp);
2979 if (ret == 0) {
2980 ret = get_errno(safe_sendmsg(fd, &msg, flags));
2982 } else {
2983 ret = get_errno(safe_recvmsg(fd, &msg, flags));
2984 if (!is_error(ret)) {
2985 len = ret;
2986 if (fd_trans_host_to_target_data(fd)) {
2987 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
2988 msg.msg_iov->iov_len);
2989 } else {
2990 ret = host_to_target_cmsg(msgp, &msg);
2992 if (!is_error(ret)) {
2993 msgp->msg_namelen = tswap32(msg.msg_namelen);
2994 if (msg.msg_name != NULL) {
2995 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2996 msg.msg_name, msg.msg_namelen);
2997 if (ret) {
2998 goto out;
3002 ret = len;
3007 out:
3008 unlock_iovec(vec, target_vec, count, !send);
3009 out2:
3010 return ret;
3013 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3014 int flags, int send)
3016 abi_long ret;
3017 struct target_msghdr *msgp;
3019 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3020 msgp,
3021 target_msg,
3022 send ? 1 : 0)) {
3023 return -TARGET_EFAULT;
3025 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3026 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3027 return ret;
3030 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3031 * so it might not have this *mmsg-specific flag either.
3033 #ifndef MSG_WAITFORONE
3034 #define MSG_WAITFORONE 0x10000
3035 #endif
3037 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3038 unsigned int vlen, unsigned int flags,
3039 int send)
3041 struct target_mmsghdr *mmsgp;
3042 abi_long ret = 0;
3043 int i;
3045 if (vlen > UIO_MAXIOV) {
3046 vlen = UIO_MAXIOV;
3049 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3050 if (!mmsgp) {
3051 return -TARGET_EFAULT;
3054 for (i = 0; i < vlen; i++) {
3055 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3056 if (is_error(ret)) {
3057 break;
3059 mmsgp[i].msg_len = tswap32(ret);
3060 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3061 if (flags & MSG_WAITFORONE) {
3062 flags |= MSG_DONTWAIT;
3066 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3068 /* Return number of datagrams sent if we sent any at all;
3069 * otherwise return the error.
3071 if (i) {
3072 return i;
3074 return ret;
3077 /* do_accept4() Must return target values and target errnos. */
3078 static abi_long do_accept4(int fd, abi_ulong target_addr,
3079 abi_ulong target_addrlen_addr, int flags)
3081 socklen_t addrlen;
3082 void *addr;
3083 abi_long ret;
3084 int host_flags;
3086 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3088 if (target_addr == 0) {
3089 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3092 /* linux returns EINVAL if addrlen pointer is invalid */
3093 if (get_user_u32(addrlen, target_addrlen_addr))
3094 return -TARGET_EINVAL;
3096 if ((int)addrlen < 0) {
3097 return -TARGET_EINVAL;
3100 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3101 return -TARGET_EINVAL;
3103 addr = alloca(addrlen);
3105 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3106 if (!is_error(ret)) {
3107 host_to_target_sockaddr(target_addr, addr, addrlen);
3108 if (put_user_u32(addrlen, target_addrlen_addr))
3109 ret = -TARGET_EFAULT;
3111 return ret;
3114 /* do_getpeername() Must return target values and target errnos. */
3115 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3116 abi_ulong target_addrlen_addr)
3118 socklen_t addrlen;
3119 void *addr;
3120 abi_long ret;
3122 if (get_user_u32(addrlen, target_addrlen_addr))
3123 return -TARGET_EFAULT;
3125 if ((int)addrlen < 0) {
3126 return -TARGET_EINVAL;
3129 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3130 return -TARGET_EFAULT;
3132 addr = alloca(addrlen);
3134 ret = get_errno(getpeername(fd, addr, &addrlen));
3135 if (!is_error(ret)) {
3136 host_to_target_sockaddr(target_addr, addr, addrlen);
3137 if (put_user_u32(addrlen, target_addrlen_addr))
3138 ret = -TARGET_EFAULT;
3140 return ret;
3143 /* do_getsockname() Must return target values and target errnos. */
3144 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3145 abi_ulong target_addrlen_addr)
3147 socklen_t addrlen;
3148 void *addr;
3149 abi_long ret;
3151 if (get_user_u32(addrlen, target_addrlen_addr))
3152 return -TARGET_EFAULT;
3154 if ((int)addrlen < 0) {
3155 return -TARGET_EINVAL;
3158 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3159 return -TARGET_EFAULT;
3161 addr = alloca(addrlen);
3163 ret = get_errno(getsockname(fd, addr, &addrlen));
3164 if (!is_error(ret)) {
3165 host_to_target_sockaddr(target_addr, addr, addrlen);
3166 if (put_user_u32(addrlen, target_addrlen_addr))
3167 ret = -TARGET_EFAULT;
3169 return ret;
3172 /* do_socketpair() Must return target values and target errnos. */
3173 static abi_long do_socketpair(int domain, int type, int protocol,
3174 abi_ulong target_tab_addr)
3176 int tab[2];
3177 abi_long ret;
3179 target_to_host_sock_type(&type);
3181 ret = get_errno(socketpair(domain, type, protocol, tab));
3182 if (!is_error(ret)) {
3183 if (put_user_s32(tab[0], target_tab_addr)
3184 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3185 ret = -TARGET_EFAULT;
3187 return ret;
3190 /* do_sendto() Must return target values and target errnos. */
3191 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3192 abi_ulong target_addr, socklen_t addrlen)
3194 void *addr;
3195 void *host_msg;
3196 abi_long ret;
3198 if ((int)addrlen < 0) {
3199 return -TARGET_EINVAL;
3202 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3203 if (!host_msg)
3204 return -TARGET_EFAULT;
3205 if (fd_trans_target_to_host_data(fd)) {
3206 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3207 if (ret < 0) {
3208 unlock_user(host_msg, msg, 0);
3209 return ret;
3212 if (target_addr) {
3213 addr = alloca(addrlen+1);
3214 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3215 if (ret) {
3216 unlock_user(host_msg, msg, 0);
3217 return ret;
3219 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3220 } else {
3221 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3223 unlock_user(host_msg, msg, 0);
3224 return ret;
3227 /* do_recvfrom() Must return target values and target errnos. */
3228 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3229 abi_ulong target_addr,
3230 abi_ulong target_addrlen)
3232 socklen_t addrlen;
3233 void *addr;
3234 void *host_msg;
3235 abi_long ret;
3237 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3238 if (!host_msg)
3239 return -TARGET_EFAULT;
3240 if (target_addr) {
3241 if (get_user_u32(addrlen, target_addrlen)) {
3242 ret = -TARGET_EFAULT;
3243 goto fail;
3245 if ((int)addrlen < 0) {
3246 ret = -TARGET_EINVAL;
3247 goto fail;
3249 addr = alloca(addrlen);
3250 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3251 addr, &addrlen));
3252 } else {
3253 addr = NULL; /* To keep compiler quiet. */
3254 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3256 if (!is_error(ret)) {
3257 if (target_addr) {
3258 host_to_target_sockaddr(target_addr, addr, addrlen);
3259 if (put_user_u32(addrlen, target_addrlen)) {
3260 ret = -TARGET_EFAULT;
3261 goto fail;
3264 unlock_user(host_msg, msg, len);
3265 } else {
3266 fail:
3267 unlock_user(host_msg, msg, 0);
3269 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the legacy socketcall(2) syscall: fetches the
 * per-call argument vector from guest memory, then dispatches to the
 * matching do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
#define N_SHM_REGIONS 32

/* Bookkeeping for guest shmat() attachments: do_shmat() records each
 * mapping here so do_shmdt() can later clear the page flags for the
 * detached range.  A fixed table of N_SHM_REGIONS slots; a slot is
 * free when in_use is false. */
static struct shm_region {
    abi_ulong start;   /* guest address the segment is attached at */
    abi_ulong size;    /* segment size in bytes (from shm_segsz) */
    bool in_use;
} shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct semid_ds.  The padding words only exist
 * on targets where the kernel ABI has them (not PPC64), hence the
 * conditional __unused fields. */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;             /* last semop() time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;             /* last change time */
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;             /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
/* Copy the guest struct ipc_perm embedded at the head of the guest
 * semid_ds at target_addr into *host_ip, byteswapping each field.
 * mode and __seq are 32-bit on some targets and 16-bit on others,
 * hence the per-architecture tswap width.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* ipc_perm is the first member of all the *id_ds layouts, so this
     * helper is shared by the sem/msg/shm conversion routines. */
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Mirror of target_to_host_ipc_perm(): write *host_ip into the guest
 * ipc_perm at the head of the guest semid_ds at target_addr, with the
 * same per-architecture field widths.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3440 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3441 abi_ulong target_addr)
3443 struct target_semid_ds *target_sd;
3445 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3446 return -TARGET_EFAULT;
3447 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3448 return -TARGET_EFAULT;
3449 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3450 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3451 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3452 unlock_user_struct(target_sd, target_addr, 0);
3453 return 0;
3456 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3457 struct semid_ds *host_sd)
3459 struct target_semid_ds *target_sd;
3461 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3462 return -TARGET_EFAULT;
3463 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3464 return -TARGET_EFAULT;
3465 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3466 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3467 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3468 unlock_user_struct(target_sd, target_addr, 1);
3469 return 0;
/* Guest-ABI layout of struct seminfo, as returned by
 * semctl(IPC_INFO/SEM_INFO).  All fields are plain ints on every
 * target, so no conditional padding is needed. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host struct seminfo out to guest memory at target_addr,
 * field by field via __put_user (which byteswaps as needed).
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl() argument union (glibc does not export one). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view of the same union: every pointer member is a guest
 * address (abi_ulong), so on 64-bit big-endian hosts reading .val out
 * of a guest 8-byte union needs the swap dance in do_semctl(). */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Read the guest's SETALL value array at target_addr into a freshly
 * allocated host array (*host_array).  The semaphore count is obtained
 * from the kernel via IPC_STAT because the guest does not pass it.
 * On success the caller owns *host_array (freed by
 * host_to_target_semarray()).  Returns 0, -TARGET_ENOMEM, or a
 * negative errno / -TARGET_EFAULT on failure. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    /* NOTE(review): g_try_new() returns NULL for a zero-element
     * request, so nsems == 0 would be misreported as -TARGET_ENOMEM —
     * confirm whether a zero-semaphore set can reach this path. */
    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3555 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3556 unsigned short **host_array)
3558 int nsems;
3559 unsigned short *array;
3560 union semun semun;
3561 struct semid_ds semid_ds;
3562 int i, ret;
3564 semun.buf = &semid_ds;
3566 ret = semctl(semid, 0, IPC_STAT, semun);
3567 if (ret == -1)
3568 return get_errno(ret);
3570 nsems = semid_ds.sem_nsems;
3572 array = lock_user(VERIFY_WRITE, target_addr,
3573 nsems*sizeof(unsigned short), 0);
3574 if (!array)
3575 return -TARGET_EFAULT;
3577 for(i=0; i<nsems; i++) {
3578 __put_user((*host_array)[i], &array[i]);
3580 g_free(*host_array);
3581 unlock_user(array, target_addr, 1);
3583 return 0;
/* Emulate semctl(2).  target_arg is the guest's semun union passed by
 * value; each command class converts its operand in, performs the host
 * semctl, and converts results back out.
 * Returns the host result or a target errno. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;   /* strip IPC_64 etc. flag bits */

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* array is allocated by target_to_host_semarray() and released
         * by host_to_target_semarray(). */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* these commands take no semun argument */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct sembuf (semop() operation descriptor);
 * identical field widths on all targets. */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index in the set */
    short sem_op;             /* operation: +n, -n or 0 (wait-for-zero) */
    short sem_flg;            /* IPC_NOWAIT / SEM_UNDO */
};
/* Copy an array of nsops guest sembufs at target_addr into the
 * caller-provided host_sembuf array.
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
/* Emulate semop(2): convert the guest op array and issue it via the
 * signal-safe semtimedop wrapper (NULL timeout == plain semop). */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    /* NOTE(review): VLA sized by the guest-controlled nsops with no
     * upper bound — a huge value could overflow the stack before the
     * kernel rejects it (SEMOPM); consider clamping.  TODO confirm. */
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
/* Guest-ABI layout of struct msqid_ds.  32-bit ABIs carry padding
 * words after each time field; 64-bit ABIs do not. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;          /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;          /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;          /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;       /* current bytes in queue */
    abi_ulong msg_qnum;           /* messages in queue */
    abi_ulong msg_qbytes;         /* max bytes allowed in queue */
    abi_ulong msg_lspid;          /* pid of last msgsnd() */
    abi_ulong msg_lrpid;          /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3719 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3720 abi_ulong target_addr)
3722 struct target_msqid_ds *target_md;
3724 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3725 return -TARGET_EFAULT;
3726 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3727 return -TARGET_EFAULT;
3728 host_md->msg_stime = tswapal(target_md->msg_stime);
3729 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3730 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3731 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3732 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3733 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3734 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3735 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3736 unlock_user_struct(target_md, target_addr, 0);
3737 return 0;
3740 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3741 struct msqid_ds *host_md)
3743 struct target_msqid_ds *target_md;
3745 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3746 return -TARGET_EFAULT;
3747 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3748 return -TARGET_EFAULT;
3749 target_md->msg_stime = tswapal(host_md->msg_stime);
3750 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3751 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3752 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3753 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3754 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3755 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3756 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3757 unlock_user_struct(target_md, target_addr, 1);
3758 return 0;
/* Guest-ABI layout of struct msginfo, as returned by
 * msgctl(IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/* Copy a host struct msginfo out to guest memory at target_addr,
 * field by field via __put_user (which byteswaps as needed).
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2): convert the command's operand structure between
 * guest and host layouts around the host msgctl call.
 * Returns the host result or a target errno. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip IPC_64 etc. flag bits */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* the kernel's IPC_INFO/MSG_INFO take a msginfo through the
         * msqid_ds-typed argument, hence the cast */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct msgbuf: the mtype word followed by the
 * variable-length message text (declared [1], indexed past the end
 * as the kernel's msgbuf is). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2): copy the guest msgbuf into a host-format buffer
 * (byteswapping mtype, copying mtext verbatim) and send it via the
 * signal-safe wrapper.  Returns 0 or a target errno. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* host mtype is a long, hence sizeof(long) + payload */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host buffer, then copy the
 * payload and the byteswapped mtype back to the guest msgbuf.
 * Returns the received length or a target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* lock exactly the received bytes of the guest mtext; mtype
         * occupies one abi_ulong at the start of the guest msgbuf */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is non-NULL on every path that reaches here */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
3897 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3898 abi_ulong target_addr)
3900 struct target_shmid_ds *target_sd;
3902 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3903 return -TARGET_EFAULT;
3904 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3905 return -TARGET_EFAULT;
3906 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3907 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3908 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3909 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3910 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3911 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3912 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3913 unlock_user_struct(target_sd, target_addr, 0);
3914 return 0;
3917 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3918 struct shmid_ds *host_sd)
3920 struct target_shmid_ds *target_sd;
3922 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3923 return -TARGET_EFAULT;
3924 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3925 return -TARGET_EFAULT;
3926 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3927 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3928 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3929 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3930 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3931 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3932 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3933 unlock_user_struct(target_sd, target_addr, 1);
3934 return 0;
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
/* Copy a host struct shminfo out to guest memory at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO result). */
struct target_shm_info {
    int used_ids;             /* currently existing segments */
    abi_ulong shm_tot;        /* total allocated shm (pages) */
    abi_ulong shm_rss;        /* resident shm (pages) */
    abi_ulong shm_swp;        /* swapped shm (pages) */
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/* Copy a host struct shm_info out to guest memory at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2): convert the command's operand structure between
 * guest and host layouts around the host shmctl call.
 * Returns the host result or a target errno. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip IPC_64 etc. flag bits */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* kernel returns a shminfo through the shmid_ds-typed arg */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* likewise a shm_info for SHM_INFO */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2): attach the host segment, either at the guest's
 * requested address or at a hole found by mmap_find_vma(), mark the
 * guest pages valid, and record the mapping for do_shmdt().
 * Returns the guest attach address or a target errno. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    /* NOTE(review): raddr is signed abi_long while the function returns
     * abi_ulong; the h2g() result round-trips through it — confirm no
     * sign-extension issue on 32-bit-guest/64-bit-host combinations. */
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* hold the mmap lock across VMA search + attach so another thread
     * cannot grab the hole in between */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the hole found above is already reserved */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* make the attached range visible to the guest page tables */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* remember the mapping so do_shmdt() can undo the page flags;
     * if the table is full the attach still succeeds, silently */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Emulate shmdt(2): clear the page flags recorded by do_shmat() for
 * this attachment (if found) and detach the host mapping. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 *
 * Multiplexed SysV IPC entry point (the single ipc(2) syscall used by
 * several 32-bit ABIs): the low 16 bits of 'call' select the
 * operation, the high 16 bits carry the ABI "version". */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        /* NOTE(review): get_user_ual()'s return value is ignored here;
         * a bad ptr proceeds with an indeterminate atptr instead of
         * returning -TARGET_EFAULT — confirm and fix upstream. */
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old ABI: ptr points at a {msgp, msgtyp} pair */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions
 *
 * syscall_types.h is included twice with different STRUCT macro
 * definitions: first to build an enum of STRUCT_<name> ids, then to
 * emit one argtype descriptor table per structure.  STRUCT_SPECIAL
 * entries get an enum id but no generated table (they are converted
 * by hand elsewhere). */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* second pass: per-structure thunk descriptor arrays */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Custom conversion hook for ioctls the generic thunk machinery
 * cannot handle; receives a scratch buffer for the host-side copy. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;          /* guest ioctl request number */
    unsigned int host_cmd;   /* corresponding host request number */
    const char *name;        /* for strace-style logging */
    int access;              /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;   /* non-NULL for special-cased ioctls */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* upper bound for the fixed scratch buffer used for ioctl payloads */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Special-cased FS_IOC_FIEMAP handler: the payload is a struct fiemap
 * followed by fm_extent_count struct fiemap_extents filled in by the
 * kernel, so both directions need a variable-length conversion. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* reject counts that would overflow the size computation below */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4325 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4326 int fd, int cmd, abi_long arg)
4328 const argtype *arg_type = ie->arg_type;
4329 int target_size;
4330 void *argptr;
4331 int ret;
4332 struct ifconf *host_ifconf;
4333 uint32_t outbufsz;
4334 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4335 int target_ifreq_size;
4336 int nb_ifreq;
4337 int free_buf = 0;
4338 int i;
4339 int target_ifc_len;
4340 abi_long target_ifc_buf;
4341 int host_ifc_len;
4342 char *host_ifc_buf;
4344 assert(arg_type[0] == TYPE_PTR);
4345 assert(ie->access == IOC_RW);
4347 arg_type++;
4348 target_size = thunk_type_size(arg_type, 0);
4350 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4351 if (!argptr)
4352 return -TARGET_EFAULT;
4353 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4354 unlock_user(argptr, arg, 0);
4356 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4357 target_ifc_len = host_ifconf->ifc_len;
4358 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4360 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4361 nb_ifreq = target_ifc_len / target_ifreq_size;
4362 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4364 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4365 if (outbufsz > MAX_STRUCT_SIZE) {
4366 /* We can't fit all the extents into the fixed size buffer.
4367 * Allocate one that is large enough and use it instead.
4369 host_ifconf = malloc(outbufsz);
4370 if (!host_ifconf) {
4371 return -TARGET_ENOMEM;
4373 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4374 free_buf = 1;
4376 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4378 host_ifconf->ifc_len = host_ifc_len;
4379 host_ifconf->ifc_buf = host_ifc_buf;
4381 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4382 if (!is_error(ret)) {
4383 /* convert host ifc_len to target ifc_len */
4385 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4386 target_ifc_len = nb_ifreq * target_ifreq_size;
4387 host_ifconf->ifc_len = target_ifc_len;
4389 /* restore target ifc_buf */
4391 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4393 /* copy struct ifconf to target user */
4395 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4396 if (!argptr)
4397 return -TARGET_EFAULT;
4398 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4399 unlock_user(argptr, arg, target_size);
4401 /* copy ifreq[] to target user */
4403 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4404 for (i = 0; i < nb_ifreq ; i++) {
4405 thunk_convert(argptr + i * target_ifreq_size,
4406 host_ifc_buf + i * sizeof(struct ifreq),
4407 ifreq_arg_type, THUNK_TARGET);
4409 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4412 if (free_buf) {
4413 free(host_ifconf);
4416 return ret;
4419 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4420 int cmd, abi_long arg)
4422 void *argptr;
4423 struct dm_ioctl *host_dm;
4424 abi_long guest_data;
4425 uint32_t guest_data_size;
4426 int target_size;
4427 const argtype *arg_type = ie->arg_type;
4428 abi_long ret;
4429 void *big_buf = NULL;
4430 char *host_data;
4432 arg_type++;
4433 target_size = thunk_type_size(arg_type, 0);
4434 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4435 if (!argptr) {
4436 ret = -TARGET_EFAULT;
4437 goto out;
4439 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4440 unlock_user(argptr, arg, 0);
4442 /* buf_temp is too small, so fetch things into a bigger buffer */
4443 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4444 memcpy(big_buf, buf_temp, target_size);
4445 buf_temp = big_buf;
4446 host_dm = big_buf;
4448 guest_data = arg + host_dm->data_start;
4449 if ((guest_data - arg) < 0) {
4450 ret = -EINVAL;
4451 goto out;
4453 guest_data_size = host_dm->data_size - host_dm->data_start;
4454 host_data = (char*)host_dm + host_dm->data_start;
4456 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4457 switch (ie->host_cmd) {
4458 case DM_REMOVE_ALL:
4459 case DM_LIST_DEVICES:
4460 case DM_DEV_CREATE:
4461 case DM_DEV_REMOVE:
4462 case DM_DEV_SUSPEND:
4463 case DM_DEV_STATUS:
4464 case DM_DEV_WAIT:
4465 case DM_TABLE_STATUS:
4466 case DM_TABLE_CLEAR:
4467 case DM_TABLE_DEPS:
4468 case DM_LIST_VERSIONS:
4469 /* no input data */
4470 break;
4471 case DM_DEV_RENAME:
4472 case DM_DEV_SET_GEOMETRY:
4473 /* data contains only strings */
4474 memcpy(host_data, argptr, guest_data_size);
4475 break;
4476 case DM_TARGET_MSG:
4477 memcpy(host_data, argptr, guest_data_size);
4478 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4479 break;
4480 case DM_TABLE_LOAD:
4482 void *gspec = argptr;
4483 void *cur_data = host_data;
4484 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4485 int spec_size = thunk_type_size(arg_type, 0);
4486 int i;
4488 for (i = 0; i < host_dm->target_count; i++) {
4489 struct dm_target_spec *spec = cur_data;
4490 uint32_t next;
4491 int slen;
4493 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4494 slen = strlen((char*)gspec + spec_size) + 1;
4495 next = spec->next;
4496 spec->next = sizeof(*spec) + slen;
4497 strcpy((char*)&spec[1], gspec + spec_size);
4498 gspec += next;
4499 cur_data += spec->next;
4501 break;
4503 default:
4504 ret = -TARGET_EINVAL;
4505 unlock_user(argptr, guest_data, 0);
4506 goto out;
4508 unlock_user(argptr, guest_data, 0);
4510 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4511 if (!is_error(ret)) {
4512 guest_data = arg + host_dm->data_start;
4513 guest_data_size = host_dm->data_size - host_dm->data_start;
4514 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4515 switch (ie->host_cmd) {
4516 case DM_REMOVE_ALL:
4517 case DM_DEV_CREATE:
4518 case DM_DEV_REMOVE:
4519 case DM_DEV_RENAME:
4520 case DM_DEV_SUSPEND:
4521 case DM_DEV_STATUS:
4522 case DM_TABLE_LOAD:
4523 case DM_TABLE_CLEAR:
4524 case DM_TARGET_MSG:
4525 case DM_DEV_SET_GEOMETRY:
4526 /* no return data */
4527 break;
4528 case DM_LIST_DEVICES:
4530 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4531 uint32_t remaining_data = guest_data_size;
4532 void *cur_data = argptr;
4533 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4534 int nl_size = 12; /* can't use thunk_size due to alignment */
4536 while (1) {
4537 uint32_t next = nl->next;
4538 if (next) {
4539 nl->next = nl_size + (strlen(nl->name) + 1);
4541 if (remaining_data < nl->next) {
4542 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4543 break;
4545 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4546 strcpy(cur_data + nl_size, nl->name);
4547 cur_data += nl->next;
4548 remaining_data -= nl->next;
4549 if (!next) {
4550 break;
4552 nl = (void*)nl + next;
4554 break;
4556 case DM_DEV_WAIT:
4557 case DM_TABLE_STATUS:
4559 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4560 void *cur_data = argptr;
4561 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4562 int spec_size = thunk_type_size(arg_type, 0);
4563 int i;
4565 for (i = 0; i < host_dm->target_count; i++) {
4566 uint32_t next = spec->next;
4567 int slen = strlen((char*)&spec[1]) + 1;
4568 spec->next = (cur_data - argptr) + spec_size + slen;
4569 if (guest_data_size < spec->next) {
4570 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4571 break;
4573 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4574 strcpy(cur_data + spec_size, (char*)&spec[1]);
4575 cur_data = argptr + spec->next;
4576 spec = (void*)host_dm + host_dm->data_start + next;
4578 break;
4580 case DM_TABLE_DEPS:
4582 void *hdata = (void*)host_dm + host_dm->data_start;
4583 int count = *(uint32_t*)hdata;
4584 uint64_t *hdev = hdata + 8;
4585 uint64_t *gdev = argptr + 8;
4586 int i;
4588 *(uint32_t*)argptr = tswap32(count);
4589 for (i = 0; i < count; i++) {
4590 *gdev = tswap64(*hdev);
4591 gdev++;
4592 hdev++;
4594 break;
4596 case DM_LIST_VERSIONS:
4598 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4599 uint32_t remaining_data = guest_data_size;
4600 void *cur_data = argptr;
4601 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4602 int vers_size = thunk_type_size(arg_type, 0);
4604 while (1) {
4605 uint32_t next = vers->next;
4606 if (next) {
4607 vers->next = vers_size + (strlen(vers->name) + 1);
4609 if (remaining_data < vers->next) {
4610 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4611 break;
4613 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4614 strcpy(cur_data + vers_size, vers->name);
4615 cur_data += vers->next;
4616 remaining_data -= vers->next;
4617 if (!next) {
4618 break;
4620 vers = (void*)vers + next;
4622 break;
4624 default:
4625 unlock_user(argptr, guest_data, 0);
4626 ret = -TARGET_EINVAL;
4627 goto out;
4629 unlock_user(argptr, guest_data, guest_data_size);
4631 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4632 if (!argptr) {
4633 ret = -TARGET_EFAULT;
4634 goto out;
4636 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4637 unlock_user(argptr, arg, target_size);
4639 out:
4640 g_free(big_buf);
4641 return ret;
/*
 * Handle the BLKPG block-device partition ioctl.
 *
 * struct blkpg_ioctl_arg embeds a pointer ('data') to a separate
 * struct blkpg_partition, so a single thunk pass is not enough: the
 * outer struct is converted first, then the pointed-to partition
 * struct, and the host copy's data pointer is redirected to a local
 * buffer before calling the host ioctl.
 *
 * Returns the host ioctl result, or a negative target errno.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;  /* skip the TYPE_PTR marker to reach the struct type */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
4697 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4698 int fd, int cmd, abi_long arg)
4700 const argtype *arg_type = ie->arg_type;
4701 const StructEntry *se;
4702 const argtype *field_types;
4703 const int *dst_offsets, *src_offsets;
4704 int target_size;
4705 void *argptr;
4706 abi_ulong *target_rt_dev_ptr;
4707 unsigned long *host_rt_dev_ptr;
4708 abi_long ret;
4709 int i;
4711 assert(ie->access == IOC_W);
4712 assert(*arg_type == TYPE_PTR);
4713 arg_type++;
4714 assert(*arg_type == TYPE_STRUCT);
4715 target_size = thunk_type_size(arg_type, 0);
4716 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4717 if (!argptr) {
4718 return -TARGET_EFAULT;
4720 arg_type++;
4721 assert(*arg_type == (int)STRUCT_rtentry);
4722 se = struct_entries + *arg_type++;
4723 assert(se->convert[0] == NULL);
4724 /* convert struct here to be able to catch rt_dev string */
4725 field_types = se->field_types;
4726 dst_offsets = se->field_offsets[THUNK_HOST];
4727 src_offsets = se->field_offsets[THUNK_TARGET];
4728 for (i = 0; i < se->nb_fields; i++) {
4729 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4730 assert(*field_types == TYPE_PTRVOID);
4731 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4732 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4733 if (*target_rt_dev_ptr != 0) {
4734 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4735 tswapal(*target_rt_dev_ptr));
4736 if (!*host_rt_dev_ptr) {
4737 unlock_user(argptr, arg, 0);
4738 return -TARGET_EFAULT;
4740 } else {
4741 *host_rt_dev_ptr = 0;
4743 field_types++;
4744 continue;
4746 field_types = thunk_convert(buf_temp + dst_offsets[i],
4747 argptr + src_offsets[i],
4748 field_types, THUNK_HOST);
4750 unlock_user(argptr, arg, 0);
4752 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4753 if (*host_rt_dev_ptr != 0) {
4754 unlock_user((void *)*host_rt_dev_ptr,
4755 *target_rt_dev_ptr, 0);
4757 return ret;
4760 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4761 int fd, int cmd, abi_long arg)
4763 int sig = target_to_host_signal(arg);
4764 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * Table of all supported ioctls, generated from ioctls.h.
 * Each entry holds { target_cmd, host_cmd, name, access, do_ioctl, arg types }:
 * IOCTL() entries use the generic marshalling in do_ioctl() (do_ioctl = 0),
 * while IOCTL_SPECIAL() entries supply a custom handler function.
 * The list is terminated by an all-zero sentinel entry.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)           \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the ioctl table for the target command number;
     * the table ends with a zero sentinel entry. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Entries registered via IOCTL_SPECIAL() have a custom handler. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    /* Generic marshalling driven by the entry's argument type. */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* argument is passed through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* argument points to a struct; convert it according to the
         * entry's declared access direction */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* read-only for the guest: run the ioctl, then copy the
             * result struct out to target memory */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* write-only for the guest: copy the struct in, then run
             * the ioctl */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
            /* deliberate fallthrough: unknown access modes are treated
             * as read/write */
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Translation table for termios c_iflag (input mode) bits between
 * target and host encodings; terminated by an all-zero entry.
 * Each row pairs a target mask/value with the host mask/value used by
 * target_to_host_bitmask()/host_to_target_bitmask(). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* Translation table for termios c_oflag (output mode) bits.
 * Multi-bit delay fields (NLDLY, CRDLY, TABDLY, ...) use the field
 * width as the mask and each delay value as a separate row. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* Translation table for termios c_cflag (control mode) bits.
 * Baud rates (CBAUD field) and character sizes (CSIZE field) are
 * multi-bit fields, so each value gets its own row under the field mask. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* Translation table for termios c_lflag (local mode) bits between
 * target and host encodings; terminated by an all-zero entry. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/*
 * Convert a target struct termios (byte-swapped, target flag encoding)
 * into the host representation.  Mode flags go through the bitmask
 * translation tables above; control characters are copied individually
 * so that differing TARGET_V* vs host V* index values are handled.
 * Used as the THUNK_HOST converter in struct_termios_def below.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Zero first: host slots with no target equivalent stay cleared. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Convert a host struct termios into the target representation
 * (target flag encoding, target byte order).  Exact inverse of
 * target_to_host_termios(); used as the THUNK_TARGET converter in
 * struct_termios_def below.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Zero first: target slots with no host equivalent stay cleared. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: conversion is done by the custom
 * converter pair above (index 0 = to target, index 1 = to host) rather
 * than by field-wise thunking. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
/* Translation table for mmap() flag bits between target and host
 * encodings; terminated by an all-zero entry. */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
	  MAP_NORESERVE },
	{ 0, 0, 0, 0 }
};
5053 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT, allocated lazily by write_ldt(). */
static uint8_t *ldt_table;
5058 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5060 int size;
5061 void *p;
5063 if (!ldt_table)
5064 return 0;
5065 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5066 if (size > bytecount)
5067 size = bytecount;
5068 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5069 if (!p)
5070 return -TARGET_EFAULT;
5071 /* ??? Should this by byteswapped? */
5072 memcpy(p, ldt_table, size);
5073 unlock_user(p, ptr, size);
5074 return size;
5077 /* XXX: add locking support */
5078 static abi_long write_ldt(CPUX86State *env,
5079 abi_ulong ptr, unsigned long bytecount, int oldmode)
5081 struct target_modify_ldt_ldt_s ldt_info;
5082 struct target_modify_ldt_ldt_s *target_ldt_info;
5083 int seg_32bit, contents, read_exec_only, limit_in_pages;
5084 int seg_not_present, useable, lm;
5085 uint32_t *lp, entry_1, entry_2;
5087 if (bytecount != sizeof(ldt_info))
5088 return -TARGET_EINVAL;
5089 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5090 return -TARGET_EFAULT;
5091 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5092 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5093 ldt_info.limit = tswap32(target_ldt_info->limit);
5094 ldt_info.flags = tswap32(target_ldt_info->flags);
5095 unlock_user_struct(target_ldt_info, ptr, 0);
5097 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5098 return -TARGET_EINVAL;
5099 seg_32bit = ldt_info.flags & 1;
5100 contents = (ldt_info.flags >> 1) & 3;
5101 read_exec_only = (ldt_info.flags >> 3) & 1;
5102 limit_in_pages = (ldt_info.flags >> 4) & 1;
5103 seg_not_present = (ldt_info.flags >> 5) & 1;
5104 useable = (ldt_info.flags >> 6) & 1;
5105 #ifdef TARGET_ABI32
5106 lm = 0;
5107 #else
5108 lm = (ldt_info.flags >> 7) & 1;
5109 #endif
5110 if (contents == 3) {
5111 if (oldmode)
5112 return -TARGET_EINVAL;
5113 if (seg_not_present == 0)
5114 return -TARGET_EINVAL;
5116 /* allocate the LDT */
5117 if (!ldt_table) {
5118 env->ldt.base = target_mmap(0,
5119 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5120 PROT_READ|PROT_WRITE,
5121 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5122 if (env->ldt.base == -1)
5123 return -TARGET_ENOMEM;
5124 memset(g2h(env->ldt.base), 0,
5125 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5126 env->ldt.limit = 0xffff;
5127 ldt_table = g2h(env->ldt.base);
5130 /* NOTE: same code as Linux kernel */
5131 /* Allow LDTs to be cleared by the user. */
5132 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5133 if (oldmode ||
5134 (contents == 0 &&
5135 read_exec_only == 1 &&
5136 seg_32bit == 0 &&
5137 limit_in_pages == 0 &&
5138 seg_not_present == 1 &&
5139 useable == 0 )) {
5140 entry_1 = 0;
5141 entry_2 = 0;
5142 goto install;
5146 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5147 (ldt_info.limit & 0x0ffff);
5148 entry_2 = (ldt_info.base_addr & 0xff000000) |
5149 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5150 (ldt_info.limit & 0xf0000) |
5151 ((read_exec_only ^ 1) << 9) |
5152 (contents << 10) |
5153 ((seg_not_present ^ 1) << 15) |
5154 (seg_32bit << 22) |
5155 (limit_in_pages << 23) |
5156 (lm << 21) |
5157 0x7000;
5158 if (!oldmode)
5159 entry_2 |= (useable << 20);
5161 /* Install the new entry ... */
5162 install:
5163 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5164 lp[0] = tswap32(entry_1);
5165 lp[1] = tswap32(entry_2);
5166 return 0;
5169 /* specific and weird i386 syscalls */
5170 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5171 unsigned long bytecount)
5173 abi_long ret;
5175 switch (func) {
5176 case 0:
5177 ret = read_ldt(ptr, bytecount);
5178 break;
5179 case 1:
5180 ret = write_ldt(env, ptr, bytecount, 1);
5181 break;
5182 case 0x11:
5183 ret = write_ldt(env, ptr, bytecount, 0);
5184 break;
5185 default:
5186 ret = -TARGET_ENOSYS;
5187 break;
5189 return ret;
5192 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor in the emulated
 * GDT.  If the guest passes entry_number == -1, the first free TLS slot
 * is picked and written back to the guest struct.  The descriptor
 * packing mirrors write_ldt() above (and the Linux kernel).
 *
 * Returns 0 on success or a negative target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and report the chosen index back to
         * the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as modify_ldt's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        /* conforming code segment must be marked not-present */
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the x86 segment descriptor; see write_ldt() for the layout. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * Emulate get_thread_area(2): read the TLS descriptor at the requested
 * GDT index, unpack the two 32-bit descriptor halves back into the
 * base/limit/flags form of struct user_desc, and write the result to
 * the guest struct (exact inverse of do_set_thread_area()).
 *
 * Returns 0 on success or a negative target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the attribute bits from the descriptor's high word. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Re-pack into the user_desc flags encoding. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    /* Reassemble the 20-bit limit and 32-bit base from both halves. */
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5324 #endif /* TARGET_I386 && TARGET_ABI32 */
5326 #ifndef TARGET_ABI32
5327 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5329 abi_long ret = 0;
5330 abi_ulong val;
5331 int idx;
5333 switch(code) {
5334 case TARGET_ARCH_SET_GS:
5335 case TARGET_ARCH_SET_FS:
5336 if (code == TARGET_ARCH_SET_GS)
5337 idx = R_GS;
5338 else
5339 idx = R_FS;
5340 cpu_x86_load_seg(env, idx, 0);
5341 env->segs[idx].base = addr;
5342 break;
5343 case TARGET_ARCH_GET_GS:
5344 case TARGET_ARCH_GET_FS:
5345 if (code == TARGET_ARCH_GET_GS)
5346 idx = R_GS;
5347 else
5348 idx = R_FS;
5349 val = env->segs[idx].base;
5350 if (put_user(val, addr, abi_ulong))
5351 ret = -TARGET_EFAULT;
5352 break;
5353 default:
5354 ret = -TARGET_EINVAL;
5355 break;
5357 return ret;
5359 #endif
5361 #endif /* defined(TARGET_I386) */
/* Stack size for threads created via clone() with CLONE_VM. */
#define NEW_STACK_SIZE 0x40000

/* Held by do_fork() while a new thread initializes; clone_func() takes it
 * after signalling readiness, so the child blocks until parent-side setup
 * (e.g. TLS state) is complete. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to clone_func(). Lives on the
 * parent's stack; only valid during thread startup. */
typedef struct {
    CPUArchState *env;          /* CPU state for the new thread */
    pthread_mutex_t mutex;      /* protects the startup handshake */
    pthread_cond_t cond;        /* child signals parent when ready */
    pthread_t thread;           /* host thread handle */
    uint32_t tid;               /* host tid, filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
/*
 * Host-thread entry point for guest threads created by clone(CLONE_VM).
 *
 * Registers the thread with RCU and the CPU framework, publishes the
 * host tid to the guest (CLONE_CHILD_SETTID / CLONE_PARENT_SETTID),
 * restores the signal mask, signals the parent that startup is done,
 * then waits on clone_lock for the parent to finish TLS setup before
 * entering the guest CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.
     * NOTE: 'info' lives on the parent's stack and must not be touched
     * after this point.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
5411 /* do_fork() Must return host values and target errnos (unlike most
5412 do_*() functions). */
/* Emulate clone()/fork()/vfork(). Two paths: CLONE_VM creates a new host
 * pthread sharing this address space; otherwise we do a real host fork(). */
5413 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5414 abi_ulong parent_tidptr, target_ulong newtls,
5415 abi_ulong child_tidptr)
5417 CPUState *cpu = ENV_GET_CPU(env);
5418 int ret;
5419 TaskState *ts;
5420 CPUState *new_cpu;
5421 CPUArchState *new_env;
5422 unsigned int nptl_flags;
5423 sigset_t sigmask;
5425 /* Emulate vfork() with fork() */
5426 if (flags & CLONE_VFORK)
5427 flags &= ~(CLONE_VFORK | CLONE_VM);
/* Thread creation path: new guest CPU state + host pthread. */
5429 if (flags & CLONE_VM) {
5430 TaskState *parent_ts = (TaskState *)cpu->opaque;
5431 new_thread_info info;
5432 pthread_attr_t attr;
5434 ts = g_new0(TaskState, 1);
5435 init_task_state(ts);
5436 /* we create a new CPU instance. */
5437 new_env = cpu_copy(env);
5438 /* Init regs that differ from the parent. */
5439 cpu_clone_regs(new_env, newsp);
5440 new_cpu = ENV_GET_CPU(new_env);
5441 new_cpu->opaque = ts;
5442 ts->bprm = parent_ts->bprm;
5443 ts->info = parent_ts->info;
5444 ts->signal_mask = parent_ts->signal_mask;
5445 nptl_flags = flags;
5446 flags &= ~CLONE_NPTL_FLAGS2;
5448 if (nptl_flags & CLONE_CHILD_CLEARTID) {
5449 ts->child_tidptr = child_tidptr;
5452 if (nptl_flags & CLONE_SETTLS)
5453 cpu_set_tls (new_env, newtls);
5455 /* Grab a mutex so that thread setup appears atomic. */
5456 pthread_mutex_lock(&clone_lock);
/* info.mutex/info.cond form the parent<->child handshake used by
 * clone_func() to signal that the child thread has initialized. */
5458 memset(&info, 0, sizeof(info));
5459 pthread_mutex_init(&info.mutex, NULL);
5460 pthread_mutex_lock(&info.mutex);
5461 pthread_cond_init(&info.cond, NULL);
5462 info.env = new_env;
5463 if (nptl_flags & CLONE_CHILD_SETTID)
5464 info.child_tidptr = child_tidptr;
5465 if (nptl_flags & CLONE_PARENT_SETTID)
5466 info.parent_tidptr = parent_tidptr;
/* NOTE(review): the first three `ret` assignments are each overwritten;
 * pthread_attr_* errors are effectively ignored here. */
5468 ret = pthread_attr_init(&attr);
5469 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5470 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5471 /* It is not safe to deliver signals until the child has finished
5472 initializing, so temporarily block all signals. */
5473 sigfillset(&sigmask);
5474 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5476 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5477 /* TODO: Free new CPU state if thread creation failed. */
5479 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5480 pthread_attr_destroy(&attr);
5481 if (ret == 0) {
5482 /* Wait for the child to initialize. */
5483 pthread_cond_wait(&info.cond, &info.mutex);
5484 ret = info.tid;
5485 if (flags & CLONE_PARENT_SETTID)
5486 put_user_u32(ret, parent_tidptr);
5487 } else {
5488 ret = -1;
5490 pthread_mutex_unlock(&info.mutex);
5491 pthread_cond_destroy(&info.cond);
5492 pthread_mutex_destroy(&info.mutex);
5493 pthread_mutex_unlock(&clone_lock);
5494 } else {
5495 /* if no CLONE_VM, we consider it is a fork */
5496 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
5497 return -TARGET_EINVAL;
/* Block signals across fork() so no handler runs with half-set-up state. */
5500 if (block_signals()) {
5501 return -TARGET_ERESTARTSYS;
5504 fork_start();
5505 ret = fork();
5506 if (ret == 0) {
5507 /* Child Process. */
5508 rcu_after_fork();
5509 cpu_clone_regs(env, newsp);
5510 fork_end(1);
5511 /* There is a race condition here. The parent process could
5512 theoretically read the TID in the child process before the child
5513 tid is set. This would require using either ptrace
5514 (not implemented) or having *_tidptr to point at a shared memory
5515 mapping. We can't repeat the spinlock hack used above because
5516 the child process gets its own copy of the lock. */
5517 if (flags & CLONE_CHILD_SETTID)
5518 put_user_u32(gettid(), child_tidptr)
5519 if (flags & CLONE_PARENT_SETTID)
5520 put_user_u32(gettid(), parent_tidptr);
5521 ts = (TaskState *)cpu->opaque;
5522 if (flags & CLONE_SETTLS)
5523 cpu_set_tls (env, newtls);
5524 if (flags & CLONE_CHILD_CLEARTID)
5525 ts->child_tidptr = child_tidptr;
5526 } else {
5527 fork_end(0);
5530 return ret;
5533 /* warning : doesn't handle linux specific flags... */
/* Translate a guest fcntl(2) command number into the host's numbering.
 * Returns -TARGET_EINVAL for commands we cannot translate. */
5534 static int target_to_host_fcntl_cmd(int cmd)
5536 switch(cmd) {
/* These commands share the same numeric value on guest and host. */
5537 case TARGET_F_DUPFD:
5538 case TARGET_F_GETFD:
5539 case TARGET_F_SETFD:
5540 case TARGET_F_GETFL:
5541 case TARGET_F_SETFL:
5542 return cmd;
5543 case TARGET_F_GETLK:
5544 return F_GETLK;
5545 case TARGET_F_SETLK:
5546 return F_SETLK;
5547 case TARGET_F_SETLKW:
5548 return F_SETLKW;
5549 case TARGET_F_GETOWN:
5550 return F_GETOWN;
5551 case TARGET_F_SETOWN:
5552 return F_SETOWN;
5553 case TARGET_F_GETSIG:
5554 return F_GETSIG;
5555 case TARGET_F_SETSIG:
5556 return F_SETSIG;
/* 64-bit file lock variants only exist as separate commands on 32-bit ABIs. */
5557 #if TARGET_ABI_BITS == 32
5558 case TARGET_F_GETLK64:
5559 return F_GETLK64;
5560 case TARGET_F_SETLK64:
5561 return F_SETLK64;
5562 case TARGET_F_SETLKW64:
5563 return F_SETLKW64;
5564 #endif
5565 case TARGET_F_SETLEASE:
5566 return F_SETLEASE;
5567 case TARGET_F_GETLEASE:
5568 return F_GETLEASE;
5569 #ifdef F_DUPFD_CLOEXEC
5570 case TARGET_F_DUPFD_CLOEXEC:
5571 return F_DUPFD_CLOEXEC;
5572 #endif
5573 case TARGET_F_NOTIFY:
5574 return F_NOTIFY;
5575 #ifdef F_GETOWN_EX
5576 case TARGET_F_GETOWN_EX:
5577 return F_GETOWN_EX;
5578 #endif
5579 #ifdef F_SETOWN_EX
5580 case TARGET_F_SETOWN_EX:
5581 return F_SETOWN_EX;
5582 #endif
5583 default:
5584 return -TARGET_EINVAL;
/* Unreachable: every path above returns; kept to silence compilers. */
5586 return -TARGET_EINVAL;
/* Bitmask translation table for flock l_type values (guest <-> host).
 * Entry layout is { target_mask, target_bits, host_mask, host_bits };
 * -1 masks mean "match all bits". Terminated by an all-zero entry. */
5589 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
5590 static const bitmask_transtbl flock_tbl[] = {
5591 TRANSTBL_CONVERT(F_RDLCK),
5592 TRANSTBL_CONVERT(F_WRLCK),
5593 TRANSTBL_CONVERT(F_UNLCK),
5594 TRANSTBL_CONVERT(F_EXLCK),
5595 TRANSTBL_CONVERT(F_SHLCK),
5596 { 0, 0, 0, 0 }
/* Emulate fcntl(2): convert guest arguments (struct flock, flock64,
 * f_owner_ex, flag bitmasks) to host form, issue the host fcntl, and
 * convert results back. Returns the host result or -TARGET_E* errno. */
5599 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5601 struct flock fl;
5602 struct target_flock *target_fl;
5603 struct flock64 fl64;
5604 struct target_flock64 *target_fl64;
5605 #ifdef F_GETOWN_EX
5606 struct f_owner_ex fox;
5607 struct target_f_owner_ex *target_fox;
5608 #endif
5609 abi_long ret;
5610 int host_cmd = target_to_host_fcntl_cmd(cmd);
5612 if (host_cmd == -TARGET_EINVAL)
5613 return host_cmd;
5615 switch(cmd) {
/* F_GETLK: copy in guest flock, query host, copy the (possibly
 * updated) lock description back out on success. */
5616 case TARGET_F_GETLK:
5617 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
5618 return -TARGET_EFAULT;
5619 fl.l_type =
5620 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
5621 fl.l_whence = tswap16(target_fl->l_whence);
5622 fl.l_start = tswapal(target_fl->l_start);
5623 fl.l_len = tswapal(target_fl->l_len);
5624 fl.l_pid = tswap32(target_fl->l_pid);
5625 unlock_user_struct(target_fl, arg, 0);
5626 ret = get_errno(fcntl(fd, host_cmd, &fl));
5627 if (ret == 0) {
5628 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
5629 return -TARGET_EFAULT;
5630 target_fl->l_type =
5631 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
5632 target_fl->l_whence = tswap16(fl.l_whence);
5633 target_fl->l_start = tswapal(fl.l_start);
5634 target_fl->l_len = tswapal(fl.l_len);
5635 target_fl->l_pid = tswap32(fl.l_pid);
5636 unlock_user_struct(target_fl, arg, 1);
5638 break;
/* F_SETLK/F_SETLKW: input-only conversion of the flock struct. */
5640 case TARGET_F_SETLK:
5641 case TARGET_F_SETLKW:
5642 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
5643 return -TARGET_EFAULT;
5644 fl.l_type =
5645 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
5646 fl.l_whence = tswap16(target_fl->l_whence);
5647 fl.l_start = tswapal(target_fl->l_start);
5648 fl.l_len = tswapal(target_fl->l_len);
5649 fl.l_pid = tswap32(target_fl->l_pid);
5650 unlock_user_struct(target_fl, arg, 0);
5651 ret = get_errno(fcntl(fd, host_cmd, &fl));
5652 break;
/* 64-bit flock variants (32-bit guest ABIs).
 * NOTE(review): the `>> 1` applied to the converted l_type here has no
 * counterpart in the 32-bit paths above -- verify against the flock_tbl
 * semantics before relying on these paths. */
5654 case TARGET_F_GETLK64:
5655 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
5656 return -TARGET_EFAULT;
5657 fl64.l_type =
5658 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
5659 fl64.l_whence = tswap16(target_fl64->l_whence);
5660 fl64.l_start = tswap64(target_fl64->l_start);
5661 fl64.l_len = tswap64(target_fl64->l_len);
5662 fl64.l_pid = tswap32(target_fl64->l_pid);
5663 unlock_user_struct(target_fl64, arg, 0);
5664 ret = get_errno(fcntl(fd, host_cmd, &fl64));
5665 if (ret == 0) {
5666 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
5667 return -TARGET_EFAULT;
5668 target_fl64->l_type =
5669 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
5670 target_fl64->l_whence = tswap16(fl64.l_whence);
5671 target_fl64->l_start = tswap64(fl64.l_start);
5672 target_fl64->l_len = tswap64(fl64.l_len);
5673 target_fl64->l_pid = tswap32(fl64.l_pid);
5674 unlock_user_struct(target_fl64, arg, 1);
5676 break;
5677 case TARGET_F_SETLK64:
5678 case TARGET_F_SETLKW64:
5679 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
5680 return -TARGET_EFAULT;
5681 fl64.l_type =
5682 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
5683 fl64.l_whence = tswap16(target_fl64->l_whence);
5684 fl64.l_start = tswap64(target_fl64->l_start);
5685 fl64.l_len = tswap64(target_fl64->l_len);
5686 fl64.l_pid = tswap32(target_fl64->l_pid);
5687 unlock_user_struct(target_fl64, arg, 0);
5688 ret = get_errno(fcntl(fd, host_cmd, &fl64));
5689 break;
/* File status flags need O_* bitmask translation in both directions. */
5691 case TARGET_F_GETFL:
5692 ret = get_errno(fcntl(fd, host_cmd, arg));
5693 if (ret >= 0) {
5694 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5696 break;
5698 case TARGET_F_SETFL:
5699 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
5700 break;
5702 #ifdef F_GETOWN_EX
5703 case TARGET_F_GETOWN_EX:
5704 ret = get_errno(fcntl(fd, host_cmd, &fox));
5705 if (ret >= 0) {
5706 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5707 return -TARGET_EFAULT;
5708 target_fox->type = tswap32(fox.type);
5709 target_fox->pid = tswap32(fox.pid);
5710 unlock_user_struct(target_fox, arg, 1);
5712 break;
5713 #endif
5715 #ifdef F_SETOWN_EX
5716 case TARGET_F_SETOWN_EX:
5717 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5718 return -TARGET_EFAULT;
5719 fox.type = tswap32(target_fox->type);
5720 fox.pid = tswap32(target_fox->pid);
5721 unlock_user_struct(target_fox, arg, 0);
5722 ret = get_errno(fcntl(fd, host_cmd, &fox));
5723 break;
5724 #endif
/* Commands whose integer argument needs no conversion. */
5726 case TARGET_F_SETOWN:
5727 case TARGET_F_GETOWN:
5728 case TARGET_F_SETSIG:
5729 case TARGET_F_GETSIG:
5730 case TARGET_F_SETLEASE:
5731 case TARGET_F_GETLEASE:
5732 ret = get_errno(fcntl(fd, host_cmd, arg));
5733 break;
/* Unknown command: pass the *guest* cmd through unchanged. */
5735 default:
5736 ret = get_errno(fcntl(fd, cmd, arg));
5737 break;
5739 return ret;
/* UID/GID conversion helpers. On targets with 16-bit uid_t (USE_UID16)
 * out-of-range 32-bit IDs are clamped to the overflow value 65534, and
 * -1 (the "no change" sentinel) is sign-extended when widening. On other
 * targets these are identity conversions. */
5742 #ifdef USE_UID16
5744 static inline int high2lowuid(int uid)
5746 if (uid > 65535)
5747 return 65534;
5748 else
5749 return uid;
5752 static inline int high2lowgid(int gid)
5754 if (gid > 65535)
5755 return 65534;
5756 else
5757 return gid;
5760 static inline int low2highuid(int uid)
5762 if ((int16_t)uid == -1)
5763 return -1;
5764 else
5765 return uid;
5768 static inline int low2highgid(int gid)
5770 if ((int16_t)gid == -1)
5771 return -1;
5772 else
5773 return gid;
/* Byte-swap an ID at the width the target ABI uses (16 or 32 bits). */
5775 static inline int tswapid(int id)
5777 return tswap16(id);
5780 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5782 #else /* !USE_UID16 */
5783 static inline int high2lowuid(int uid)
5785 return uid;
5787 static inline int high2lowgid(int gid)
5789 return gid;
5791 static inline int low2highuid(int uid)
5793 return uid;
5795 static inline int low2highgid(int gid)
5797 return gid;
5799 static inline int tswapid(int id)
5801 return tswap32(id);
5804 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5806 #endif /* USE_UID16 */
5808 /* We must do direct syscalls for setting UID/GID, because we want to
5809 * implement the Linux system call semantics of "change only for this thread",
5810 * not the libc/POSIX semantics of "change for all threads in process".
5811 * (See http://ewontfix.com/17/ for more details.)
5812 * We use the 32-bit version of the syscalls if present; if it is not
5813 * then either the host architecture supports 32-bit UIDs natively with
5814 * the standard syscall, or the 16-bit UID is the best we can do.
5816 #ifdef __NR_setuid32
5817 #define __NR_sys_setuid __NR_setuid32
5818 #else
5819 #define __NR_sys_setuid __NR_setuid
5820 #endif
5821 #ifdef __NR_setgid32
5822 #define __NR_sys_setgid __NR_setgid32
5823 #else
5824 #define __NR_sys_setgid __NR_setgid
5825 #endif
5826 #ifdef __NR_setresuid32
5827 #define __NR_sys_setresuid __NR_setresuid32
5828 #else
5829 #define __NR_sys_setresuid __NR_setresuid
5830 #endif
5831 #ifdef __NR_setresgid32
5832 #define __NR_sys_setresgid __NR_setresgid32
5833 #else
5834 #define __NR_sys_setresgid __NR_setresgid
5835 #endif
/* _syscallN expands to a raw-syscall wrapper function, bypassing libc. */
5837 _syscall1(int, sys_setuid, uid_t, uid)
5838 _syscall1(int, sys_setgid, gid_t, gid)
5839 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
5840 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialization for the syscall layer: register thunk struct
 * descriptors, build the errno reverse-translation table, and patch the
 * size field of ioctl numbers whose size depends on a thunked struct. */
5842 void syscall_init(void)
5844 IOCTLEntry *ie;
5845 const argtype *arg_type;
5846 int size;
5847 int i;
5849 thunk_init(STRUCT_MAX);
5851 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5852 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5853 #include "syscall_types.h"
5854 #undef STRUCT
5855 #undef STRUCT_SPECIAL
5857 /* Build target_to_host_errno_table[] table from
5858 * host_to_target_errno_table[]. */
5859 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5860 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5863 /* we patch the ioctl size if necessary. We rely on the fact that
5864 no ioctl has all the bits at '1' in the size field */
5865 ie = ioctl_entries;
5866 while (ie->target_cmd != 0) {
5867 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5868 TARGET_IOC_SIZEMASK) {
5869 arg_type = ie->arg_type;
/* A size-patched ioctl must take a pointer argument whose pointee
 * size we can compute via the thunk machinery. */
5870 if (arg_type[0] != TYPE_PTR) {
5871 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5872 ie->target_cmd);
5873 exit(1);
5875 arg_type++;
5876 size = thunk_type_size(arg_type, 0);
5877 ie->target_cmd = (ie->target_cmd &
5878 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5879 (size << TARGET_IOC_SIZESHIFT);
5882 /* automatic consistency check if same arch */
5883 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5884 (defined(__x86_64__) && defined(TARGET_X86_64))
5885 if (unlikely(ie->target_cmd != ie->host_cmd)) {
5886 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5887 ie->name, ie->target_cmd, ie->host_cmd);
5889 #endif
5890 ie++;
/* Combine the two registers a 32-bit guest uses to pass a 64-bit file
 * offset, honouring guest endianness; on 64-bit ABIs the offset already
 * arrives whole in the first argument and the second is ignored. */
5894 #if TARGET_ABI_BITS == 32
5895 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
5897 #ifdef TARGET_WORDS_BIGENDIAN
5898 return ((uint64_t)word0 << 32) | word1;
5899 #else
5900 return ((uint64_t)word1 << 32) | word0;
5901 #endif
5903 #else /* TARGET_ABI_BITS == 32 */
5904 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
5906 return word0;
5908 #endif /* TARGET_ABI_BITS != 32 */
5910 #ifdef TARGET_NR_truncate64
/* truncate64: reassemble the 64-bit length from a guest register pair.
 * Some ABIs require 64-bit register pairs to be aligned, inserting a
 * padding register; regpairs_aligned() detects that and we shift args. */
5911 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5912 abi_long arg2,
5913 abi_long arg3,
5914 abi_long arg4)
5916 if (regpairs_aligned(cpu_env)) {
5917 arg2 = arg3;
5918 arg3 = arg4;
5920 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5922 #endif
5924 #ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64 above,
 * but operating on an open file descriptor instead of a pathname. */
5925 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5926 abi_long arg2,
5927 abi_long arg3,
5928 abi_long arg4)
5930 if (regpairs_aligned(cpu_env)) {
5931 arg2 = arg3;
5932 arg3 = arg4;
5934 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
5936 #endif
/* Copy a struct timespec from guest memory at target_addr into *host_ts,
 * byte-swapping fields as needed. Returns 0 or -TARGET_EFAULT. */
5938 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5939 abi_ulong target_addr)
5941 struct target_timespec *target_ts;
5943 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5944 return -TARGET_EFAULT;
5945 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
5946 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5947 unlock_user_struct(target_ts, target_addr, 0);
5948 return 0;
/* Copy *host_ts out to the guest struct timespec at target_addr,
 * byte-swapping fields as needed. Returns 0 or -TARGET_EFAULT. */
5951 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5952 struct timespec *host_ts)
5954 struct target_timespec *target_ts;
5956 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5957 return -TARGET_EFAULT;
5958 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
5959 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5960 unlock_user_struct(target_ts, target_addr, 1);
5961 return 0;
/* Read a guest struct itimerspec (interval + value timespec pair) from
 * target_addr into *host_itspec. Returns 0 or -TARGET_EFAULT. */
5964 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5965 abi_ulong target_addr)
5967 struct target_itimerspec *target_itspec;
5969 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5970 return -TARGET_EFAULT;
5973 host_itspec->it_interval.tv_sec =
5974 tswapal(target_itspec->it_interval.tv_sec);
5975 host_itspec->it_interval.tv_nsec =
5976 tswapal(target_itspec->it_interval.tv_nsec);
5977 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5978 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5980 unlock_user_struct(target_itspec, target_addr, 1);
5981 return 0;
/* Write *host_its out to the guest struct itimerspec at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
5984 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5985 struct itimerspec *host_its)
5987 struct target_itimerspec *target_itspec;
5989 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5990 return -TARGET_EFAULT;
5993 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5994 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5996 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5997 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5999 unlock_user_struct(target_itspec, target_addr, 0);
6000 return 0;
/* Convert a guest struct sigevent at target_addr into *host_sevp,
 * translating the signal number and byte-swapping fields.
 * Returns 0 or -TARGET_EFAULT. */
6003 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6004 abi_ulong target_addr)
6006 struct target_sigevent *target_sevp;
6008 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6009 return -TARGET_EFAULT;
6012 /* This union is awkward on 64 bit systems because it has a 32 bit
6013 * integer and a pointer in it; we follow the conversion approach
6014 * used for handling sigval types in signal.c so the guest should get
6015 * the correct value back even if we did a 64 bit byteswap and it's
6016 * using the 32 bit integer.
6018 host_sevp->sigev_value.sival_ptr =
6019 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6020 host_sevp->sigev_signo =
6021 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6022 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6023 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6025 unlock_user_struct(target_sevp, target_addr, 1);
6026 return 0;
6029 #if defined(TARGET_NR_mlockall)
/* Translate guest mlockall(2) flag bits into the host's MCL_* bits. */
6030 static inline int target_to_host_mlockall_arg(int arg)
6032 int result = 0;
6034 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6035 result |= MCL_CURRENT;
6037 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6038 result |= MCL_FUTURE;
6040 return result;
6042 #endif
/* Copy a host struct stat out to the guest's stat64 (or plain stat)
 * structure at target_addr. On 32-bit ARM the EABI variant uses a
 * different layout, handled by the first branch.
 * Returns 0 or -TARGET_EFAULT. */
6044 static inline abi_long host_to_target_stat64(void *cpu_env,
6045 abi_ulong target_addr,
6046 struct stat *host_st)
6048 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6049 if (((CPUARMState *)cpu_env)->eabi) {
6050 struct target_eabi_stat64 *target_st;
6052 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6053 return -TARGET_EFAULT;
6054 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6055 __put_user(host_st->st_dev, &target_st->st_dev);
6056 __put_user(host_st->st_ino, &target_st->st_ino);
6057 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6058 __put_user(host_st->st_ino, &target_st->__st_ino);
6059 #endif
6060 __put_user(host_st->st_mode, &target_st->st_mode);
6061 __put_user(host_st->st_nlink, &target_st->st_nlink);
6062 __put_user(host_st->st_uid, &target_st->st_uid);
6063 __put_user(host_st->st_gid, &target_st->st_gid);
6064 __put_user(host_st->st_rdev, &target_st->st_rdev);
6065 __put_user(host_st->st_size, &target_st->st_size);
6066 __put_user(host_st->st_blksize, &target_st->st_blksize);
6067 __put_user(host_st->st_blocks, &target_st->st_blocks);
6068 __put_user(host_st->st_atime, &target_st->target_st_atime);
6069 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6070 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6071 unlock_user_struct(target_st, target_addr, 1);
6072 } else
6073 #endif
/* Generic path: pick the target's stat64 layout if it has one. */
6075 #if defined(TARGET_HAS_STRUCT_STAT64)
6076 struct target_stat64 *target_st;
6077 #else
6078 struct target_stat *target_st;
6079 #endif
6081 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6082 return -TARGET_EFAULT;
6083 memset(target_st, 0, sizeof(*target_st));
6084 __put_user(host_st->st_dev, &target_st->st_dev);
6085 __put_user(host_st->st_ino, &target_st->st_ino);
6086 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6087 __put_user(host_st->st_ino, &target_st->__st_ino);
6088 #endif
6089 __put_user(host_st->st_mode, &target_st->st_mode);
6090 __put_user(host_st->st_nlink, &target_st->st_nlink);
6091 __put_user(host_st->st_uid, &target_st->st_uid);
6092 __put_user(host_st->st_gid, &target_st->st_gid);
6093 __put_user(host_st->st_rdev, &target_st->st_rdev);
6094 /* XXX: better use of kernel struct */
6095 __put_user(host_st->st_size, &target_st->st_size);
6096 __put_user(host_st->st_blksize, &target_st->st_blksize);
6097 __put_user(host_st->st_blocks, &target_st->st_blocks);
6098 __put_user(host_st->st_atime, &target_st->target_st_atime);
6099 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6100 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6101 unlock_user_struct(target_st, target_addr, 1);
6104 return 0;
6107 /* ??? Using host futex calls even when target atomic operations
6108 are not really atomic probably breaks things. However implementing
6109 futexes locally would make futexes shared between multiple processes
6110 tricky. However they're probably useless because guest atomic
6111 operations won't work either. */
/* Emulate futex(2) by forwarding to the host futex on the host address
 * backing the guest address (g2h). Only the listed base operations are
 * supported; everything else returns -TARGET_ENOSYS. */
6112 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6113 target_ulong uaddr2, int val3)
6115 struct timespec ts, *pts;
6116 int base_op;
6118 /* ??? We assume FUTEX_* constants are the same on both host
6119 and target. */
6120 #ifdef FUTEX_CMD_MASK
6121 base_op = op & FUTEX_CMD_MASK;
6122 #else
6123 base_op = op;
6124 #endif
6125 switch (base_op) {
6126 case FUTEX_WAIT:
6127 case FUTEX_WAIT_BITSET:
6128 if (timeout) {
6129 pts = &ts;
6130 target_to_host_timespec(pts, timeout);
6131 } else {
6132 pts = NULL;
/* val is compared against guest memory, so swap it to host order. */
6134 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6135 pts, NULL, val3));
6136 case FUTEX_WAKE:
6137 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6138 case FUTEX_FD:
6139 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6140 case FUTEX_REQUEUE:
6141 case FUTEX_CMP_REQUEUE:
6142 case FUTEX_WAKE_OP:
6143 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6144 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6145 But the prototype takes a `struct timespec *'; insert casts
6146 to satisfy the compiler. We do not need to tswap TIMEOUT
6147 since it's not compared to guest memory. */
6148 pts = (struct timespec *)(uintptr_t) timeout;
6149 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6150 g2h(uaddr2),
6151 (base_op == FUTEX_CMP_REQUEUE
6152 ? tswap32(val3)
6153 : val3)));
6154 default:
6155 return -TARGET_ENOSYS;
6158 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest's handle_bytes capacity,
 * call the host syscall into a temporary host file_handle, then copy the
 * (opaque) handle plus byte-swapped header fields and mount id back. */
6159 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6160 abi_long handle, abi_long mount_id,
6161 abi_long flags)
6163 struct file_handle *target_fh;
6164 struct file_handle *fh;
6165 int mid = 0;
6166 abi_long ret;
6167 char *name;
6168 unsigned int size, total_size;
/* First field of the guest struct is handle_bytes: the caller's buffer size. */
6170 if (get_user_s32(size, handle)) {
6171 return -TARGET_EFAULT;
6174 name = lock_user_string(pathname);
6175 if (!name) {
6176 return -TARGET_EFAULT;
6179 total_size = sizeof(struct file_handle) + size;
6180 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6181 if (!target_fh) {
6182 unlock_user(name, pathname, 0);
6183 return -TARGET_EFAULT;
6186 fh = g_malloc0(total_size);
6187 fh->handle_bytes = size;
6189 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6190 unlock_user(name, pathname, 0);
6192 /* man name_to_handle_at(2):
6193 * Other than the use of the handle_bytes field, the caller should treat
6194 * the file_handle structure as an opaque data type
6197 memcpy(target_fh, fh, total_size);
6198 target_fh->handle_bytes = tswap32(fh->handle_bytes);
6199 target_fh->handle_type = tswap32(fh->handle_type);
6200 g_free(fh);
6201 unlock_user(target_fh, handle, total_size);
6203 if (put_user_s32(mid, mount_id)) {
6204 return -TARGET_EFAULT;
6207 return ret;
6210 #endif
6212 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest file_handle into a host
 * copy (fixing the two byte-swapped header fields), translate the open
 * flags, and forward to the host syscall. */
6213 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6214 abi_long flags)
6216 struct file_handle *target_fh;
6217 struct file_handle *fh;
6218 unsigned int size, total_size;
6219 abi_long ret;
6221 if (get_user_s32(size, handle)) {
6222 return -TARGET_EFAULT;
6225 total_size = sizeof(struct file_handle) + size;
6226 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6227 if (!target_fh) {
6228 return -TARGET_EFAULT;
/* The handle body is opaque; only the header fields need swapping. */
6231 fh = g_memdup(target_fh, total_size);
6232 fh->handle_bytes = size;
6233 fh->handle_type = tswap32(target_fh->handle_type);
6235 ret = get_errno(open_by_handle_at(mount_fd, fh,
6236 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6238 g_free(fh);
6240 unlock_user(target_fh, handle, total_size);
6242 return ret;
6244 #endif
6246 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6248 /* signalfd siginfo conversion */
/*
 * Convert one host signalfd_siginfo record into the guest's layout:
 * translate the signal number and byte-swap every field.
 *
 * Works both when tinfo and info alias the same buffer (the in-place
 * call from host_to_target_data_signalfd()) and when they are distinct:
 * every field of *info is read before the corresponding field of *tinfo
 * is written. The original code wrongly read tinfo->ssi_errno,
 * tinfo->ssi_signo and tinfo->ssi_code (the destination) instead of the
 * source; that only produced correct results by accident in the aliased
 * case. All reads now go through *info.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */
#ifdef BUS_MCEERR_AO
    /* Check the *host* signo/code (host byte order) before any swapping. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/* fd_trans read hook for signalfd descriptors: convert, in place, every
 * signalfd_siginfo record contained in the buffer just read. Note the
 * source and destination pointers alias here. Returns len unchanged. */
6288 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6290 int i;
6292 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6293 host_to_target_signalfd_siginfo(buf + i, buf + i);
6296 return len;
/* Translation descriptor registered for fds created by do_signalfd4(). */
6299 static TargetFdTrans target_signalfd_trans = {
6300 .host_to_target_data = host_to_target_data_signalfd,
/* Emulate signalfd4(2): convert the guest sigset and flags, create the
 * host signalfd, and register the data-translation hook so reads from
 * the new fd get their siginfo records byte-swapped for the guest. */
6303 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6305 int host_flags;
6306 target_sigset_t *target_mask;
6307 sigset_t host_mask;
6308 abi_long ret;
/* Only these two flags are valid for signalfd4. */
6310 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6311 return -TARGET_EINVAL;
6313 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6314 return -TARGET_EFAULT;
6317 target_to_host_sigset(&host_mask, target_mask);
6319 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6321 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6322 if (ret >= 0) {
6323 fd_trans_register(ret, &target_signalfd_trans);
6326 unlock_user_struct(target_mask, mask, 0);
6328 return ret;
6332 /* Map host to target signal numbers for the wait family of syscalls.
6333 Assume all other status bits are the same. */
6334 int host_to_target_waitstatus(int status)
6336 if (WIFSIGNALED(status)) {
/* Low 7 bits hold the terminating signal; translate just those. */
6337 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
6339 if (WIFSTOPPED(status)) {
/* Stop signal lives in bits 8-15; the 0x7f marker stays in bits 0-7. */
6340 return (host_to_target_signal(WSTOPSIG(status)) << 8)
6341 | (status & 0xff);
6343 return status;
/* Back /proc/self/cmdline for the guest: stream the host's cmdline into
 * fd, dropping the first NUL-terminated word (the qemu binary path) so
 * the guest sees its own argv[0] first. Returns 0 or -1 (errno set). */
6346 static int open_self_cmdline(void *cpu_env, int fd)
6348 int fd_orig = -1;
6349 bool word_skipped = false;
6351 fd_orig = open("/proc/self/cmdline", O_RDONLY);
6352 if (fd_orig < 0) {
6353 return fd_orig;
6356 while (true) {
6357 ssize_t nb_read;
6358 char buf[128];
6359 char *cp_buf = buf;
6361 nb_read = read(fd_orig, buf, sizeof(buf));
6362 if (nb_read < 0) {
6363 int e = errno;
6364 fd_orig = close(fd_orig);
6365 errno = e;
6366 return -1;
6367 } else if (nb_read == 0) {
6368 break;
6371 if (!word_skipped) {
6372 /* Skip the first string, which is the path to qemu-*-static
6373 instead of the actual command. */
6374 cp_buf = memchr(buf, 0, sizeof(buf));
6375 if (cp_buf) {
6376 /* Null byte found, skip one string */
6377 cp_buf++;
6378 nb_read -= cp_buf - buf;
6379 word_skipped = true;
/* NOTE(review): if the first word exceeds one 128-byte chunk, no NUL is
 * found in this chunk, word_skipped stays false and the chunk is simply
 * dropped without tracking how much of the word remains -- verify
 * behaviour for very long qemu paths. */
6383 if (word_skipped) {
6384 if (write(fd, cp_buf, nb_read) != nb_read) {
6385 int e = errno;
6386 close(fd_orig);
6387 errno = e;
6388 return -1;
6393 return close(fd_orig);
/* Back /proc/self/maps for the guest: parse the host's maps, keep only
 * ranges that correspond to valid guest addresses, rewrite them into
 * guest addresses, and tag the guest stack region. Returns 0 or -1. */
6396 static int open_self_maps(void *cpu_env, int fd)
6398 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6399 TaskState *ts = cpu->opaque;
6400 FILE *fp;
6401 char *line = NULL;
6402 size_t len = 0;
6403 ssize_t read;
6405 fp = fopen("/proc/self/maps", "r");
6406 if (fp == NULL) {
6407 return -1;
6410 while ((read = getline(&line, &len, fp)) != -1) {
6411 int fields, dev_maj, dev_min, inode;
6412 uint64_t min, max, offset;
6413 char flag_r, flag_w, flag_x, flag_p;
6414 char path[512] = "";
6415 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6416 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6417 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
/* 10 fields = anonymous mapping (no path); 11 = mapping with path. */
6419 if ((fields < 10) || (fields > 11)) {
6420 continue;
6422 if (h2g_valid(min)) {
6423 int flags = page_get_flags(h2g(min));
/* Clamp ranges that extend past the top of guest address space. */
6424 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6425 if (page_check_range(h2g(min), max - min, flags) == -1) {
6426 continue;
6428 if (h2g(min) == ts->info->stack_limit) {
6429 pstrcpy(path, sizeof(path), " [stack]");
6431 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6432 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6433 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6434 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6435 path[0] ? " " : "", path);
6439 free(line);
6440 fclose(fp);
6442 return 0;
/* Back /proc/self/stat for the guest: emit the 44 space-separated
 * fields, filling in only pid (field 0), comm (field 1) and startstack
 * (field 27) with real values; all other fields are written as 0.
 * Returns 0, or -1 on a short write. */
6445 static int open_self_stat(void *cpu_env, int fd)
6447 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6448 TaskState *ts = cpu->opaque;
6449 abi_ulong start_stack = ts->info->start_stack;
6450 int i;
6452 for (i = 0; i < 44; i++) {
6453 char buf[128];
6454 int len;
6455 uint64_t val = 0;
6457 if (i == 0) {
6458 /* pid */
6459 val = getpid();
6460 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6461 } else if (i == 1) {
6462 /* app name */
6463 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6464 } else if (i == 27) {
6465 /* stack bottom */
6466 val = start_stack;
6467 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6468 } else {
6469 /* for the rest, there is MasterCard */
6470 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6473 len = strlen(buf);
6474 if (write(fd, buf, len) != len) {
6475 return -1;
6479 return 0;
/* Back /proc/self/auxv for the guest: copy the saved auxiliary vector
 * out of the guest stack into fd, then rewind fd to the start.
 * Always returns 0 (failures to lock or write are silently dropped). */
6482 static int open_self_auxv(void *cpu_env, int fd)
6484 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6485 TaskState *ts = cpu->opaque;
6486 abi_ulong auxv = ts->info->saved_auxv;
6487 abi_ulong len = ts->info->auxv_len;
6488 char *ptr;
6491 * Auxiliary vector is stored in target process stack.
6492 * read in whole auxv vector and copy it to file
6494 ptr = lock_user(VERIFY_READ, auxv, len, 0);
6495 if (ptr != NULL) {
6496 while (len > 0) {
6497 ssize_t r;
6498 r = write(fd, ptr, len);
6499 if (r <= 0) {
6500 break;
6502 len -= r;
6503 ptr += r;
6505 lseek(fd, 0, SEEK_SET);
/* NOTE(review): len/ptr were advanced by the write loop, so this unlock
 * uses the residual length rather than the original -- confirm that is
 * the intended interaction with lock_user bookkeeping. */
6506 unlock_user(ptr, auxv, len);
6509 return 0;
/* Return 1 if FILENAME refers to ENTRY inside this process's /proc
 * directory -- i.e. "/proc/self/<entry>" or "/proc/<own-pid>/<entry>" --
 * and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        /* "/proc/self/..." always means us. */
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* A numeric directory only counts if it is our own pid. */
        char pid_prefix[80];
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(p, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        p += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
/* Cross-endian-only helpers: when host and guest endianness differ, some
 * /proc/net files containing raw 32-bit values must be byte-swapped. */
6536 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used with the fake_open table (cf. is_proc_myself). */
6537 static int is_proc(const char *filename, const char *entry)
6539 return strcmp(filename, entry) == 0;
/* Back /proc/net/route: copy the header line verbatim, then re-emit each
 * route with the address fields (dest/gw/mask) byte-swapped for the guest. */
6542 static int open_net_route(void *cpu_env, int fd)
6544 FILE *fp;
6545 char *line = NULL;
6546 size_t len = 0;
6547 ssize_t read;
6549 fp = fopen("/proc/net/route", "r");
6550 if (fp == NULL) {
6551 return -1;
6554 /* read header */
6556 read = getline(&line, &len, fp);
6557 dprintf(fd, "%s", line);
6559 /* read routes */
6561 while ((read = getline(&line, &len, fp)) != -1) {
6562 char iface[16];
6563 uint32_t dest, gw, mask;
6564 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6565 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6566 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6567 &mask, &mtu, &window, &irtt);
6568 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6569 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6570 metric, tswap32(mask), mtu, window, irtt);
6573 free(line);
6574 fclose(fp);
6576 return 0;
6578 #endif
6580 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
6582 struct fake_open {
6583 const char *filename;
6584 int (*fill)(void *cpu_env, int fd);
6585 int (*cmp)(const char *s1, const char *s2);
6587 const struct fake_open *fake_open;
6588 static const struct fake_open fakes[] = {
6589 { "maps", open_self_maps, is_proc_myself },
6590 { "stat", open_self_stat, is_proc_myself },
6591 { "auxv", open_self_auxv, is_proc_myself },
6592 { "cmdline", open_self_cmdline, is_proc_myself },
6593 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
6594 { "/proc/net/route", open_net_route, is_proc },
6595 #endif
6596 { NULL, NULL, NULL }
6599 if (is_proc_myself(pathname, "exe")) {
6600 int execfd = qemu_getauxval(AT_EXECFD);
6601 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
6604 for (fake_open = fakes; fake_open->filename; fake_open++) {
6605 if (fake_open->cmp(pathname, fake_open->filename)) {
6606 break;
6610 if (fake_open->filename) {
6611 const char *tmpdir;
6612 char filename[PATH_MAX];
6613 int fd, r;
6615 /* create temporary file to map stat to */
6616 tmpdir = getenv("TMPDIR");
6617 if (!tmpdir)
6618 tmpdir = "/tmp";
6619 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
6620 fd = mkstemp(filename);
6621 if (fd < 0) {
6622 return fd;
6624 unlink(filename);
6626 if ((r = fake_open->fill(cpu_env, fd))) {
6627 int e = errno;
6628 close(fd);
6629 errno = e;
6630 return r;
6632 lseek(fd, 0, SEEK_SET);
6634 return fd;
6637 return safe_openat(dirfd, path(pathname), flags, mode);
6640 #define TIMER_MAGIC 0x0caf0000
6641 #define TIMER_MAGIC_MASK 0xffff0000
6643 /* Convert QEMU provided timer ID back to internal 16bit index format */
6644 static target_timer_t get_timer_id(abi_long arg)
6646 target_timer_t timerid = arg;
6648 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6649 return -TARGET_EINVAL;
6652 timerid &= 0xffff;
6654 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6655 return -TARGET_EINVAL;
6658 return timerid;
6661 /* do_syscall() should always have a single exit point at the end so
6662 that actions, such as logging of syscall results, can be performed.
6663 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6664 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6665 abi_long arg2, abi_long arg3, abi_long arg4,
6666 abi_long arg5, abi_long arg6, abi_long arg7,
6667 abi_long arg8)
6669 CPUState *cpu = ENV_GET_CPU(cpu_env);
6670 abi_long ret;
6671 struct stat st;
6672 struct statfs stfs;
6673 void *p;
6675 #if defined(DEBUG_ERESTARTSYS)
6676 /* Debug-only code for exercising the syscall-restart code paths
6677 * in the per-architecture cpu main loops: restart every syscall
6678 * the guest makes once before letting it through.
6681 static int flag;
6683 flag = !flag;
6684 if (flag) {
6685 return -TARGET_ERESTARTSYS;
6688 #endif
6690 #ifdef DEBUG
6691 gemu_log("syscall %d", num);
6692 #endif
6693 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
6694 if(do_strace)
6695 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6697 switch(num) {
6698 case TARGET_NR_exit:
6699 /* In old applications this may be used to implement _exit(2).
6700 However in threaded applictions it is used for thread termination,
6701 and _exit_group is used for application termination.
6702 Do thread termination if we have more then one thread. */
6704 if (block_signals()) {
6705 ret = -TARGET_ERESTARTSYS;
6706 break;
6709 if (CPU_NEXT(first_cpu)) {
6710 TaskState *ts;
6712 cpu_list_lock();
6713 /* Remove the CPU from the list. */
6714 QTAILQ_REMOVE(&cpus, cpu, node);
6715 cpu_list_unlock();
6716 ts = cpu->opaque;
6717 if (ts->child_tidptr) {
6718 put_user_u32(0, ts->child_tidptr);
6719 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6720 NULL, NULL, 0);
6722 thread_cpu = NULL;
6723 object_unref(OBJECT(cpu));
6724 g_free(ts);
6725 rcu_unregister_thread();
6726 pthread_exit(NULL);
6728 #ifdef TARGET_GPROF
6729 _mcleanup();
6730 #endif
6731 gdb_exit(cpu_env, arg1);
6732 _exit(arg1);
6733 ret = 0; /* avoid warning */
6734 break;
6735 case TARGET_NR_read:
6736 if (arg3 == 0)
6737 ret = 0;
6738 else {
6739 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6740 goto efault;
6741 ret = get_errno(safe_read(arg1, p, arg3));
6742 if (ret >= 0 &&
6743 fd_trans_host_to_target_data(arg1)) {
6744 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6746 unlock_user(p, arg2, ret);
6748 break;
6749 case TARGET_NR_write:
6750 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6751 goto efault;
6752 ret = get_errno(safe_write(arg1, p, arg3));
6753 unlock_user(p, arg2, 0);
6754 break;
6755 #ifdef TARGET_NR_open
6756 case TARGET_NR_open:
6757 if (!(p = lock_user_string(arg1)))
6758 goto efault;
6759 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6760 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6761 arg3));
6762 fd_trans_unregister(ret);
6763 unlock_user(p, arg1, 0);
6764 break;
6765 #endif
6766 case TARGET_NR_openat:
6767 if (!(p = lock_user_string(arg2)))
6768 goto efault;
6769 ret = get_errno(do_openat(cpu_env, arg1, p,
6770 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6771 arg4));
6772 fd_trans_unregister(ret);
6773 unlock_user(p, arg2, 0);
6774 break;
6775 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6776 case TARGET_NR_name_to_handle_at:
6777 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6778 break;
6779 #endif
6780 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6781 case TARGET_NR_open_by_handle_at:
6782 ret = do_open_by_handle_at(arg1, arg2, arg3);
6783 fd_trans_unregister(ret);
6784 break;
6785 #endif
6786 case TARGET_NR_close:
6787 fd_trans_unregister(arg1);
6788 ret = get_errno(close(arg1));
6789 break;
6790 case TARGET_NR_brk:
6791 ret = do_brk(arg1);
6792 break;
6793 #ifdef TARGET_NR_fork
6794 case TARGET_NR_fork:
6795 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6796 break;
6797 #endif
6798 #ifdef TARGET_NR_waitpid
6799 case TARGET_NR_waitpid:
6801 int status;
6802 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6803 if (!is_error(ret) && arg2 && ret
6804 && put_user_s32(host_to_target_waitstatus(status), arg2))
6805 goto efault;
6807 break;
6808 #endif
6809 #ifdef TARGET_NR_waitid
6810 case TARGET_NR_waitid:
6812 siginfo_t info;
6813 info.si_pid = 0;
6814 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6815 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6816 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6817 goto efault;
6818 host_to_target_siginfo(p, &info);
6819 unlock_user(p, arg3, sizeof(target_siginfo_t));
6822 break;
6823 #endif
6824 #ifdef TARGET_NR_creat /* not on alpha */
6825 case TARGET_NR_creat:
6826 if (!(p = lock_user_string(arg1)))
6827 goto efault;
6828 ret = get_errno(creat(p, arg2));
6829 fd_trans_unregister(ret);
6830 unlock_user(p, arg1, 0);
6831 break;
6832 #endif
6833 #ifdef TARGET_NR_link
6834 case TARGET_NR_link:
6836 void * p2;
6837 p = lock_user_string(arg1);
6838 p2 = lock_user_string(arg2);
6839 if (!p || !p2)
6840 ret = -TARGET_EFAULT;
6841 else
6842 ret = get_errno(link(p, p2));
6843 unlock_user(p2, arg2, 0);
6844 unlock_user(p, arg1, 0);
6846 break;
6847 #endif
6848 #if defined(TARGET_NR_linkat)
6849 case TARGET_NR_linkat:
6851 void * p2 = NULL;
6852 if (!arg2 || !arg4)
6853 goto efault;
6854 p = lock_user_string(arg2);
6855 p2 = lock_user_string(arg4);
6856 if (!p || !p2)
6857 ret = -TARGET_EFAULT;
6858 else
6859 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6860 unlock_user(p, arg2, 0);
6861 unlock_user(p2, arg4, 0);
6863 break;
6864 #endif
6865 #ifdef TARGET_NR_unlink
6866 case TARGET_NR_unlink:
6867 if (!(p = lock_user_string(arg1)))
6868 goto efault;
6869 ret = get_errno(unlink(p));
6870 unlock_user(p, arg1, 0);
6871 break;
6872 #endif
6873 #if defined(TARGET_NR_unlinkat)
6874 case TARGET_NR_unlinkat:
6875 if (!(p = lock_user_string(arg2)))
6876 goto efault;
6877 ret = get_errno(unlinkat(arg1, p, arg3));
6878 unlock_user(p, arg2, 0);
6879 break;
6880 #endif
6881 case TARGET_NR_execve:
6883 char **argp, **envp;
6884 int argc, envc;
6885 abi_ulong gp;
6886 abi_ulong guest_argp;
6887 abi_ulong guest_envp;
6888 abi_ulong addr;
6889 char **q;
6890 int total_size = 0;
6892 argc = 0;
6893 guest_argp = arg2;
6894 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6895 if (get_user_ual(addr, gp))
6896 goto efault;
6897 if (!addr)
6898 break;
6899 argc++;
6901 envc = 0;
6902 guest_envp = arg3;
6903 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6904 if (get_user_ual(addr, gp))
6905 goto efault;
6906 if (!addr)
6907 break;
6908 envc++;
6911 argp = alloca((argc + 1) * sizeof(void *));
6912 envp = alloca((envc + 1) * sizeof(void *));
6914 for (gp = guest_argp, q = argp; gp;
6915 gp += sizeof(abi_ulong), q++) {
6916 if (get_user_ual(addr, gp))
6917 goto execve_efault;
6918 if (!addr)
6919 break;
6920 if (!(*q = lock_user_string(addr)))
6921 goto execve_efault;
6922 total_size += strlen(*q) + 1;
6924 *q = NULL;
6926 for (gp = guest_envp, q = envp; gp;
6927 gp += sizeof(abi_ulong), q++) {
6928 if (get_user_ual(addr, gp))
6929 goto execve_efault;
6930 if (!addr)
6931 break;
6932 if (!(*q = lock_user_string(addr)))
6933 goto execve_efault;
6934 total_size += strlen(*q) + 1;
6936 *q = NULL;
6938 if (!(p = lock_user_string(arg1)))
6939 goto execve_efault;
6940 /* Although execve() is not an interruptible syscall it is
6941 * a special case where we must use the safe_syscall wrapper:
6942 * if we allow a signal to happen before we make the host
6943 * syscall then we will 'lose' it, because at the point of
6944 * execve the process leaves QEMU's control. So we use the
6945 * safe syscall wrapper to ensure that we either take the
6946 * signal as a guest signal, or else it does not happen
6947 * before the execve completes and makes it the other
6948 * program's problem.
6950 ret = get_errno(safe_execve(p, argp, envp));
6951 unlock_user(p, arg1, 0);
6953 goto execve_end;
6955 execve_efault:
6956 ret = -TARGET_EFAULT;
6958 execve_end:
6959 for (gp = guest_argp, q = argp; *q;
6960 gp += sizeof(abi_ulong), q++) {
6961 if (get_user_ual(addr, gp)
6962 || !addr)
6963 break;
6964 unlock_user(*q, addr, 0);
6966 for (gp = guest_envp, q = envp; *q;
6967 gp += sizeof(abi_ulong), q++) {
6968 if (get_user_ual(addr, gp)
6969 || !addr)
6970 break;
6971 unlock_user(*q, addr, 0);
6974 break;
6975 case TARGET_NR_chdir:
6976 if (!(p = lock_user_string(arg1)))
6977 goto efault;
6978 ret = get_errno(chdir(p));
6979 unlock_user(p, arg1, 0);
6980 break;
6981 #ifdef TARGET_NR_time
6982 case TARGET_NR_time:
6984 time_t host_time;
6985 ret = get_errno(time(&host_time));
6986 if (!is_error(ret)
6987 && arg1
6988 && put_user_sal(host_time, arg1))
6989 goto efault;
6991 break;
6992 #endif
6993 #ifdef TARGET_NR_mknod
6994 case TARGET_NR_mknod:
6995 if (!(p = lock_user_string(arg1)))
6996 goto efault;
6997 ret = get_errno(mknod(p, arg2, arg3));
6998 unlock_user(p, arg1, 0);
6999 break;
7000 #endif
7001 #if defined(TARGET_NR_mknodat)
7002 case TARGET_NR_mknodat:
7003 if (!(p = lock_user_string(arg2)))
7004 goto efault;
7005 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7006 unlock_user(p, arg2, 0);
7007 break;
7008 #endif
7009 #ifdef TARGET_NR_chmod
7010 case TARGET_NR_chmod:
7011 if (!(p = lock_user_string(arg1)))
7012 goto efault;
7013 ret = get_errno(chmod(p, arg2));
7014 unlock_user(p, arg1, 0);
7015 break;
7016 #endif
7017 #ifdef TARGET_NR_break
7018 case TARGET_NR_break:
7019 goto unimplemented;
7020 #endif
7021 #ifdef TARGET_NR_oldstat
7022 case TARGET_NR_oldstat:
7023 goto unimplemented;
7024 #endif
7025 case TARGET_NR_lseek:
7026 ret = get_errno(lseek(arg1, arg2, arg3));
7027 break;
7028 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7029 /* Alpha specific */
7030 case TARGET_NR_getxpid:
7031 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7032 ret = get_errno(getpid());
7033 break;
7034 #endif
7035 #ifdef TARGET_NR_getpid
7036 case TARGET_NR_getpid:
7037 ret = get_errno(getpid());
7038 break;
7039 #endif
7040 case TARGET_NR_mount:
7042 /* need to look at the data field */
7043 void *p2, *p3;
7045 if (arg1) {
7046 p = lock_user_string(arg1);
7047 if (!p) {
7048 goto efault;
7050 } else {
7051 p = NULL;
7054 p2 = lock_user_string(arg2);
7055 if (!p2) {
7056 if (arg1) {
7057 unlock_user(p, arg1, 0);
7059 goto efault;
7062 if (arg3) {
7063 p3 = lock_user_string(arg3);
7064 if (!p3) {
7065 if (arg1) {
7066 unlock_user(p, arg1, 0);
7068 unlock_user(p2, arg2, 0);
7069 goto efault;
7071 } else {
7072 p3 = NULL;
7075 /* FIXME - arg5 should be locked, but it isn't clear how to
7076 * do that since it's not guaranteed to be a NULL-terminated
7077 * string.
7079 if (!arg5) {
7080 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7081 } else {
7082 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7084 ret = get_errno(ret);
7086 if (arg1) {
7087 unlock_user(p, arg1, 0);
7089 unlock_user(p2, arg2, 0);
7090 if (arg3) {
7091 unlock_user(p3, arg3, 0);
7094 break;
7095 #ifdef TARGET_NR_umount
7096 case TARGET_NR_umount:
7097 if (!(p = lock_user_string(arg1)))
7098 goto efault;
7099 ret = get_errno(umount(p));
7100 unlock_user(p, arg1, 0);
7101 break;
7102 #endif
7103 #ifdef TARGET_NR_stime /* not on alpha */
7104 case TARGET_NR_stime:
7106 time_t host_time;
7107 if (get_user_sal(host_time, arg1))
7108 goto efault;
7109 ret = get_errno(stime(&host_time));
7111 break;
7112 #endif
7113 case TARGET_NR_ptrace:
7114 goto unimplemented;
7115 #ifdef TARGET_NR_alarm /* not on alpha */
7116 case TARGET_NR_alarm:
7117 ret = alarm(arg1);
7118 break;
7119 #endif
7120 #ifdef TARGET_NR_oldfstat
7121 case TARGET_NR_oldfstat:
7122 goto unimplemented;
7123 #endif
7124 #ifdef TARGET_NR_pause /* not on alpha */
7125 case TARGET_NR_pause:
7126 if (!block_signals()) {
7127 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7129 ret = -TARGET_EINTR;
7130 break;
7131 #endif
7132 #ifdef TARGET_NR_utime
7133 case TARGET_NR_utime:
7135 struct utimbuf tbuf, *host_tbuf;
7136 struct target_utimbuf *target_tbuf;
7137 if (arg2) {
7138 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7139 goto efault;
7140 tbuf.actime = tswapal(target_tbuf->actime);
7141 tbuf.modtime = tswapal(target_tbuf->modtime);
7142 unlock_user_struct(target_tbuf, arg2, 0);
7143 host_tbuf = &tbuf;
7144 } else {
7145 host_tbuf = NULL;
7147 if (!(p = lock_user_string(arg1)))
7148 goto efault;
7149 ret = get_errno(utime(p, host_tbuf));
7150 unlock_user(p, arg1, 0);
7152 break;
7153 #endif
7154 #ifdef TARGET_NR_utimes
7155 case TARGET_NR_utimes:
7157 struct timeval *tvp, tv[2];
7158 if (arg2) {
7159 if (copy_from_user_timeval(&tv[0], arg2)
7160 || copy_from_user_timeval(&tv[1],
7161 arg2 + sizeof(struct target_timeval)))
7162 goto efault;
7163 tvp = tv;
7164 } else {
7165 tvp = NULL;
7167 if (!(p = lock_user_string(arg1)))
7168 goto efault;
7169 ret = get_errno(utimes(p, tvp));
7170 unlock_user(p, arg1, 0);
7172 break;
7173 #endif
7174 #if defined(TARGET_NR_futimesat)
7175 case TARGET_NR_futimesat:
7177 struct timeval *tvp, tv[2];
7178 if (arg3) {
7179 if (copy_from_user_timeval(&tv[0], arg3)
7180 || copy_from_user_timeval(&tv[1],
7181 arg3 + sizeof(struct target_timeval)))
7182 goto efault;
7183 tvp = tv;
7184 } else {
7185 tvp = NULL;
7187 if (!(p = lock_user_string(arg2)))
7188 goto efault;
7189 ret = get_errno(futimesat(arg1, path(p), tvp));
7190 unlock_user(p, arg2, 0);
7192 break;
7193 #endif
7194 #ifdef TARGET_NR_stty
7195 case TARGET_NR_stty:
7196 goto unimplemented;
7197 #endif
7198 #ifdef TARGET_NR_gtty
7199 case TARGET_NR_gtty:
7200 goto unimplemented;
7201 #endif
7202 #ifdef TARGET_NR_access
7203 case TARGET_NR_access:
7204 if (!(p = lock_user_string(arg1)))
7205 goto efault;
7206 ret = get_errno(access(path(p), arg2));
7207 unlock_user(p, arg1, 0);
7208 break;
7209 #endif
7210 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7211 case TARGET_NR_faccessat:
7212 if (!(p = lock_user_string(arg2)))
7213 goto efault;
7214 ret = get_errno(faccessat(arg1, p, arg3, 0));
7215 unlock_user(p, arg2, 0);
7216 break;
7217 #endif
7218 #ifdef TARGET_NR_nice /* not on alpha */
7219 case TARGET_NR_nice:
7220 ret = get_errno(nice(arg1));
7221 break;
7222 #endif
7223 #ifdef TARGET_NR_ftime
7224 case TARGET_NR_ftime:
7225 goto unimplemented;
7226 #endif
7227 case TARGET_NR_sync:
7228 sync();
7229 ret = 0;
7230 break;
7231 case TARGET_NR_kill:
7232 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7233 break;
7234 #ifdef TARGET_NR_rename
7235 case TARGET_NR_rename:
7237 void *p2;
7238 p = lock_user_string(arg1);
7239 p2 = lock_user_string(arg2);
7240 if (!p || !p2)
7241 ret = -TARGET_EFAULT;
7242 else
7243 ret = get_errno(rename(p, p2));
7244 unlock_user(p2, arg2, 0);
7245 unlock_user(p, arg1, 0);
7247 break;
7248 #endif
7249 #if defined(TARGET_NR_renameat)
7250 case TARGET_NR_renameat:
7252 void *p2;
7253 p = lock_user_string(arg2);
7254 p2 = lock_user_string(arg4);
7255 if (!p || !p2)
7256 ret = -TARGET_EFAULT;
7257 else
7258 ret = get_errno(renameat(arg1, p, arg3, p2));
7259 unlock_user(p2, arg4, 0);
7260 unlock_user(p, arg2, 0);
7262 break;
7263 #endif
7264 #ifdef TARGET_NR_mkdir
7265 case TARGET_NR_mkdir:
7266 if (!(p = lock_user_string(arg1)))
7267 goto efault;
7268 ret = get_errno(mkdir(p, arg2));
7269 unlock_user(p, arg1, 0);
7270 break;
7271 #endif
7272 #if defined(TARGET_NR_mkdirat)
7273 case TARGET_NR_mkdirat:
7274 if (!(p = lock_user_string(arg2)))
7275 goto efault;
7276 ret = get_errno(mkdirat(arg1, p, arg3));
7277 unlock_user(p, arg2, 0);
7278 break;
7279 #endif
7280 #ifdef TARGET_NR_rmdir
7281 case TARGET_NR_rmdir:
7282 if (!(p = lock_user_string(arg1)))
7283 goto efault;
7284 ret = get_errno(rmdir(p));
7285 unlock_user(p, arg1, 0);
7286 break;
7287 #endif
7288 case TARGET_NR_dup:
7289 ret = get_errno(dup(arg1));
7290 if (ret >= 0) {
7291 fd_trans_dup(arg1, ret);
7293 break;
7294 #ifdef TARGET_NR_pipe
7295 case TARGET_NR_pipe:
7296 ret = do_pipe(cpu_env, arg1, 0, 0);
7297 break;
7298 #endif
7299 #ifdef TARGET_NR_pipe2
7300 case TARGET_NR_pipe2:
7301 ret = do_pipe(cpu_env, arg1,
7302 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7303 break;
7304 #endif
7305 case TARGET_NR_times:
7307 struct target_tms *tmsp;
7308 struct tms tms;
7309 ret = get_errno(times(&tms));
7310 if (arg1) {
7311 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7312 if (!tmsp)
7313 goto efault;
7314 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7315 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7316 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7317 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7319 if (!is_error(ret))
7320 ret = host_to_target_clock_t(ret);
7322 break;
7323 #ifdef TARGET_NR_prof
7324 case TARGET_NR_prof:
7325 goto unimplemented;
7326 #endif
7327 #ifdef TARGET_NR_signal
7328 case TARGET_NR_signal:
7329 goto unimplemented;
7330 #endif
7331 case TARGET_NR_acct:
7332 if (arg1 == 0) {
7333 ret = get_errno(acct(NULL));
7334 } else {
7335 if (!(p = lock_user_string(arg1)))
7336 goto efault;
7337 ret = get_errno(acct(path(p)));
7338 unlock_user(p, arg1, 0);
7340 break;
7341 #ifdef TARGET_NR_umount2
7342 case TARGET_NR_umount2:
7343 if (!(p = lock_user_string(arg1)))
7344 goto efault;
7345 ret = get_errno(umount2(p, arg2));
7346 unlock_user(p, arg1, 0);
7347 break;
7348 #endif
7349 #ifdef TARGET_NR_lock
7350 case TARGET_NR_lock:
7351 goto unimplemented;
7352 #endif
7353 case TARGET_NR_ioctl:
7354 ret = do_ioctl(arg1, arg2, arg3);
7355 break;
7356 case TARGET_NR_fcntl:
7357 ret = do_fcntl(arg1, arg2, arg3);
7358 break;
7359 #ifdef TARGET_NR_mpx
7360 case TARGET_NR_mpx:
7361 goto unimplemented;
7362 #endif
7363 case TARGET_NR_setpgid:
7364 ret = get_errno(setpgid(arg1, arg2));
7365 break;
7366 #ifdef TARGET_NR_ulimit
7367 case TARGET_NR_ulimit:
7368 goto unimplemented;
7369 #endif
7370 #ifdef TARGET_NR_oldolduname
7371 case TARGET_NR_oldolduname:
7372 goto unimplemented;
7373 #endif
7374 case TARGET_NR_umask:
7375 ret = get_errno(umask(arg1));
7376 break;
7377 case TARGET_NR_chroot:
7378 if (!(p = lock_user_string(arg1)))
7379 goto efault;
7380 ret = get_errno(chroot(p));
7381 unlock_user(p, arg1, 0);
7382 break;
7383 #ifdef TARGET_NR_ustat
7384 case TARGET_NR_ustat:
7385 goto unimplemented;
7386 #endif
7387 #ifdef TARGET_NR_dup2
7388 case TARGET_NR_dup2:
7389 ret = get_errno(dup2(arg1, arg2));
7390 if (ret >= 0) {
7391 fd_trans_dup(arg1, arg2);
7393 break;
7394 #endif
7395 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7396 case TARGET_NR_dup3:
7397 ret = get_errno(dup3(arg1, arg2, arg3));
7398 if (ret >= 0) {
7399 fd_trans_dup(arg1, arg2);
7401 break;
7402 #endif
7403 #ifdef TARGET_NR_getppid /* not on alpha */
7404 case TARGET_NR_getppid:
7405 ret = get_errno(getppid());
7406 break;
7407 #endif
7408 #ifdef TARGET_NR_getpgrp
7409 case TARGET_NR_getpgrp:
7410 ret = get_errno(getpgrp());
7411 break;
7412 #endif
7413 case TARGET_NR_setsid:
7414 ret = get_errno(setsid());
7415 break;
7416 #ifdef TARGET_NR_sigaction
7417 case TARGET_NR_sigaction:
7419 #if defined(TARGET_ALPHA)
7420 struct target_sigaction act, oact, *pact = 0;
7421 struct target_old_sigaction *old_act;
7422 if (arg2) {
7423 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7424 goto efault;
7425 act._sa_handler = old_act->_sa_handler;
7426 target_siginitset(&act.sa_mask, old_act->sa_mask);
7427 act.sa_flags = old_act->sa_flags;
7428 act.sa_restorer = 0;
7429 unlock_user_struct(old_act, arg2, 0);
7430 pact = &act;
7432 ret = get_errno(do_sigaction(arg1, pact, &oact));
7433 if (!is_error(ret) && arg3) {
7434 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7435 goto efault;
7436 old_act->_sa_handler = oact._sa_handler;
7437 old_act->sa_mask = oact.sa_mask.sig[0];
7438 old_act->sa_flags = oact.sa_flags;
7439 unlock_user_struct(old_act, arg3, 1);
7441 #elif defined(TARGET_MIPS)
7442 struct target_sigaction act, oact, *pact, *old_act;
7444 if (arg2) {
7445 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7446 goto efault;
7447 act._sa_handler = old_act->_sa_handler;
7448 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7449 act.sa_flags = old_act->sa_flags;
7450 unlock_user_struct(old_act, arg2, 0);
7451 pact = &act;
7452 } else {
7453 pact = NULL;
7456 ret = get_errno(do_sigaction(arg1, pact, &oact));
7458 if (!is_error(ret) && arg3) {
7459 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7460 goto efault;
7461 old_act->_sa_handler = oact._sa_handler;
7462 old_act->sa_flags = oact.sa_flags;
7463 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7464 old_act->sa_mask.sig[1] = 0;
7465 old_act->sa_mask.sig[2] = 0;
7466 old_act->sa_mask.sig[3] = 0;
7467 unlock_user_struct(old_act, arg3, 1);
7469 #else
7470 struct target_old_sigaction *old_act;
7471 struct target_sigaction act, oact, *pact;
7472 if (arg2) {
7473 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7474 goto efault;
7475 act._sa_handler = old_act->_sa_handler;
7476 target_siginitset(&act.sa_mask, old_act->sa_mask);
7477 act.sa_flags = old_act->sa_flags;
7478 act.sa_restorer = old_act->sa_restorer;
7479 unlock_user_struct(old_act, arg2, 0);
7480 pact = &act;
7481 } else {
7482 pact = NULL;
7484 ret = get_errno(do_sigaction(arg1, pact, &oact));
7485 if (!is_error(ret) && arg3) {
7486 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7487 goto efault;
7488 old_act->_sa_handler = oact._sa_handler;
7489 old_act->sa_mask = oact.sa_mask.sig[0];
7490 old_act->sa_flags = oact.sa_flags;
7491 old_act->sa_restorer = oact.sa_restorer;
7492 unlock_user_struct(old_act, arg3, 1);
7494 #endif
7496 break;
7497 #endif
7498 case TARGET_NR_rt_sigaction:
7500 #if defined(TARGET_ALPHA)
7501 struct target_sigaction act, oact, *pact = 0;
7502 struct target_rt_sigaction *rt_act;
7503 /* ??? arg4 == sizeof(sigset_t). */
7504 if (arg2) {
7505 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7506 goto efault;
7507 act._sa_handler = rt_act->_sa_handler;
7508 act.sa_mask = rt_act->sa_mask;
7509 act.sa_flags = rt_act->sa_flags;
7510 act.sa_restorer = arg5;
7511 unlock_user_struct(rt_act, arg2, 0);
7512 pact = &act;
7514 ret = get_errno(do_sigaction(arg1, pact, &oact));
7515 if (!is_error(ret) && arg3) {
7516 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7517 goto efault;
7518 rt_act->_sa_handler = oact._sa_handler;
7519 rt_act->sa_mask = oact.sa_mask;
7520 rt_act->sa_flags = oact.sa_flags;
7521 unlock_user_struct(rt_act, arg3, 1);
7523 #else
7524 struct target_sigaction *act;
7525 struct target_sigaction *oact;
7527 if (arg2) {
7528 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7529 goto efault;
7530 } else
7531 act = NULL;
7532 if (arg3) {
7533 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7534 ret = -TARGET_EFAULT;
7535 goto rt_sigaction_fail;
7537 } else
7538 oact = NULL;
7539 ret = get_errno(do_sigaction(arg1, act, oact));
7540 rt_sigaction_fail:
7541 if (act)
7542 unlock_user_struct(act, arg2, 0);
7543 if (oact)
7544 unlock_user_struct(oact, arg3, 1);
7545 #endif
7547 break;
7548 #ifdef TARGET_NR_sgetmask /* not on alpha */
7549 case TARGET_NR_sgetmask:
7551 sigset_t cur_set;
7552 abi_ulong target_set;
7553 ret = do_sigprocmask(0, NULL, &cur_set);
7554 if (!ret) {
7555 host_to_target_old_sigset(&target_set, &cur_set);
7556 ret = target_set;
7559 break;
7560 #endif
7561 #ifdef TARGET_NR_ssetmask /* not on alpha */
7562 case TARGET_NR_ssetmask:
7564 sigset_t set, oset, cur_set;
7565 abi_ulong target_set = arg1;
7566 /* We only have one word of the new mask so we must read
7567 * the rest of it with do_sigprocmask() and OR in this word.
7568 * We are guaranteed that a do_sigprocmask() that only queries
7569 * the signal mask will not fail.
7571 ret = do_sigprocmask(0, NULL, &cur_set);
7572 assert(!ret);
7573 target_to_host_old_sigset(&set, &target_set);
7574 sigorset(&set, &set, &cur_set);
7575 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7576 if (!ret) {
7577 host_to_target_old_sigset(&target_set, &oset);
7578 ret = target_set;
7581 break;
7582 #endif
7583 #ifdef TARGET_NR_sigprocmask
7584 case TARGET_NR_sigprocmask:
7586 #if defined(TARGET_ALPHA)
7587 sigset_t set, oldset;
7588 abi_ulong mask;
7589 int how;
7591 switch (arg1) {
7592 case TARGET_SIG_BLOCK:
7593 how = SIG_BLOCK;
7594 break;
7595 case TARGET_SIG_UNBLOCK:
7596 how = SIG_UNBLOCK;
7597 break;
7598 case TARGET_SIG_SETMASK:
7599 how = SIG_SETMASK;
7600 break;
7601 default:
7602 ret = -TARGET_EINVAL;
7603 goto fail;
7605 mask = arg2;
7606 target_to_host_old_sigset(&set, &mask);
7608 ret = do_sigprocmask(how, &set, &oldset);
7609 if (!is_error(ret)) {
7610 host_to_target_old_sigset(&mask, &oldset);
7611 ret = mask;
7612 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7614 #else
7615 sigset_t set, oldset, *set_ptr;
7616 int how;
7618 if (arg2) {
7619 switch (arg1) {
7620 case TARGET_SIG_BLOCK:
7621 how = SIG_BLOCK;
7622 break;
7623 case TARGET_SIG_UNBLOCK:
7624 how = SIG_UNBLOCK;
7625 break;
7626 case TARGET_SIG_SETMASK:
7627 how = SIG_SETMASK;
7628 break;
7629 default:
7630 ret = -TARGET_EINVAL;
7631 goto fail;
7633 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7634 goto efault;
7635 target_to_host_old_sigset(&set, p);
7636 unlock_user(p, arg2, 0);
7637 set_ptr = &set;
7638 } else {
7639 how = 0;
7640 set_ptr = NULL;
7642 ret = do_sigprocmask(how, set_ptr, &oldset);
7643 if (!is_error(ret) && arg3) {
7644 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7645 goto efault;
7646 host_to_target_old_sigset(p, &oldset);
7647 unlock_user(p, arg3, sizeof(target_sigset_t));
7649 #endif
7651 break;
7652 #endif
7653 case TARGET_NR_rt_sigprocmask:
7655 int how = arg1;
7656 sigset_t set, oldset, *set_ptr;
7658 if (arg2) {
7659 switch(how) {
7660 case TARGET_SIG_BLOCK:
7661 how = SIG_BLOCK;
7662 break;
7663 case TARGET_SIG_UNBLOCK:
7664 how = SIG_UNBLOCK;
7665 break;
7666 case TARGET_SIG_SETMASK:
7667 how = SIG_SETMASK;
7668 break;
7669 default:
7670 ret = -TARGET_EINVAL;
7671 goto fail;
7673 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7674 goto efault;
7675 target_to_host_sigset(&set, p);
7676 unlock_user(p, arg2, 0);
7677 set_ptr = &set;
7678 } else {
7679 how = 0;
7680 set_ptr = NULL;
7682 ret = do_sigprocmask(how, set_ptr, &oldset);
7683 if (!is_error(ret) && arg3) {
7684 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7685 goto efault;
7686 host_to_target_sigset(p, &oldset);
7687 unlock_user(p, arg3, sizeof(target_sigset_t));
7690 break;
7691 #ifdef TARGET_NR_sigpending
7692 case TARGET_NR_sigpending:
7694 sigset_t set;
7695 ret = get_errno(sigpending(&set));
7696 if (!is_error(ret)) {
7697 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7698 goto efault;
7699 host_to_target_old_sigset(p, &set);
7700 unlock_user(p, arg1, sizeof(target_sigset_t));
7703 break;
7704 #endif
7705 case TARGET_NR_rt_sigpending:
7707 sigset_t set;
7708 ret = get_errno(sigpending(&set));
7709 if (!is_error(ret)) {
7710 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7711 goto efault;
7712 host_to_target_sigset(p, &set);
7713 unlock_user(p, arg1, sizeof(target_sigset_t));
7716 break;
7717 #ifdef TARGET_NR_sigsuspend
7718 case TARGET_NR_sigsuspend:
7720 TaskState *ts = cpu->opaque;
7721 #if defined(TARGET_ALPHA)
7722 abi_ulong mask = arg1;
7723 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
7724 #else
7725 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7726 goto efault;
7727 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
7728 unlock_user(p, arg1, 0);
7729 #endif
7730 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7731 SIGSET_T_SIZE));
7732 if (ret != -TARGET_ERESTARTSYS) {
7733 ts->in_sigsuspend = 1;
7736 break;
7737 #endif
7738 case TARGET_NR_rt_sigsuspend:
7740 TaskState *ts = cpu->opaque;
7741 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7742 goto efault;
7743 target_to_host_sigset(&ts->sigsuspend_mask, p);
7744 unlock_user(p, arg1, 0);
7745 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7746 SIGSET_T_SIZE));
7747 if (ret != -TARGET_ERESTARTSYS) {
7748 ts->in_sigsuspend = 1;
7751 break;
7752 case TARGET_NR_rt_sigtimedwait:
7754 sigset_t set;
7755 struct timespec uts, *puts;
7756 siginfo_t uinfo;
7758 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7759 goto efault;
7760 target_to_host_sigset(&set, p);
7761 unlock_user(p, arg1, 0);
7762 if (arg3) {
7763 puts = &uts;
7764 target_to_host_timespec(puts, arg3);
7765 } else {
7766 puts = NULL;
7768 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7769 SIGSET_T_SIZE));
7770 if (!is_error(ret)) {
7771 if (arg2) {
7772 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7774 if (!p) {
7775 goto efault;
7777 host_to_target_siginfo(p, &uinfo);
7778 unlock_user(p, arg2, sizeof(target_siginfo_t));
7780 ret = host_to_target_signal(ret);
7783 break;
7784 case TARGET_NR_rt_sigqueueinfo:
7786 siginfo_t uinfo;
7787 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7788 goto efault;
7789 target_to_host_siginfo(&uinfo, p);
7790 unlock_user(p, arg1, 0);
7791 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7793 break;
7794 #ifdef TARGET_NR_sigreturn
7795 case TARGET_NR_sigreturn:
7796 if (block_signals()) {
7797 ret = -TARGET_ERESTARTSYS;
7798 } else {
7799 ret = do_sigreturn(cpu_env);
7801 break;
7802 #endif
7803 case TARGET_NR_rt_sigreturn:
7804 if (block_signals()) {
7805 ret = -TARGET_ERESTARTSYS;
7806 } else {
7807 ret = do_rt_sigreturn(cpu_env);
7809 break;
7810 case TARGET_NR_sethostname:
7811 if (!(p = lock_user_string(arg1)))
7812 goto efault;
7813 ret = get_errno(sethostname(p, arg2));
7814 unlock_user(p, arg1, 0);
7815 break;
7816 case TARGET_NR_setrlimit:
7818 int resource = target_to_host_resource(arg1);
7819 struct target_rlimit *target_rlim;
7820 struct rlimit rlim;
7821 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7822 goto efault;
7823 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7824 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7825 unlock_user_struct(target_rlim, arg2, 0);
7826 ret = get_errno(setrlimit(resource, &rlim));
7828 break;
7829 case TARGET_NR_getrlimit:
7831 int resource = target_to_host_resource(arg1);
7832 struct target_rlimit *target_rlim;
7833 struct rlimit rlim;
7835 ret = get_errno(getrlimit(resource, &rlim));
7836 if (!is_error(ret)) {
7837 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7838 goto efault;
7839 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7840 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7841 unlock_user_struct(target_rlim, arg2, 1);
7844 break;
7845 case TARGET_NR_getrusage:
7847 struct rusage rusage;
7848 ret = get_errno(getrusage(arg1, &rusage));
7849 if (!is_error(ret)) {
7850 ret = host_to_target_rusage(arg2, &rusage);
7853 break;
7854 case TARGET_NR_gettimeofday:
7856 struct timeval tv;
7857 ret = get_errno(gettimeofday(&tv, NULL));
7858 if (!is_error(ret)) {
7859 if (copy_to_user_timeval(arg1, &tv))
7860 goto efault;
7863 break;
7864 case TARGET_NR_settimeofday:
7866 struct timeval tv, *ptv = NULL;
7867 struct timezone tz, *ptz = NULL;
7869 if (arg1) {
7870 if (copy_from_user_timeval(&tv, arg1)) {
7871 goto efault;
7873 ptv = &tv;
7876 if (arg2) {
7877 if (copy_from_user_timezone(&tz, arg2)) {
7878 goto efault;
7880 ptz = &tz;
7883 ret = get_errno(settimeofday(ptv, ptz));
7885 break;
7886 #if defined(TARGET_NR_select)
7887 case TARGET_NR_select:
7888 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7889 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7890 #else
7892 struct target_sel_arg_struct *sel;
7893 abi_ulong inp, outp, exp, tvp;
7894 long nsel;
7896 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7897 goto efault;
7898 nsel = tswapal(sel->n);
7899 inp = tswapal(sel->inp);
7900 outp = tswapal(sel->outp);
7901 exp = tswapal(sel->exp);
7902 tvp = tswapal(sel->tvp);
7903 unlock_user_struct(sel, arg1, 0);
7904 ret = do_select(nsel, inp, outp, exp, tvp);
7906 #endif
7907 break;
7908 #endif
7909 #ifdef TARGET_NR_pselect6
7910 case TARGET_NR_pselect6:
7912 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7913 fd_set rfds, wfds, efds;
7914 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7915 struct timespec ts, *ts_ptr;
7918 * The 6th arg is actually two args smashed together,
7919 * so we cannot use the C library.
7921 sigset_t set;
7922 struct {
7923 sigset_t *set;
7924 size_t size;
7925 } sig, *sig_ptr;
7927 abi_ulong arg_sigset, arg_sigsize, *arg7;
7928 target_sigset_t *target_sigset;
7930 n = arg1;
7931 rfd_addr = arg2;
7932 wfd_addr = arg3;
7933 efd_addr = arg4;
7934 ts_addr = arg5;
7936 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7937 if (ret) {
7938 goto fail;
7940 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7941 if (ret) {
7942 goto fail;
7944 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7945 if (ret) {
7946 goto fail;
7950 * This takes a timespec, and not a timeval, so we cannot
7951 * use the do_select() helper ...
7953 if (ts_addr) {
7954 if (target_to_host_timespec(&ts, ts_addr)) {
7955 goto efault;
7957 ts_ptr = &ts;
7958 } else {
7959 ts_ptr = NULL;
7962 /* Extract the two packed args for the sigset */
7963 if (arg6) {
7964 sig_ptr = &sig;
7965 sig.size = SIGSET_T_SIZE;
7967 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7968 if (!arg7) {
7969 goto efault;
7971 arg_sigset = tswapal(arg7[0]);
7972 arg_sigsize = tswapal(arg7[1]);
7973 unlock_user(arg7, arg6, 0);
7975 if (arg_sigset) {
7976 sig.set = &set;
7977 if (arg_sigsize != sizeof(*target_sigset)) {
7978 /* Like the kernel, we enforce correct size sigsets */
7979 ret = -TARGET_EINVAL;
7980 goto fail;
7982 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7983 sizeof(*target_sigset), 1);
7984 if (!target_sigset) {
7985 goto efault;
7987 target_to_host_sigset(&set, target_sigset);
7988 unlock_user(target_sigset, arg_sigset, 0);
7989 } else {
7990 sig.set = NULL;
7992 } else {
7993 sig_ptr = NULL;
7996 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7997 ts_ptr, sig_ptr));
7999 if (!is_error(ret)) {
8000 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8001 goto efault;
8002 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8003 goto efault;
8004 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8005 goto efault;
8007 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8008 goto efault;
8011 break;
8012 #endif
8013 #ifdef TARGET_NR_symlink
8014 case TARGET_NR_symlink:
8016 void *p2;
8017 p = lock_user_string(arg1);
8018 p2 = lock_user_string(arg2);
8019 if (!p || !p2)
8020 ret = -TARGET_EFAULT;
8021 else
8022 ret = get_errno(symlink(p, p2));
8023 unlock_user(p2, arg2, 0);
8024 unlock_user(p, arg1, 0);
8026 break;
8027 #endif
8028 #if defined(TARGET_NR_symlinkat)
8029 case TARGET_NR_symlinkat:
8031 void *p2;
8032 p = lock_user_string(arg1);
8033 p2 = lock_user_string(arg3);
8034 if (!p || !p2)
8035 ret = -TARGET_EFAULT;
8036 else
8037 ret = get_errno(symlinkat(p, arg2, p2));
8038 unlock_user(p2, arg3, 0);
8039 unlock_user(p, arg1, 0);
8041 break;
8042 #endif
8043 #ifdef TARGET_NR_oldlstat
8044 case TARGET_NR_oldlstat:
8045 goto unimplemented;
8046 #endif
8047 #ifdef TARGET_NR_readlink
8048 case TARGET_NR_readlink:
8050 void *p2;
8051 p = lock_user_string(arg1);
8052 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8053 if (!p || !p2) {
8054 ret = -TARGET_EFAULT;
8055 } else if (!arg3) {
8056 /* Short circuit this for the magic exe check. */
8057 ret = -TARGET_EINVAL;
8058 } else if (is_proc_myself((const char *)p, "exe")) {
8059 char real[PATH_MAX], *temp;
8060 temp = realpath(exec_path, real);
8061 /* Return value is # of bytes that we wrote to the buffer. */
8062 if (temp == NULL) {
8063 ret = get_errno(-1);
8064 } else {
8065 /* Don't worry about sign mismatch as earlier mapping
8066 * logic would have thrown a bad address error. */
8067 ret = MIN(strlen(real), arg3);
8068 /* We cannot NUL terminate the string. */
8069 memcpy(p2, real, ret);
8071 } else {
8072 ret = get_errno(readlink(path(p), p2, arg3));
8074 unlock_user(p2, arg2, ret);
8075 unlock_user(p, arg1, 0);
8077 break;
8078 #endif
8079 #if defined(TARGET_NR_readlinkat)
8080 case TARGET_NR_readlinkat:
8082 void *p2;
8083 p = lock_user_string(arg2);
8084 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8085 if (!p || !p2) {
8086 ret = -TARGET_EFAULT;
8087 } else if (is_proc_myself((const char *)p, "exe")) {
8088 char real[PATH_MAX], *temp;
8089 temp = realpath(exec_path, real);
8090 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8091 snprintf((char *)p2, arg4, "%s", real);
8092 } else {
8093 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8095 unlock_user(p2, arg3, ret);
8096 unlock_user(p, arg2, 0);
8098 break;
8099 #endif
8100 #ifdef TARGET_NR_uselib
8101 case TARGET_NR_uselib:
8102 goto unimplemented;
8103 #endif
8104 #ifdef TARGET_NR_swapon
8105 case TARGET_NR_swapon:
8106 if (!(p = lock_user_string(arg1)))
8107 goto efault;
8108 ret = get_errno(swapon(p, arg2));
8109 unlock_user(p, arg1, 0);
8110 break;
8111 #endif
8112 case TARGET_NR_reboot:
8113 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8114 /* arg4 must be ignored in all other cases */
8115 p = lock_user_string(arg4);
8116 if (!p) {
8117 goto efault;
8119 ret = get_errno(reboot(arg1, arg2, arg3, p));
8120 unlock_user(p, arg4, 0);
8121 } else {
8122 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8124 break;
8125 #ifdef TARGET_NR_readdir
8126 case TARGET_NR_readdir:
8127 goto unimplemented;
8128 #endif
8129 #ifdef TARGET_NR_mmap
8130 case TARGET_NR_mmap:
8131 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8132 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8133 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8134 || defined(TARGET_S390X)
8136 abi_ulong *v;
8137 abi_ulong v1, v2, v3, v4, v5, v6;
8138 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8139 goto efault;
8140 v1 = tswapal(v[0]);
8141 v2 = tswapal(v[1]);
8142 v3 = tswapal(v[2]);
8143 v4 = tswapal(v[3]);
8144 v5 = tswapal(v[4]);
8145 v6 = tswapal(v[5]);
8146 unlock_user(v, arg1, 0);
8147 ret = get_errno(target_mmap(v1, v2, v3,
8148 target_to_host_bitmask(v4, mmap_flags_tbl),
8149 v5, v6));
8151 #else
8152 ret = get_errno(target_mmap(arg1, arg2, arg3,
8153 target_to_host_bitmask(arg4, mmap_flags_tbl),
8154 arg5,
8155 arg6));
8156 #endif
8157 break;
8158 #endif
8159 #ifdef TARGET_NR_mmap2
8160 case TARGET_NR_mmap2:
8161 #ifndef MMAP_SHIFT
8162 #define MMAP_SHIFT 12
8163 #endif
8164 ret = get_errno(target_mmap(arg1, arg2, arg3,
8165 target_to_host_bitmask(arg4, mmap_flags_tbl),
8166 arg5,
8167 arg6 << MMAP_SHIFT));
8168 break;
8169 #endif
8170 case TARGET_NR_munmap:
8171 ret = get_errno(target_munmap(arg1, arg2));
8172 break;
8173 case TARGET_NR_mprotect:
8175 TaskState *ts = cpu->opaque;
8176 /* Special hack to detect libc making the stack executable. */
8177 if ((arg3 & PROT_GROWSDOWN)
8178 && arg1 >= ts->info->stack_limit
8179 && arg1 <= ts->info->start_stack) {
8180 arg3 &= ~PROT_GROWSDOWN;
8181 arg2 = arg2 + arg1 - ts->info->stack_limit;
8182 arg1 = ts->info->stack_limit;
8185 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8186 break;
8187 #ifdef TARGET_NR_mremap
8188 case TARGET_NR_mremap:
8189 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8190 break;
8191 #endif
8192 /* ??? msync/mlock/munlock are broken for softmmu. */
8193 #ifdef TARGET_NR_msync
8194 case TARGET_NR_msync:
8195 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8196 break;
8197 #endif
8198 #ifdef TARGET_NR_mlock
8199 case TARGET_NR_mlock:
8200 ret = get_errno(mlock(g2h(arg1), arg2));
8201 break;
8202 #endif
8203 #ifdef TARGET_NR_munlock
8204 case TARGET_NR_munlock:
8205 ret = get_errno(munlock(g2h(arg1), arg2));
8206 break;
8207 #endif
8208 #ifdef TARGET_NR_mlockall
8209 case TARGET_NR_mlockall:
8210 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8211 break;
8212 #endif
8213 #ifdef TARGET_NR_munlockall
8214 case TARGET_NR_munlockall:
8215 ret = get_errno(munlockall());
8216 break;
8217 #endif
8218 case TARGET_NR_truncate:
8219 if (!(p = lock_user_string(arg1)))
8220 goto efault;
8221 ret = get_errno(truncate(p, arg2));
8222 unlock_user(p, arg1, 0);
8223 break;
8224 case TARGET_NR_ftruncate:
8225 ret = get_errno(ftruncate(arg1, arg2));
8226 break;
8227 case TARGET_NR_fchmod:
8228 ret = get_errno(fchmod(arg1, arg2));
8229 break;
8230 #if defined(TARGET_NR_fchmodat)
8231 case TARGET_NR_fchmodat:
8232 if (!(p = lock_user_string(arg2)))
8233 goto efault;
8234 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8235 unlock_user(p, arg2, 0);
8236 break;
8237 #endif
8238 case TARGET_NR_getpriority:
8239 /* Note that negative values are valid for getpriority, so we must
8240 differentiate based on errno settings. */
8241 errno = 0;
8242 ret = getpriority(arg1, arg2);
8243 if (ret == -1 && errno != 0) {
8244 ret = -host_to_target_errno(errno);
8245 break;
8247 #ifdef TARGET_ALPHA
8248 /* Return value is the unbiased priority. Signal no error. */
8249 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8250 #else
8251 /* Return value is a biased priority to avoid negative numbers. */
8252 ret = 20 - ret;
8253 #endif
8254 break;
8255 case TARGET_NR_setpriority:
8256 ret = get_errno(setpriority(arg1, arg2, arg3));
8257 break;
8258 #ifdef TARGET_NR_profil
8259 case TARGET_NR_profil:
8260 goto unimplemented;
8261 #endif
8262 case TARGET_NR_statfs:
8263 if (!(p = lock_user_string(arg1)))
8264 goto efault;
8265 ret = get_errno(statfs(path(p), &stfs));
8266 unlock_user(p, arg1, 0);
8267 convert_statfs:
8268 if (!is_error(ret)) {
8269 struct target_statfs *target_stfs;
8271 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8272 goto efault;
8273 __put_user(stfs.f_type, &target_stfs->f_type);
8274 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8275 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8276 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8277 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8278 __put_user(stfs.f_files, &target_stfs->f_files);
8279 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8280 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8281 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8282 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8283 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8284 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8285 unlock_user_struct(target_stfs, arg2, 1);
8287 break;
8288 case TARGET_NR_fstatfs:
8289 ret = get_errno(fstatfs(arg1, &stfs));
8290 goto convert_statfs;
8291 #ifdef TARGET_NR_statfs64
8292 case TARGET_NR_statfs64:
8293 if (!(p = lock_user_string(arg1)))
8294 goto efault;
8295 ret = get_errno(statfs(path(p), &stfs));
8296 unlock_user(p, arg1, 0);
8297 convert_statfs64:
8298 if (!is_error(ret)) {
8299 struct target_statfs64 *target_stfs;
8301 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8302 goto efault;
8303 __put_user(stfs.f_type, &target_stfs->f_type);
8304 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8305 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8306 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8307 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8308 __put_user(stfs.f_files, &target_stfs->f_files);
8309 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8310 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8311 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8312 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8313 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8314 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8315 unlock_user_struct(target_stfs, arg3, 1);
8317 break;
8318 case TARGET_NR_fstatfs64:
8319 ret = get_errno(fstatfs(arg1, &stfs));
8320 goto convert_statfs64;
8321 #endif
8322 #ifdef TARGET_NR_ioperm
8323 case TARGET_NR_ioperm:
8324 goto unimplemented;
8325 #endif
8326 #ifdef TARGET_NR_socketcall
8327 case TARGET_NR_socketcall:
8328 ret = do_socketcall(arg1, arg2);
8329 break;
8330 #endif
8331 #ifdef TARGET_NR_accept
8332 case TARGET_NR_accept:
8333 ret = do_accept4(arg1, arg2, arg3, 0);
8334 break;
8335 #endif
8336 #ifdef TARGET_NR_accept4
8337 case TARGET_NR_accept4:
8338 ret = do_accept4(arg1, arg2, arg3, arg4);
8339 break;
8340 #endif
8341 #ifdef TARGET_NR_bind
8342 case TARGET_NR_bind:
8343 ret = do_bind(arg1, arg2, arg3);
8344 break;
8345 #endif
8346 #ifdef TARGET_NR_connect
8347 case TARGET_NR_connect:
8348 ret = do_connect(arg1, arg2, arg3);
8349 break;
8350 #endif
8351 #ifdef TARGET_NR_getpeername
8352 case TARGET_NR_getpeername:
8353 ret = do_getpeername(arg1, arg2, arg3);
8354 break;
8355 #endif
8356 #ifdef TARGET_NR_getsockname
8357 case TARGET_NR_getsockname:
8358 ret = do_getsockname(arg1, arg2, arg3);
8359 break;
8360 #endif
8361 #ifdef TARGET_NR_getsockopt
8362 case TARGET_NR_getsockopt:
8363 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8364 break;
8365 #endif
8366 #ifdef TARGET_NR_listen
8367 case TARGET_NR_listen:
8368 ret = get_errno(listen(arg1, arg2));
8369 break;
8370 #endif
8371 #ifdef TARGET_NR_recv
8372 case TARGET_NR_recv:
8373 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8374 break;
8375 #endif
8376 #ifdef TARGET_NR_recvfrom
8377 case TARGET_NR_recvfrom:
8378 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8379 break;
8380 #endif
8381 #ifdef TARGET_NR_recvmsg
8382 case TARGET_NR_recvmsg:
8383 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8384 break;
8385 #endif
8386 #ifdef TARGET_NR_send
8387 case TARGET_NR_send:
8388 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8389 break;
8390 #endif
8391 #ifdef TARGET_NR_sendmsg
8392 case TARGET_NR_sendmsg:
8393 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8394 break;
8395 #endif
8396 #ifdef TARGET_NR_sendmmsg
8397 case TARGET_NR_sendmmsg:
8398 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8399 break;
8400 case TARGET_NR_recvmmsg:
8401 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8402 break;
8403 #endif
8404 #ifdef TARGET_NR_sendto
8405 case TARGET_NR_sendto:
8406 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8407 break;
8408 #endif
8409 #ifdef TARGET_NR_shutdown
8410 case TARGET_NR_shutdown:
8411 ret = get_errno(shutdown(arg1, arg2));
8412 break;
8413 #endif
8414 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8415 case TARGET_NR_getrandom:
8416 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8417 if (!p) {
8418 goto efault;
8420 ret = get_errno(getrandom(p, arg2, arg3));
8421 unlock_user(p, arg1, ret);
8422 break;
8423 #endif
8424 #ifdef TARGET_NR_socket
8425 case TARGET_NR_socket:
8426 ret = do_socket(arg1, arg2, arg3);
8427 fd_trans_unregister(ret);
8428 break;
8429 #endif
8430 #ifdef TARGET_NR_socketpair
8431 case TARGET_NR_socketpair:
8432 ret = do_socketpair(arg1, arg2, arg3, arg4);
8433 break;
8434 #endif
8435 #ifdef TARGET_NR_setsockopt
8436 case TARGET_NR_setsockopt:
8437 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8438 break;
8439 #endif
8441 case TARGET_NR_syslog:
8442 if (!(p = lock_user_string(arg2)))
8443 goto efault;
8444 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8445 unlock_user(p, arg2, 0);
8446 break;
8448 case TARGET_NR_setitimer:
8450 struct itimerval value, ovalue, *pvalue;
8452 if (arg2) {
8453 pvalue = &value;
8454 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8455 || copy_from_user_timeval(&pvalue->it_value,
8456 arg2 + sizeof(struct target_timeval)))
8457 goto efault;
8458 } else {
8459 pvalue = NULL;
8461 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8462 if (!is_error(ret) && arg3) {
8463 if (copy_to_user_timeval(arg3,
8464 &ovalue.it_interval)
8465 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8466 &ovalue.it_value))
8467 goto efault;
8470 break;
8471 case TARGET_NR_getitimer:
8473 struct itimerval value;
8475 ret = get_errno(getitimer(arg1, &value));
8476 if (!is_error(ret) && arg2) {
8477 if (copy_to_user_timeval(arg2,
8478 &value.it_interval)
8479 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8480 &value.it_value))
8481 goto efault;
8484 break;
8485 #ifdef TARGET_NR_stat
8486 case TARGET_NR_stat:
8487 if (!(p = lock_user_string(arg1)))
8488 goto efault;
8489 ret = get_errno(stat(path(p), &st));
8490 unlock_user(p, arg1, 0);
8491 goto do_stat;
8492 #endif
8493 #ifdef TARGET_NR_lstat
8494 case TARGET_NR_lstat:
8495 if (!(p = lock_user_string(arg1)))
8496 goto efault;
8497 ret = get_errno(lstat(path(p), &st));
8498 unlock_user(p, arg1, 0);
8499 goto do_stat;
8500 #endif
8501 case TARGET_NR_fstat:
8503 ret = get_errno(fstat(arg1, &st));
8504 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8505 do_stat:
8506 #endif
8507 if (!is_error(ret)) {
8508 struct target_stat *target_st;
8510 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8511 goto efault;
8512 memset(target_st, 0, sizeof(*target_st));
8513 __put_user(st.st_dev, &target_st->st_dev);
8514 __put_user(st.st_ino, &target_st->st_ino);
8515 __put_user(st.st_mode, &target_st->st_mode);
8516 __put_user(st.st_uid, &target_st->st_uid);
8517 __put_user(st.st_gid, &target_st->st_gid);
8518 __put_user(st.st_nlink, &target_st->st_nlink);
8519 __put_user(st.st_rdev, &target_st->st_rdev);
8520 __put_user(st.st_size, &target_st->st_size);
8521 __put_user(st.st_blksize, &target_st->st_blksize);
8522 __put_user(st.st_blocks, &target_st->st_blocks);
8523 __put_user(st.st_atime, &target_st->target_st_atime);
8524 __put_user(st.st_mtime, &target_st->target_st_mtime);
8525 __put_user(st.st_ctime, &target_st->target_st_ctime);
8526 unlock_user_struct(target_st, arg2, 1);
8529 break;
8530 #ifdef TARGET_NR_olduname
8531 case TARGET_NR_olduname:
8532 goto unimplemented;
8533 #endif
8534 #ifdef TARGET_NR_iopl
8535 case TARGET_NR_iopl:
8536 goto unimplemented;
8537 #endif
8538 case TARGET_NR_vhangup:
8539 ret = get_errno(vhangup());
8540 break;
8541 #ifdef TARGET_NR_idle
8542 case TARGET_NR_idle:
8543 goto unimplemented;
8544 #endif
8545 #ifdef TARGET_NR_syscall
8546 case TARGET_NR_syscall:
8547 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8548 arg6, arg7, arg8, 0);
8549 break;
8550 #endif
8551 case TARGET_NR_wait4:
8553 int status;
8554 abi_long status_ptr = arg2;
8555 struct rusage rusage, *rusage_ptr;
8556 abi_ulong target_rusage = arg4;
8557 abi_long rusage_err;
8558 if (target_rusage)
8559 rusage_ptr = &rusage;
8560 else
8561 rusage_ptr = NULL;
8562 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8563 if (!is_error(ret)) {
8564 if (status_ptr && ret) {
8565 status = host_to_target_waitstatus(status);
8566 if (put_user_s32(status, status_ptr))
8567 goto efault;
8569 if (target_rusage) {
8570 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8571 if (rusage_err) {
8572 ret = rusage_err;
8577 break;
8578 #ifdef TARGET_NR_swapoff
8579 case TARGET_NR_swapoff:
8580 if (!(p = lock_user_string(arg1)))
8581 goto efault;
8582 ret = get_errno(swapoff(p));
8583 unlock_user(p, arg1, 0);
8584 break;
8585 #endif
8586 case TARGET_NR_sysinfo:
8588 struct target_sysinfo *target_value;
8589 struct sysinfo value;
8590 ret = get_errno(sysinfo(&value));
8591 if (!is_error(ret) && arg1)
8593 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8594 goto efault;
8595 __put_user(value.uptime, &target_value->uptime);
8596 __put_user(value.loads[0], &target_value->loads[0]);
8597 __put_user(value.loads[1], &target_value->loads[1]);
8598 __put_user(value.loads[2], &target_value->loads[2]);
8599 __put_user(value.totalram, &target_value->totalram);
8600 __put_user(value.freeram, &target_value->freeram);
8601 __put_user(value.sharedram, &target_value->sharedram);
8602 __put_user(value.bufferram, &target_value->bufferram);
8603 __put_user(value.totalswap, &target_value->totalswap);
8604 __put_user(value.freeswap, &target_value->freeswap);
8605 __put_user(value.procs, &target_value->procs);
8606 __put_user(value.totalhigh, &target_value->totalhigh);
8607 __put_user(value.freehigh, &target_value->freehigh);
8608 __put_user(value.mem_unit, &target_value->mem_unit);
8609 unlock_user_struct(target_value, arg1, 1);
8612 break;
8613 #ifdef TARGET_NR_ipc
8614 case TARGET_NR_ipc:
8615 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8616 break;
8617 #endif
8618 #ifdef TARGET_NR_semget
8619 case TARGET_NR_semget:
8620 ret = get_errno(semget(arg1, arg2, arg3));
8621 break;
8622 #endif
8623 #ifdef TARGET_NR_semop
8624 case TARGET_NR_semop:
8625 ret = do_semop(arg1, arg2, arg3);
8626 break;
8627 #endif
8628 #ifdef TARGET_NR_semctl
8629 case TARGET_NR_semctl:
8630 ret = do_semctl(arg1, arg2, arg3, arg4);
8631 break;
8632 #endif
8633 #ifdef TARGET_NR_msgctl
8634 case TARGET_NR_msgctl:
8635 ret = do_msgctl(arg1, arg2, arg3);
8636 break;
8637 #endif
8638 #ifdef TARGET_NR_msgget
8639 case TARGET_NR_msgget:
8640 ret = get_errno(msgget(arg1, arg2));
8641 break;
8642 #endif
8643 #ifdef TARGET_NR_msgrcv
8644 case TARGET_NR_msgrcv:
8645 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8646 break;
8647 #endif
8648 #ifdef TARGET_NR_msgsnd
8649 case TARGET_NR_msgsnd:
8650 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8651 break;
8652 #endif
8653 #ifdef TARGET_NR_shmget
8654 case TARGET_NR_shmget:
8655 ret = get_errno(shmget(arg1, arg2, arg3));
8656 break;
8657 #endif
8658 #ifdef TARGET_NR_shmctl
8659 case TARGET_NR_shmctl:
8660 ret = do_shmctl(arg1, arg2, arg3);
8661 break;
8662 #endif
8663 #ifdef TARGET_NR_shmat
8664 case TARGET_NR_shmat:
8665 ret = do_shmat(arg1, arg2, arg3);
8666 break;
8667 #endif
8668 #ifdef TARGET_NR_shmdt
8669 case TARGET_NR_shmdt:
8670 ret = do_shmdt(arg1);
8671 break;
8672 #endif
8673 case TARGET_NR_fsync:
8674 ret = get_errno(fsync(arg1));
8675 break;
8676 case TARGET_NR_clone:
8677 /* Linux manages to have three different orderings for its
8678 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8679 * match the kernel's CONFIG_CLONE_* settings.
8680 * Microblaze is further special in that it uses a sixth
8681 * implicit argument to clone for the TLS pointer.
8683 #if defined(TARGET_MICROBLAZE)
8684 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8685 #elif defined(TARGET_CLONE_BACKWARDS)
8686 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8687 #elif defined(TARGET_CLONE_BACKWARDS2)
8688 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8689 #else
8690 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8691 #endif
8692 break;
8693 #ifdef __NR_exit_group
8694 /* new thread calls */
8695 case TARGET_NR_exit_group:
8696 #ifdef TARGET_GPROF
8697 _mcleanup();
8698 #endif
8699 gdb_exit(cpu_env, arg1);
8700 ret = get_errno(exit_group(arg1));
8701 break;
8702 #endif
8703 case TARGET_NR_setdomainname:
8704 if (!(p = lock_user_string(arg1)))
8705 goto efault;
8706 ret = get_errno(setdomainname(p, arg2));
8707 unlock_user(p, arg1, 0);
8708 break;
8709 case TARGET_NR_uname:
8710 /* no need to transcode because we use the linux syscall */
8712 struct new_utsname * buf;
8714 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8715 goto efault;
8716 ret = get_errno(sys_uname(buf));
8717 if (!is_error(ret)) {
8718 /* Overrite the native machine name with whatever is being
8719 emulated. */
8720 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8721 /* Allow the user to override the reported release. */
8722 if (qemu_uname_release && *qemu_uname_release)
8723 strcpy (buf->release, qemu_uname_release);
8725 unlock_user_struct(buf, arg1, 1);
8727 break;
8728 #ifdef TARGET_I386
8729 case TARGET_NR_modify_ldt:
8730 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8731 break;
8732 #if !defined(TARGET_X86_64)
8733 case TARGET_NR_vm86old:
8734 goto unimplemented;
8735 case TARGET_NR_vm86:
8736 ret = do_vm86(cpu_env, arg1, arg2);
8737 break;
8738 #endif
8739 #endif
8740 case TARGET_NR_adjtimex:
8741 goto unimplemented;
8742 #ifdef TARGET_NR_create_module
8743 case TARGET_NR_create_module:
8744 #endif
8745 case TARGET_NR_init_module:
8746 case TARGET_NR_delete_module:
8747 #ifdef TARGET_NR_get_kernel_syms
8748 case TARGET_NR_get_kernel_syms:
8749 #endif
8750 goto unimplemented;
8751 case TARGET_NR_quotactl:
8752 goto unimplemented;
8753 case TARGET_NR_getpgid:
8754 ret = get_errno(getpgid(arg1));
8755 break;
8756 case TARGET_NR_fchdir:
8757 ret = get_errno(fchdir(arg1));
8758 break;
8759 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8760 case TARGET_NR_bdflush:
8761 goto unimplemented;
8762 #endif
8763 #ifdef TARGET_NR_sysfs
8764 case TARGET_NR_sysfs:
8765 goto unimplemented;
8766 #endif
8767 case TARGET_NR_personality:
8768 ret = get_errno(personality(arg1));
8769 break;
8770 #ifdef TARGET_NR_afs_syscall
8771 case TARGET_NR_afs_syscall:
8772 goto unimplemented;
8773 #endif
8774 #ifdef TARGET_NR__llseek /* Not on alpha */
8775 case TARGET_NR__llseek:
8777 int64_t res;
8778 #if !defined(__NR_llseek)
8779 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8780 if (res == -1) {
8781 ret = get_errno(res);
8782 } else {
8783 ret = 0;
8785 #else
8786 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8787 #endif
8788 if ((ret == 0) && put_user_s64(res, arg4)) {
8789 goto efault;
8792 break;
8793 #endif
8794 #ifdef TARGET_NR_getdents
8795 case TARGET_NR_getdents:
8796 #ifdef __NR_getdents
8797 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8799 struct target_dirent *target_dirp;
8800 struct linux_dirent *dirp;
8801 abi_long count = arg3;
8803 dirp = g_try_malloc(count);
8804 if (!dirp) {
8805 ret = -TARGET_ENOMEM;
8806 goto fail;
8809 ret = get_errno(sys_getdents(arg1, dirp, count));
8810 if (!is_error(ret)) {
8811 struct linux_dirent *de;
8812 struct target_dirent *tde;
8813 int len = ret;
8814 int reclen, treclen;
8815 int count1, tnamelen;
8817 count1 = 0;
8818 de = dirp;
8819 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8820 goto efault;
8821 tde = target_dirp;
8822 while (len > 0) {
8823 reclen = de->d_reclen;
8824 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8825 assert(tnamelen >= 0);
8826 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8827 assert(count1 + treclen <= count);
8828 tde->d_reclen = tswap16(treclen);
8829 tde->d_ino = tswapal(de->d_ino);
8830 tde->d_off = tswapal(de->d_off);
8831 memcpy(tde->d_name, de->d_name, tnamelen);
8832 de = (struct linux_dirent *)((char *)de + reclen);
8833 len -= reclen;
8834 tde = (struct target_dirent *)((char *)tde + treclen);
8835 count1 += treclen;
8837 ret = count1;
8838 unlock_user(target_dirp, arg2, ret);
8840 g_free(dirp);
8842 #else
8844 struct linux_dirent *dirp;
8845 abi_long count = arg3;
8847 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8848 goto efault;
8849 ret = get_errno(sys_getdents(arg1, dirp, count));
8850 if (!is_error(ret)) {
8851 struct linux_dirent *de;
8852 int len = ret;
8853 int reclen;
8854 de = dirp;
8855 while (len > 0) {
8856 reclen = de->d_reclen;
8857 if (reclen > len)
8858 break;
8859 de->d_reclen = tswap16(reclen);
8860 tswapls(&de->d_ino);
8861 tswapls(&de->d_off);
8862 de = (struct linux_dirent *)((char *)de + reclen);
8863 len -= reclen;
8866 unlock_user(dirp, arg2, ret);
8868 #endif
8869 #else
8870 /* Implement getdents in terms of getdents64 */
8872 struct linux_dirent64 *dirp;
8873 abi_long count = arg3;
8875 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8876 if (!dirp) {
8877 goto efault;
8879 ret = get_errno(sys_getdents64(arg1, dirp, count));
8880 if (!is_error(ret)) {
8881 /* Convert the dirent64 structs to target dirent. We do this
8882 * in-place, since we can guarantee that a target_dirent is no
8883 * larger than a dirent64; however this means we have to be
8884 * careful to read everything before writing in the new format.
8886 struct linux_dirent64 *de;
8887 struct target_dirent *tde;
8888 int len = ret;
8889 int tlen = 0;
8891 de = dirp;
8892 tde = (struct target_dirent *)dirp;
8893 while (len > 0) {
8894 int namelen, treclen;
8895 int reclen = de->d_reclen;
8896 uint64_t ino = de->d_ino;
8897 int64_t off = de->d_off;
8898 uint8_t type = de->d_type;
8900 namelen = strlen(de->d_name);
8901 treclen = offsetof(struct target_dirent, d_name)
8902 + namelen + 2;
8903 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8905 memmove(tde->d_name, de->d_name, namelen + 1);
8906 tde->d_ino = tswapal(ino);
8907 tde->d_off = tswapal(off);
8908 tde->d_reclen = tswap16(treclen);
8909 /* The target_dirent type is in what was formerly a padding
8910 * byte at the end of the structure:
8912 *(((char *)tde) + treclen - 1) = type;
8914 de = (struct linux_dirent64 *)((char *)de + reclen);
8915 tde = (struct target_dirent *)((char *)tde + treclen);
8916 len -= reclen;
8917 tlen += treclen;
8919 ret = tlen;
8921 unlock_user(dirp, arg2, ret);
8923 #endif
8924 break;
8925 #endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            /* Host and target dirent64 layouts match except for byte
             * order, so swap the fixed-width fields in place and hand
             * the buffer straight back to the guest.
             */
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* Stop if the kernel handed back a truncated record. */
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        break;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        /* "New" select: all five arguments passed directly in registers
         * (as opposed to the old i386 select that took a parameter block).
         */
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        {
            /* poll and ppoll share the pollfd marshalling; both are
             * implemented on top of safe_ppoll() so that pending guest
             * signals can interrupt the wait.
             */
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            struct pollfd *pfd;
            unsigned int i;

            pfd = NULL;
            target_pfd = NULL;
            if (nfds) {
                target_pfd = lock_user(VERIFY_WRITE, arg1,
                                       sizeof(struct target_pollfd) * nfds, 1);
                if (!target_pfd) {
                    goto efault;
                }

                pfd = alloca(sizeof(struct pollfd) * nfds);
                for (i = 0; i < nfds; i++) {
                    pfd[i].fd = tswap32(target_pfd[i].fd);
                    pfd[i].events = tswap16(target_pfd[i].events);
                }
            }

            switch (num) {
# ifdef TARGET_NR_ppoll
            case TARGET_NR_ppoll:
            {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                /* NULL timeout means "wait forever"; NULL sigmask means
                 * "leave the signal mask alone".
                 */
                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                } else {
                    timeout_ts = NULL;
                }

                if (arg4) {
                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                           set, SIGSET_T_SIZE));

                /* The kernel updates the timeout with the time remaining. */
                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
                break;
            }
# endif
# ifdef TARGET_NR_poll
            case TARGET_NR_poll:
            {
                struct timespec ts, *pts;

                if (arg3 >= 0) {
                    /* Convert ms to secs, ns */
                    ts.tv_sec = arg3 / 1000;
                    ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                    pts = &ts;
                } else {
                    /* -ve poll() timeout means "infinite" */
                    pts = NULL;
                }
                ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
                break;
            }
# endif
            default:
                g_assert_not_reached();
            }

            if (!is_error(ret)) {
                for (i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        break;
#endif
9059 case TARGET_NR_flock:
9060 /* NOTE: the flock constant seems to be the same for every
9061 Linux platform */
9062 ret = get_errno(safe_flock(arg1, arg2));
9063 break;
9064 case TARGET_NR_readv:
9066 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9067 if (vec != NULL) {
9068 ret = get_errno(safe_readv(arg1, vec, arg3));
9069 unlock_iovec(vec, arg2, arg3, 1);
9070 } else {
9071 ret = -host_to_target_errno(errno);
9074 break;
9075 case TARGET_NR_writev:
9077 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9078 if (vec != NULL) {
9079 ret = get_errno(safe_writev(arg1, vec, arg3));
9080 unlock_iovec(vec, arg2, arg3, 0);
9081 } else {
9082 ret = -host_to_target_errno(errno);
9085 break;
9086 case TARGET_NR_getsid:
9087 ret = get_errno(getsid(arg1));
9088 break;
9089 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9090 case TARGET_NR_fdatasync:
9091 ret = get_errno(fdatasync(arg1));
9092 break;
9093 #endif
9094 #ifdef TARGET_NR__sysctl
9095 case TARGET_NR__sysctl:
9096 /* We don't implement this, but ENOTDIR is always a safe
9097 return value. */
9098 ret = -TARGET_ENOTDIR;
9099 break;
9100 #endif
9101 case TARGET_NR_sched_getaffinity:
9103 unsigned int mask_size;
9104 unsigned long *mask;
9107 * sched_getaffinity needs multiples of ulong, so need to take
9108 * care of mismatches between target ulong and host ulong sizes.
9110 if (arg2 & (sizeof(abi_ulong) - 1)) {
9111 ret = -TARGET_EINVAL;
9112 break;
9114 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9116 mask = alloca(mask_size);
9117 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9119 if (!is_error(ret)) {
9120 if (ret > arg2) {
9121 /* More data returned than the caller's buffer will fit.
9122 * This only happens if sizeof(abi_long) < sizeof(long)
9123 * and the caller passed us a buffer holding an odd number
9124 * of abi_longs. If the host kernel is actually using the
9125 * extra 4 bytes then fail EINVAL; otherwise we can just
9126 * ignore them and only copy the interesting part.
9128 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9129 if (numcpus > arg2 * 8) {
9130 ret = -TARGET_EINVAL;
9131 break;
9133 ret = arg2;
9136 if (copy_to_user(arg3, mask, ret)) {
9137 goto efault;
9141 break;
9142 case TARGET_NR_sched_setaffinity:
9144 unsigned int mask_size;
9145 unsigned long *mask;
9148 * sched_setaffinity needs multiples of ulong, so need to take
9149 * care of mismatches between target ulong and host ulong sizes.
9151 if (arg2 & (sizeof(abi_ulong) - 1)) {
9152 ret = -TARGET_EINVAL;
9153 break;
9155 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9157 mask = alloca(mask_size);
9158 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9159 goto efault;
9161 memcpy(mask, p, arg2);
9162 unlock_user_struct(p, arg2, 0);
9164 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9166 break;
9167 case TARGET_NR_sched_setparam:
9169 struct sched_param *target_schp;
9170 struct sched_param schp;
9172 if (arg2 == 0) {
9173 return -TARGET_EINVAL;
9175 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9176 goto efault;
9177 schp.sched_priority = tswap32(target_schp->sched_priority);
9178 unlock_user_struct(target_schp, arg2, 0);
9179 ret = get_errno(sched_setparam(arg1, &schp));
9181 break;
9182 case TARGET_NR_sched_getparam:
9184 struct sched_param *target_schp;
9185 struct sched_param schp;
9187 if (arg2 == 0) {
9188 return -TARGET_EINVAL;
9190 ret = get_errno(sched_getparam(arg1, &schp));
9191 if (!is_error(ret)) {
9192 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9193 goto efault;
9194 target_schp->sched_priority = tswap32(schp.sched_priority);
9195 unlock_user_struct(target_schp, arg2, 1);
9198 break;
9199 case TARGET_NR_sched_setscheduler:
9201 struct sched_param *target_schp;
9202 struct sched_param schp;
9203 if (arg3 == 0) {
9204 return -TARGET_EINVAL;
9206 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9207 goto efault;
9208 schp.sched_priority = tswap32(target_schp->sched_priority);
9209 unlock_user_struct(target_schp, arg3, 0);
9210 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9212 break;
9213 case TARGET_NR_sched_getscheduler:
9214 ret = get_errno(sched_getscheduler(arg1));
9215 break;
9216 case TARGET_NR_sched_yield:
9217 ret = get_errno(sched_yield());
9218 break;
9219 case TARGET_NR_sched_get_priority_max:
9220 ret = get_errno(sched_get_priority_max(arg1));
9221 break;
9222 case TARGET_NR_sched_get_priority_min:
9223 ret = get_errno(sched_get_priority_min(arg1));
9224 break;
9225 case TARGET_NR_sched_rr_get_interval:
9227 struct timespec ts;
9228 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9229 if (!is_error(ret)) {
9230 ret = host_to_target_timespec(arg2, &ts);
9233 break;
9234 case TARGET_NR_nanosleep:
9236 struct timespec req, rem;
9237 target_to_host_timespec(&req, arg1);
9238 ret = get_errno(safe_nanosleep(&req, &rem));
9239 if (is_error(ret) && arg2) {
9240 host_to_target_timespec(arg2, &rem);
9243 break;
9244 #ifdef TARGET_NR_query_module
9245 case TARGET_NR_query_module:
9246 goto unimplemented;
9247 #endif
9248 #ifdef TARGET_NR_nfsservctl
9249 case TARGET_NR_nfsservctl:
9250 goto unimplemented;
9251 #endif
9252 case TARGET_NR_prctl:
9253 switch (arg1) {
9254 case PR_GET_PDEATHSIG:
9256 int deathsig;
9257 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9258 if (!is_error(ret) && arg2
9259 && put_user_ual(deathsig, arg2)) {
9260 goto efault;
9262 break;
9264 #ifdef PR_GET_NAME
9265 case PR_GET_NAME:
9267 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9268 if (!name) {
9269 goto efault;
9271 ret = get_errno(prctl(arg1, (unsigned long)name,
9272 arg3, arg4, arg5));
9273 unlock_user(name, arg2, 16);
9274 break;
9276 case PR_SET_NAME:
9278 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9279 if (!name) {
9280 goto efault;
9282 ret = get_errno(prctl(arg1, (unsigned long)name,
9283 arg3, arg4, arg5));
9284 unlock_user(name, arg2, 0);
9285 break;
9287 #endif
9288 default:
9289 /* Most prctl options have no pointer arguments */
9290 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9291 break;
9293 break;
9294 #ifdef TARGET_NR_arch_prctl
9295 case TARGET_NR_arch_prctl:
9296 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9297 ret = do_arch_prctl(cpu_env, arg1, arg2);
9298 break;
9299 #else
9300 goto unimplemented;
9301 #endif
9302 #endif
9303 #ifdef TARGET_NR_pread64
9304 case TARGET_NR_pread64:
9305 if (regpairs_aligned(cpu_env)) {
9306 arg4 = arg5;
9307 arg5 = arg6;
9309 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9310 goto efault;
9311 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9312 unlock_user(p, arg2, ret);
9313 break;
9314 case TARGET_NR_pwrite64:
9315 if (regpairs_aligned(cpu_env)) {
9316 arg4 = arg5;
9317 arg5 = arg6;
9319 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9320 goto efault;
9321 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9322 unlock_user(p, arg2, 0);
9323 break;
9324 #endif
9325 case TARGET_NR_getcwd:
9326 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9327 goto efault;
9328 ret = get_errno(sys_getcwd1(p, arg2));
9329 unlock_user(p, arg1, ret);
9330 break;
9331 case TARGET_NR_capget:
9332 case TARGET_NR_capset:
9334 struct target_user_cap_header *target_header;
9335 struct target_user_cap_data *target_data = NULL;
9336 struct __user_cap_header_struct header;
9337 struct __user_cap_data_struct data[2];
9338 struct __user_cap_data_struct *dataptr = NULL;
9339 int i, target_datalen;
9340 int data_items = 1;
9342 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9343 goto efault;
9345 header.version = tswap32(target_header->version);
9346 header.pid = tswap32(target_header->pid);
9348 if (header.version != _LINUX_CAPABILITY_VERSION) {
9349 /* Version 2 and up takes pointer to two user_data structs */
9350 data_items = 2;
9353 target_datalen = sizeof(*target_data) * data_items;
9355 if (arg2) {
9356 if (num == TARGET_NR_capget) {
9357 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9358 } else {
9359 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9361 if (!target_data) {
9362 unlock_user_struct(target_header, arg1, 0);
9363 goto efault;
9366 if (num == TARGET_NR_capset) {
9367 for (i = 0; i < data_items; i++) {
9368 data[i].effective = tswap32(target_data[i].effective);
9369 data[i].permitted = tswap32(target_data[i].permitted);
9370 data[i].inheritable = tswap32(target_data[i].inheritable);
9374 dataptr = data;
9377 if (num == TARGET_NR_capget) {
9378 ret = get_errno(capget(&header, dataptr));
9379 } else {
9380 ret = get_errno(capset(&header, dataptr));
9383 /* The kernel always updates version for both capget and capset */
9384 target_header->version = tswap32(header.version);
9385 unlock_user_struct(target_header, arg1, 1);
9387 if (arg2) {
9388 if (num == TARGET_NR_capget) {
9389 for (i = 0; i < data_items; i++) {
9390 target_data[i].effective = tswap32(data[i].effective);
9391 target_data[i].permitted = tswap32(data[i].permitted);
9392 target_data[i].inheritable = tswap32(data[i].inheritable);
9394 unlock_user(target_data, arg2, target_datalen);
9395 } else {
9396 unlock_user(target_data, arg2, 0);
9399 break;
9401 case TARGET_NR_sigaltstack:
9402 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9403 break;
9405 #ifdef CONFIG_SENDFILE
9406 case TARGET_NR_sendfile:
9408 off_t *offp = NULL;
9409 off_t off;
9410 if (arg3) {
9411 ret = get_user_sal(off, arg3);
9412 if (is_error(ret)) {
9413 break;
9415 offp = &off;
9417 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9418 if (!is_error(ret) && arg3) {
9419 abi_long ret2 = put_user_sal(off, arg3);
9420 if (is_error(ret2)) {
9421 ret = ret2;
9424 break;
9426 #ifdef TARGET_NR_sendfile64
9427 case TARGET_NR_sendfile64:
9429 off_t *offp = NULL;
9430 off_t off;
9431 if (arg3) {
9432 ret = get_user_s64(off, arg3);
9433 if (is_error(ret)) {
9434 break;
9436 offp = &off;
9438 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9439 if (!is_error(ret) && arg3) {
9440 abi_long ret2 = put_user_s64(off, arg3);
9441 if (is_error(ret2)) {
9442 ret = ret2;
9445 break;
9447 #endif
9448 #else
9449 case TARGET_NR_sendfile:
9450 #ifdef TARGET_NR_sendfile64
9451 case TARGET_NR_sendfile64:
9452 #endif
9453 goto unimplemented;
9454 #endif
9456 #ifdef TARGET_NR_getpmsg
9457 case TARGET_NR_getpmsg:
9458 goto unimplemented;
9459 #endif
9460 #ifdef TARGET_NR_putpmsg
9461 case TARGET_NR_putpmsg:
9462 goto unimplemented;
9463 #endif
9464 #ifdef TARGET_NR_vfork
9465 case TARGET_NR_vfork:
9466 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9467 0, 0, 0, 0));
9468 break;
9469 #endif
9470 #ifdef TARGET_NR_ugetrlimit
9471 case TARGET_NR_ugetrlimit:
9473 struct rlimit rlim;
9474 int resource = target_to_host_resource(arg1);
9475 ret = get_errno(getrlimit(resource, &rlim));
9476 if (!is_error(ret)) {
9477 struct target_rlimit *target_rlim;
9478 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9479 goto efault;
9480 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9481 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9482 unlock_user_struct(target_rlim, arg2, 1);
9484 break;
9486 #endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        /* target_truncate64 handles the 64-bit offset register-pair
         * splitting per target ABI.
         */
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        break;
#endif
9541 #ifdef TARGET_NR_lchown
9542 case TARGET_NR_lchown:
9543 if (!(p = lock_user_string(arg1)))
9544 goto efault;
9545 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9546 unlock_user(p, arg1, 0);
9547 break;
9548 #endif
9549 #ifdef TARGET_NR_getuid
9550 case TARGET_NR_getuid:
9551 ret = get_errno(high2lowuid(getuid()));
9552 break;
9553 #endif
9554 #ifdef TARGET_NR_getgid
9555 case TARGET_NR_getgid:
9556 ret = get_errno(high2lowgid(getgid()));
9557 break;
9558 #endif
9559 #ifdef TARGET_NR_geteuid
9560 case TARGET_NR_geteuid:
9561 ret = get_errno(high2lowuid(geteuid()));
9562 break;
9563 #endif
9564 #ifdef TARGET_NR_getegid
9565 case TARGET_NR_getegid:
9566 ret = get_errno(high2lowgid(getegid()));
9567 break;
9568 #endif
9569 case TARGET_NR_setreuid:
9570 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9571 break;
9572 case TARGET_NR_setregid:
9573 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9574 break;
9575 case TARGET_NR_getgroups:
9577 int gidsetsize = arg1;
9578 target_id *target_grouplist;
9579 gid_t *grouplist;
9580 int i;
9582 grouplist = alloca(gidsetsize * sizeof(gid_t));
9583 ret = get_errno(getgroups(gidsetsize, grouplist));
9584 if (gidsetsize == 0)
9585 break;
9586 if (!is_error(ret)) {
9587 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9588 if (!target_grouplist)
9589 goto efault;
9590 for(i = 0;i < ret; i++)
9591 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9592 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9595 break;
9596 case TARGET_NR_setgroups:
9598 int gidsetsize = arg1;
9599 target_id *target_grouplist;
9600 gid_t *grouplist = NULL;
9601 int i;
9602 if (gidsetsize) {
9603 grouplist = alloca(gidsetsize * sizeof(gid_t));
9604 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9605 if (!target_grouplist) {
9606 ret = -TARGET_EFAULT;
9607 goto fail;
9609 for (i = 0; i < gidsetsize; i++) {
9610 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9612 unlock_user(target_grouplist, arg2, 0);
9614 ret = get_errno(setgroups(gidsetsize, grouplist));
9616 break;
9617 case TARGET_NR_fchown:
9618 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9619 break;
9620 #if defined(TARGET_NR_fchownat)
9621 case TARGET_NR_fchownat:
9622 if (!(p = lock_user_string(arg2)))
9623 goto efault;
9624 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9625 low2highgid(arg4), arg5));
9626 unlock_user(p, arg2, 0);
9627 break;
9628 #endif
9629 #ifdef TARGET_NR_setresuid
9630 case TARGET_NR_setresuid:
9631 ret = get_errno(sys_setresuid(low2highuid(arg1),
9632 low2highuid(arg2),
9633 low2highuid(arg3)));
9634 break;
9635 #endif
9636 #ifdef TARGET_NR_getresuid
9637 case TARGET_NR_getresuid:
9639 uid_t ruid, euid, suid;
9640 ret = get_errno(getresuid(&ruid, &euid, &suid));
9641 if (!is_error(ret)) {
9642 if (put_user_id(high2lowuid(ruid), arg1)
9643 || put_user_id(high2lowuid(euid), arg2)
9644 || put_user_id(high2lowuid(suid), arg3))
9645 goto efault;
9648 break;
9649 #endif
/* Bug fix: this case was guarded by #ifdef TARGET_NR_getresgid, so on a
 * target defining setresgid but not getresgid the case would silently be
 * compiled out (and vice versa would fail to build).  Guard it with its
 * own syscall number.
 */
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        ret = get_errno(sys_setresgid(low2highgid(arg1),
                                      low2highgid(arg2),
                                      low2highgid(arg3)));
        break;
#endif
9657 #ifdef TARGET_NR_getresgid
9658 case TARGET_NR_getresgid:
9660 gid_t rgid, egid, sgid;
9661 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9662 if (!is_error(ret)) {
9663 if (put_user_id(high2lowgid(rgid), arg1)
9664 || put_user_id(high2lowgid(egid), arg2)
9665 || put_user_id(high2lowgid(sgid), arg3))
9666 goto efault;
9669 break;
9670 #endif
9671 #ifdef TARGET_NR_chown
9672 case TARGET_NR_chown:
9673 if (!(p = lock_user_string(arg1)))
9674 goto efault;
9675 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9676 unlock_user(p, arg1, 0);
9677 break;
9678 #endif
9679 case TARGET_NR_setuid:
9680 ret = get_errno(sys_setuid(low2highuid(arg1)));
9681 break;
9682 case TARGET_NR_setgid:
9683 ret = get_errno(sys_setgid(low2highgid(arg1)));
9684 break;
9685 case TARGET_NR_setfsuid:
9686 ret = get_errno(setfsuid(arg1));
9687 break;
9688 case TARGET_NR_setfsgid:
9689 ret = get_errno(setfsgid(arg1));
9690 break;
9692 #ifdef TARGET_NR_lchown32
9693 case TARGET_NR_lchown32:
9694 if (!(p = lock_user_string(arg1)))
9695 goto efault;
9696 ret = get_errno(lchown(p, arg2, arg3));
9697 unlock_user(p, arg1, 0);
9698 break;
9699 #endif
9700 #ifdef TARGET_NR_getuid32
9701 case TARGET_NR_getuid32:
9702 ret = get_errno(getuid());
9703 break;
9704 #endif
9706 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9707 /* Alpha specific */
9708 case TARGET_NR_getxuid:
9710 uid_t euid;
9711 euid=geteuid();
9712 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9714 ret = get_errno(getuid());
9715 break;
9716 #endif
9717 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9718 /* Alpha specific */
9719 case TARGET_NR_getxgid:
9721 uid_t egid;
9722 egid=getegid();
9723 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
9725 ret = get_errno(getgid());
9726 break;
9727 #endif
9728 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9729 /* Alpha specific */
9730 case TARGET_NR_osf_getsysinfo:
9731 ret = -TARGET_EOPNOTSUPP;
9732 switch (arg1) {
9733 case TARGET_GSI_IEEE_FP_CONTROL:
9735 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9737 /* Copied from linux ieee_fpcr_to_swcr. */
9738 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9739 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9740 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9741 | SWCR_TRAP_ENABLE_DZE
9742 | SWCR_TRAP_ENABLE_OVF);
9743 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9744 | SWCR_TRAP_ENABLE_INE);
9745 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9746 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9748 if (put_user_u64 (swcr, arg2))
9749 goto efault;
9750 ret = 0;
9752 break;
9754 /* case GSI_IEEE_STATE_AT_SIGNAL:
9755 -- Not implemented in linux kernel.
9756 case GSI_UACPROC:
9757 -- Retrieves current unaligned access state; not much used.
9758 case GSI_PROC_TYPE:
9759 -- Retrieves implver information; surely not used.
9760 case GSI_GET_HWRPB:
9761 -- Grabs a copy of the HWRPB; surely not used.
9764 break;
9765 #endif
9766 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9767 /* Alpha specific */
9768 case TARGET_NR_osf_setsysinfo:
9769 ret = -TARGET_EOPNOTSUPP;
9770 switch (arg1) {
9771 case TARGET_SSI_IEEE_FP_CONTROL:
9773 uint64_t swcr, fpcr, orig_fpcr;
9775 if (get_user_u64 (swcr, arg2)) {
9776 goto efault;
9778 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9779 fpcr = orig_fpcr & FPCR_DYN_MASK;
9781 /* Copied from linux ieee_swcr_to_fpcr. */
9782 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9783 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9784 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9785 | SWCR_TRAP_ENABLE_DZE
9786 | SWCR_TRAP_ENABLE_OVF)) << 48;
9787 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9788 | SWCR_TRAP_ENABLE_INE)) << 57;
9789 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9790 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9792 cpu_alpha_store_fpcr(cpu_env, fpcr);
9793 ret = 0;
9795 break;
9797 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9799 uint64_t exc, fpcr, orig_fpcr;
9800 int si_code;
9802 if (get_user_u64(exc, arg2)) {
9803 goto efault;
9806 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9808 /* We only add to the exception status here. */
9809 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9811 cpu_alpha_store_fpcr(cpu_env, fpcr);
9812 ret = 0;
9814 /* Old exceptions are not signaled. */
9815 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9817 /* If any exceptions set by this call,
9818 and are unmasked, send a signal. */
9819 si_code = 0;
9820 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9821 si_code = TARGET_FPE_FLTRES;
9823 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9824 si_code = TARGET_FPE_FLTUND;
9826 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9827 si_code = TARGET_FPE_FLTOVF;
9829 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9830 si_code = TARGET_FPE_FLTDIV;
9832 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9833 si_code = TARGET_FPE_FLTINV;
9835 if (si_code != 0) {
9836 target_siginfo_t info;
9837 info.si_signo = SIGFPE;
9838 info.si_errno = 0;
9839 info.si_code = si_code;
9840 info._sifields._sigfault._addr
9841 = ((CPUArchState *)cpu_env)->pc;
9842 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9845 break;
9847 /* case SSI_NVPAIRS:
9848 -- Used with SSIN_UACPROC to enable unaligned accesses.
9849 case SSI_IEEE_STATE_AT_SIGNAL:
9850 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9851 -- Not implemented in linux kernel
9854 break;
9855 #endif
9856 #ifdef TARGET_NR_osf_sigprocmask
9857 /* Alpha specific. */
9858 case TARGET_NR_osf_sigprocmask:
9860 abi_ulong mask;
9861 int how;
9862 sigset_t set, oldset;
9864 switch(arg1) {
9865 case TARGET_SIG_BLOCK:
9866 how = SIG_BLOCK;
9867 break;
9868 case TARGET_SIG_UNBLOCK:
9869 how = SIG_UNBLOCK;
9870 break;
9871 case TARGET_SIG_SETMASK:
9872 how = SIG_SETMASK;
9873 break;
9874 default:
9875 ret = -TARGET_EINVAL;
9876 goto fail;
9878 mask = arg2;
9879 target_to_host_old_sigset(&set, &mask);
9880 ret = do_sigprocmask(how, &set, &oldset);
9881 if (!ret) {
9882 host_to_target_old_sigset(&mask, &oldset);
9883 ret = mask;
9886 break;
9887 #endif
9889 #ifdef TARGET_NR_getgid32
9890 case TARGET_NR_getgid32:
9891 ret = get_errno(getgid());
9892 break;
9893 #endif
9894 #ifdef TARGET_NR_geteuid32
9895 case TARGET_NR_geteuid32:
9896 ret = get_errno(geteuid());
9897 break;
9898 #endif
9899 #ifdef TARGET_NR_getegid32
9900 case TARGET_NR_getegid32:
9901 ret = get_errno(getegid());
9902 break;
9903 #endif
9904 #ifdef TARGET_NR_setreuid32
9905 case TARGET_NR_setreuid32:
9906 ret = get_errno(setreuid(arg1, arg2));
9907 break;
9908 #endif
9909 #ifdef TARGET_NR_setregid32
9910 case TARGET_NR_setregid32:
9911 ret = get_errno(setregid(arg1, arg2));
9912 break;
9913 #endif
9914 #ifdef TARGET_NR_getgroups32
9915 case TARGET_NR_getgroups32:
9917 int gidsetsize = arg1;
9918 uint32_t *target_grouplist;
9919 gid_t *grouplist;
9920 int i;
9922 grouplist = alloca(gidsetsize * sizeof(gid_t));
9923 ret = get_errno(getgroups(gidsetsize, grouplist));
9924 if (gidsetsize == 0)
9925 break;
9926 if (!is_error(ret)) {
9927 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9928 if (!target_grouplist) {
9929 ret = -TARGET_EFAULT;
9930 goto fail;
9932 for(i = 0;i < ret; i++)
9933 target_grouplist[i] = tswap32(grouplist[i]);
9934 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9937 break;
9938 #endif
9939 #ifdef TARGET_NR_setgroups32
9940 case TARGET_NR_setgroups32:
9942 int gidsetsize = arg1;
9943 uint32_t *target_grouplist;
9944 gid_t *grouplist;
9945 int i;
9947 grouplist = alloca(gidsetsize * sizeof(gid_t));
9948 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9949 if (!target_grouplist) {
9950 ret = -TARGET_EFAULT;
9951 goto fail;
9953 for(i = 0;i < gidsetsize; i++)
9954 grouplist[i] = tswap32(target_grouplist[i]);
9955 unlock_user(target_grouplist, arg2, 0);
9956 ret = get_errno(setgroups(gidsetsize, grouplist));
9958 break;
9959 #endif
9960 #ifdef TARGET_NR_fchown32
9961 case TARGET_NR_fchown32:
9962 ret = get_errno(fchown(arg1, arg2, arg3));
9963 break;
9964 #endif
9965 #ifdef TARGET_NR_setresuid32
9966 case TARGET_NR_setresuid32:
9967 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
9968 break;
9969 #endif
9970 #ifdef TARGET_NR_getresuid32
9971 case TARGET_NR_getresuid32:
9973 uid_t ruid, euid, suid;
9974 ret = get_errno(getresuid(&ruid, &euid, &suid));
9975 if (!is_error(ret)) {
9976 if (put_user_u32(ruid, arg1)
9977 || put_user_u32(euid, arg2)
9978 || put_user_u32(suid, arg3))
9979 goto efault;
9982 break;
9983 #endif
9984 #ifdef TARGET_NR_setresgid32
9985 case TARGET_NR_setresgid32:
9986 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
9987 break;
9988 #endif
9989 #ifdef TARGET_NR_getresgid32
9990 case TARGET_NR_getresgid32:
9992 gid_t rgid, egid, sgid;
9993 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9994 if (!is_error(ret)) {
9995 if (put_user_u32(rgid, arg1)
9996 || put_user_u32(egid, arg2)
9997 || put_user_u32(sgid, arg3))
9998 goto efault;
10001 break;
10002 #endif
10003 #ifdef TARGET_NR_chown32
10004 case TARGET_NR_chown32:
10005 if (!(p = lock_user_string(arg1)))
10006 goto efault;
10007 ret = get_errno(chown(p, arg2, arg3));
10008 unlock_user(p, arg1, 0);
10009 break;
10010 #endif
10011 #ifdef TARGET_NR_setuid32
10012 case TARGET_NR_setuid32:
10013 ret = get_errno(sys_setuid(arg1));
10014 break;
10015 #endif
10016 #ifdef TARGET_NR_setgid32
10017 case TARGET_NR_setgid32:
10018 ret = get_errno(sys_setgid(arg1));
10019 break;
10020 #endif
10021 #ifdef TARGET_NR_setfsuid32
10022 case TARGET_NR_setfsuid32:
10023 ret = get_errno(setfsuid(arg1));
10024 break;
10025 #endif
10026 #ifdef TARGET_NR_setfsgid32
10027 case TARGET_NR_setfsgid32:
10028 ret = get_errno(setfsgid(arg1));
10029 break;
10030 #endif
10032 case TARGET_NR_pivot_root:
10033 goto unimplemented;
10034 #ifdef TARGET_NR_mincore
10035 case TARGET_NR_mincore:
10037 void *a;
10038 ret = -TARGET_EFAULT;
10039 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10040 goto efault;
10041 if (!(p = lock_user_string(arg3)))
10042 goto mincore_fail;
10043 ret = get_errno(mincore(a, arg2, p));
10044 unlock_user(p, arg3, ret);
10045 mincore_fail:
10046 unlock_user(a, arg1, 0);
10048 break;
10049 #endif
10050 #ifdef TARGET_NR_arm_fadvise64_64
10051 case TARGET_NR_arm_fadvise64_64:
10052 /* arm_fadvise64_64 looks like fadvise64_64 but
10053 * with different argument order: fd, advice, offset, len
10054 * rather than the usual fd, offset, len, advice.
10055 * Note that offset and len are both 64-bit so appear as
10056 * pairs of 32-bit registers.
10058 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10059 target_offset64(arg5, arg6), arg2);
10060 ret = -host_to_target_errno(ret);
10061 break;
10062 #endif
10064 #if TARGET_ABI_BITS == 32
10066 #ifdef TARGET_NR_fadvise64_64
10067 case TARGET_NR_fadvise64_64:
10068 /* 6 args: fd, offset (high, low), len (high, low), advice */
10069 if (regpairs_aligned(cpu_env)) {
10070 /* offset is in (3,4), len in (5,6) and advice in 7 */
10071 arg2 = arg3;
10072 arg3 = arg4;
10073 arg4 = arg5;
10074 arg5 = arg6;
10075 arg6 = arg7;
10077 ret = -host_to_target_errno(posix_fadvise(arg1,
10078 target_offset64(arg2, arg3),
10079 target_offset64(arg4, arg5),
10080 arg6));
10081 break;
10082 #endif
10084 #ifdef TARGET_NR_fadvise64
10085 case TARGET_NR_fadvise64:
10086 /* 5 args: fd, offset (high, low), len, advice */
10087 if (regpairs_aligned(cpu_env)) {
10088 /* offset is in (3,4), len in 5 and advice in 6 */
10089 arg2 = arg3;
10090 arg3 = arg4;
10091 arg4 = arg5;
10092 arg5 = arg6;
10094 ret = -host_to_target_errno(posix_fadvise(arg1,
10095 target_offset64(arg2, arg3),
10096 arg4, arg5));
10097 break;
10098 #endif
10100 #else /* not a 32-bit ABI */
10101 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10102 #ifdef TARGET_NR_fadvise64_64
10103 case TARGET_NR_fadvise64_64:
10104 #endif
10105 #ifdef TARGET_NR_fadvise64
10106 case TARGET_NR_fadvise64:
10107 #endif
10108 #ifdef TARGET_S390X
10109 switch (arg4) {
10110 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10111 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10112 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10113 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10114 default: break;
10116 #endif
10117 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10118 break;
10119 #endif
10120 #endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
10131 #if TARGET_ABI_BITS == 32
10132 case TARGET_NR_fcntl64:
10134 int cmd;
10135 struct flock64 fl;
10136 struct target_flock64 *target_fl;
10137 #ifdef TARGET_ARM
10138 struct target_eabi_flock64 *target_efl;
10139 #endif
10141 cmd = target_to_host_fcntl_cmd(arg2);
10142 if (cmd == -TARGET_EINVAL) {
10143 ret = cmd;
10144 break;
10147 switch(arg2) {
10148 case TARGET_F_GETLK64:
10149 #ifdef TARGET_ARM
10150 if (((CPUARMState *)cpu_env)->eabi) {
10151 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10152 goto efault;
10153 fl.l_type = tswap16(target_efl->l_type);
10154 fl.l_whence = tswap16(target_efl->l_whence);
10155 fl.l_start = tswap64(target_efl->l_start);
10156 fl.l_len = tswap64(target_efl->l_len);
10157 fl.l_pid = tswap32(target_efl->l_pid);
10158 unlock_user_struct(target_efl, arg3, 0);
10159 } else
10160 #endif
10162 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10163 goto efault;
10164 fl.l_type = tswap16(target_fl->l_type);
10165 fl.l_whence = tswap16(target_fl->l_whence);
10166 fl.l_start = tswap64(target_fl->l_start);
10167 fl.l_len = tswap64(target_fl->l_len);
10168 fl.l_pid = tswap32(target_fl->l_pid);
10169 unlock_user_struct(target_fl, arg3, 0);
10171 ret = get_errno(fcntl(arg1, cmd, &fl));
10172 if (ret == 0) {
10173 #ifdef TARGET_ARM
10174 if (((CPUARMState *)cpu_env)->eabi) {
10175 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
10176 goto efault;
10177 target_efl->l_type = tswap16(fl.l_type);
10178 target_efl->l_whence = tswap16(fl.l_whence);
10179 target_efl->l_start = tswap64(fl.l_start);
10180 target_efl->l_len = tswap64(fl.l_len);
10181 target_efl->l_pid = tswap32(fl.l_pid);
10182 unlock_user_struct(target_efl, arg3, 1);
10183 } else
10184 #endif
10186 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
10187 goto efault;
10188 target_fl->l_type = tswap16(fl.l_type);
10189 target_fl->l_whence = tswap16(fl.l_whence);
10190 target_fl->l_start = tswap64(fl.l_start);
10191 target_fl->l_len = tswap64(fl.l_len);
10192 target_fl->l_pid = tswap32(fl.l_pid);
10193 unlock_user_struct(target_fl, arg3, 1);
10196 break;
10198 case TARGET_F_SETLK64:
10199 case TARGET_F_SETLKW64:
10200 #ifdef TARGET_ARM
10201 if (((CPUARMState *)cpu_env)->eabi) {
10202 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
10203 goto efault;
10204 fl.l_type = tswap16(target_efl->l_type);
10205 fl.l_whence = tswap16(target_efl->l_whence);
10206 fl.l_start = tswap64(target_efl->l_start);
10207 fl.l_len = tswap64(target_efl->l_len);
10208 fl.l_pid = tswap32(target_efl->l_pid);
10209 unlock_user_struct(target_efl, arg3, 0);
10210 } else
10211 #endif
10213 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
10214 goto efault;
10215 fl.l_type = tswap16(target_fl->l_type);
10216 fl.l_whence = tswap16(target_fl->l_whence);
10217 fl.l_start = tswap64(target_fl->l_start);
10218 fl.l_len = tswap64(target_fl->l_len);
10219 fl.l_pid = tswap32(target_fl->l_pid);
10220 unlock_user_struct(target_fl, arg3, 0);
10222 ret = get_errno(fcntl(arg1, cmd, &fl));
10223 break;
10224 default:
10225 ret = do_fcntl(arg1, arg2, arg3);
10226 break;
10228 break;
10230 #endif
10231 #ifdef TARGET_NR_cacheflush
10232 case TARGET_NR_cacheflush:
10233 /* self-modifying code is handled automatically, so nothing needed */
10234 ret = 0;
10235 break;
10236 #endif
10237 #ifdef TARGET_NR_security
10238 case TARGET_NR_security:
10239 goto unimplemented;
10240 #endif
10241 #ifdef TARGET_NR_getpagesize
10242 case TARGET_NR_getpagesize:
10243 ret = TARGET_PAGE_SIZE;
10244 break;
10245 #endif
10246 case TARGET_NR_gettid:
10247 ret = get_errno(gettid());
10248 break;
10249 #ifdef TARGET_NR_readahead
10250 case TARGET_NR_readahead:
10251 #if TARGET_ABI_BITS == 32
10252 if (regpairs_aligned(cpu_env)) {
10253 arg2 = arg3;
10254 arg3 = arg4;
10255 arg4 = arg5;
10257 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10258 #else
10259 ret = get_errno(readahead(arg1, arg2, arg3));
10260 #endif
10261 break;
10262 #endif
10263 #ifdef CONFIG_ATTR
10264 #ifdef TARGET_NR_setxattr
10265 case TARGET_NR_listxattr:
10266 case TARGET_NR_llistxattr:
10268 void *p, *b = 0;
10269 if (arg2) {
10270 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10271 if (!b) {
10272 ret = -TARGET_EFAULT;
10273 break;
10276 p = lock_user_string(arg1);
10277 if (p) {
10278 if (num == TARGET_NR_listxattr) {
10279 ret = get_errno(listxattr(p, b, arg3));
10280 } else {
10281 ret = get_errno(llistxattr(p, b, arg3));
10283 } else {
10284 ret = -TARGET_EFAULT;
10286 unlock_user(p, arg1, 0);
10287 unlock_user(b, arg2, arg3);
10288 break;
10290 case TARGET_NR_flistxattr:
10292 void *b = 0;
10293 if (arg2) {
10294 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10295 if (!b) {
10296 ret = -TARGET_EFAULT;
10297 break;
10300 ret = get_errno(flistxattr(arg1, b, arg3));
10301 unlock_user(b, arg2, arg3);
10302 break;
10304 case TARGET_NR_setxattr:
10305 case TARGET_NR_lsetxattr:
10307 void *p, *n, *v = 0;
10308 if (arg3) {
10309 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10310 if (!v) {
10311 ret = -TARGET_EFAULT;
10312 break;
10315 p = lock_user_string(arg1);
10316 n = lock_user_string(arg2);
10317 if (p && n) {
10318 if (num == TARGET_NR_setxattr) {
10319 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10320 } else {
10321 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10323 } else {
10324 ret = -TARGET_EFAULT;
10326 unlock_user(p, arg1, 0);
10327 unlock_user(n, arg2, 0);
10328 unlock_user(v, arg3, 0);
10330 break;
10331 case TARGET_NR_fsetxattr:
10333 void *n, *v = 0;
10334 if (arg3) {
10335 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10336 if (!v) {
10337 ret = -TARGET_EFAULT;
10338 break;
10341 n = lock_user_string(arg2);
10342 if (n) {
10343 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10344 } else {
10345 ret = -TARGET_EFAULT;
10347 unlock_user(n, arg2, 0);
10348 unlock_user(v, arg3, 0);
10350 break;
10351 case TARGET_NR_getxattr:
10352 case TARGET_NR_lgetxattr:
10354 void *p, *n, *v = 0;
10355 if (arg3) {
10356 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10357 if (!v) {
10358 ret = -TARGET_EFAULT;
10359 break;
10362 p = lock_user_string(arg1);
10363 n = lock_user_string(arg2);
10364 if (p && n) {
10365 if (num == TARGET_NR_getxattr) {
10366 ret = get_errno(getxattr(p, n, v, arg4));
10367 } else {
10368 ret = get_errno(lgetxattr(p, n, v, arg4));
10370 } else {
10371 ret = -TARGET_EFAULT;
10373 unlock_user(p, arg1, 0);
10374 unlock_user(n, arg2, 0);
10375 unlock_user(v, arg3, arg4);
10377 break;
10378 case TARGET_NR_fgetxattr:
10380 void *n, *v = 0;
10381 if (arg3) {
10382 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10383 if (!v) {
10384 ret = -TARGET_EFAULT;
10385 break;
10388 n = lock_user_string(arg2);
10389 if (n) {
10390 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10391 } else {
10392 ret = -TARGET_EFAULT;
10394 unlock_user(n, arg2, 0);
10395 unlock_user(v, arg3, arg4);
10397 break;
10398 case TARGET_NR_removexattr:
10399 case TARGET_NR_lremovexattr:
10401 void *p, *n;
10402 p = lock_user_string(arg1);
10403 n = lock_user_string(arg2);
10404 if (p && n) {
10405 if (num == TARGET_NR_removexattr) {
10406 ret = get_errno(removexattr(p, n));
10407 } else {
10408 ret = get_errno(lremovexattr(p, n));
10410 } else {
10411 ret = -TARGET_EFAULT;
10413 unlock_user(p, arg1, 0);
10414 unlock_user(n, arg2, 0);
10416 break;
10417 case TARGET_NR_fremovexattr:
10419 void *n;
10420 n = lock_user_string(arg2);
10421 if (n) {
10422 ret = get_errno(fremovexattr(arg1, n));
10423 } else {
10424 ret = -TARGET_EFAULT;
10426 unlock_user(n, arg2, 0);
10428 break;
10429 #endif
10430 #endif /* CONFIG_ATTR */
10431 #ifdef TARGET_NR_set_thread_area
10432 case TARGET_NR_set_thread_area:
10433 #if defined(TARGET_MIPS)
10434 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10435 ret = 0;
10436 break;
10437 #elif defined(TARGET_CRIS)
10438 if (arg1 & 0xff)
10439 ret = -TARGET_EINVAL;
10440 else {
10441 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10442 ret = 0;
10444 break;
10445 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10446 ret = do_set_thread_area(cpu_env, arg1);
10447 break;
10448 #elif defined(TARGET_M68K)
10450 TaskState *ts = cpu->opaque;
10451 ts->tp_value = arg1;
10452 ret = 0;
10453 break;
10455 #else
10456 goto unimplemented_nowarn;
10457 #endif
10458 #endif
10459 #ifdef TARGET_NR_get_thread_area
10460 case TARGET_NR_get_thread_area:
10461 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10462 ret = do_get_thread_area(cpu_env, arg1);
10463 break;
10464 #elif defined(TARGET_M68K)
10466 TaskState *ts = cpu->opaque;
10467 ret = ts->tp_value;
10468 break;
10470 #else
10471 goto unimplemented_nowarn;
10472 #endif
10473 #endif
10474 #ifdef TARGET_NR_getdomainname
10475 case TARGET_NR_getdomainname:
10476 goto unimplemented_nowarn;
10477 #endif
10479 #ifdef TARGET_NR_clock_gettime
10480 case TARGET_NR_clock_gettime:
10482 struct timespec ts;
10483 ret = get_errno(clock_gettime(arg1, &ts));
10484 if (!is_error(ret)) {
10485 host_to_target_timespec(arg2, &ts);
10487 break;
10489 #endif
10490 #ifdef TARGET_NR_clock_getres
10491 case TARGET_NR_clock_getres:
10493 struct timespec ts;
10494 ret = get_errno(clock_getres(arg1, &ts));
10495 if (!is_error(ret)) {
10496 host_to_target_timespec(arg2, &ts);
10498 break;
10500 #endif
10501 #ifdef TARGET_NR_clock_nanosleep
10502 case TARGET_NR_clock_nanosleep:
10504 struct timespec ts;
10505 target_to_host_timespec(&ts, arg3);
10506 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10507 &ts, arg4 ? &ts : NULL));
10508 if (arg4)
10509 host_to_target_timespec(arg4, &ts);
10511 #if defined(TARGET_PPC)
10512 /* clock_nanosleep is odd in that it returns positive errno values.
10513 * On PPC, CR0 bit 3 should be set in such a situation. */
10514 if (ret && ret != -TARGET_ERESTARTSYS) {
10515 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10517 #endif
10518 break;
10520 #endif
10522 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10523 case TARGET_NR_set_tid_address:
10524 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10525 break;
10526 #endif
10528 case TARGET_NR_tkill:
10529 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10530 break;
10532 case TARGET_NR_tgkill:
10533 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
10534 target_to_host_signal(arg3)));
10535 break;
10537 #ifdef TARGET_NR_set_robust_list
10538 case TARGET_NR_set_robust_list:
10539 case TARGET_NR_get_robust_list:
10540 /* The ABI for supporting robust futexes has userspace pass
10541 * the kernel a pointer to a linked list which is updated by
10542 * userspace after the syscall; the list is walked by the kernel
10543 * when the thread exits. Since the linked list in QEMU guest
10544 * memory isn't a valid linked list for the host and we have
10545 * no way to reliably intercept the thread-death event, we can't
10546 * support these. Silently return ENOSYS so that guest userspace
10547 * falls back to a non-robust futex implementation (which should
10548 * be OK except in the corner case of the guest crashing while
10549 * holding a mutex that is shared with another process via
10550 * shared memory).
10552 goto unimplemented_nowarn;
10553 #endif
10555 #if defined(TARGET_NR_utimensat)
10556 case TARGET_NR_utimensat:
10558 struct timespec *tsp, ts[2];
10559 if (!arg3) {
10560 tsp = NULL;
10561 } else {
10562 target_to_host_timespec(ts, arg3);
10563 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10564 tsp = ts;
10566 if (!arg2)
10567 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10568 else {
10569 if (!(p = lock_user_string(arg2))) {
10570 ret = -TARGET_EFAULT;
10571 goto fail;
10573 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10574 unlock_user(p, arg2, 0);
10577 break;
10578 #endif
10579 case TARGET_NR_futex:
10580 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10581 break;
10582 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10583 case TARGET_NR_inotify_init:
10584 ret = get_errno(sys_inotify_init());
10585 break;
10586 #endif
10587 #ifdef CONFIG_INOTIFY1
10588 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10589 case TARGET_NR_inotify_init1:
10590 ret = get_errno(sys_inotify_init1(arg1));
10591 break;
10592 #endif
10593 #endif
10594 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10595 case TARGET_NR_inotify_add_watch:
10596 p = lock_user_string(arg2);
10597 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10598 unlock_user(p, arg2, 0);
10599 break;
10600 #endif
10601 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10602 case TARGET_NR_inotify_rm_watch:
10603 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10604 break;
10605 #endif
10607 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10608 case TARGET_NR_mq_open:
10610 struct mq_attr posix_mq_attr, *attrp;
10612 p = lock_user_string(arg1 - 1);
10613 if (arg4 != 0) {
10614 copy_from_user_mq_attr (&posix_mq_attr, arg4);
10615 attrp = &posix_mq_attr;
10616 } else {
10617 attrp = 0;
10619 ret = get_errno(mq_open(p, arg2, arg3, attrp));
10620 unlock_user (p, arg1, 0);
10622 break;
10624 case TARGET_NR_mq_unlink:
10625 p = lock_user_string(arg1 - 1);
10626 ret = get_errno(mq_unlink(p));
10627 unlock_user (p, arg1, 0);
10628 break;
10630 case TARGET_NR_mq_timedsend:
10632 struct timespec ts;
10634 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10635 if (arg5 != 0) {
10636 target_to_host_timespec(&ts, arg5);
10637 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10638 host_to_target_timespec(arg5, &ts);
10639 } else {
10640 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10642 unlock_user (p, arg2, arg3);
10644 break;
10646 case TARGET_NR_mq_timedreceive:
10648 struct timespec ts;
10649 unsigned int prio;
10651 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10652 if (arg5 != 0) {
10653 target_to_host_timespec(&ts, arg5);
10654 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10655 &prio, &ts));
10656 host_to_target_timespec(arg5, &ts);
10657 } else {
10658 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10659 &prio, NULL));
10661 unlock_user (p, arg2, arg3);
10662 if (arg4 != 0)
10663 put_user_u32(prio, arg4);
10665 break;
10667 /* Not implemented for now... */
10668 /* case TARGET_NR_mq_notify: */
10669 /* break; */
10671 case TARGET_NR_mq_getsetattr:
10673 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10674 ret = 0;
10675 if (arg3 != 0) {
10676 ret = mq_getattr(arg1, &posix_mq_attr_out);
10677 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10679 if (arg2 != 0) {
10680 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10681 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
10685 break;
10686 #endif
10688 #ifdef CONFIG_SPLICE
10689 #ifdef TARGET_NR_tee
10690 case TARGET_NR_tee:
10692 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10694 break;
10695 #endif
10696 #ifdef TARGET_NR_splice
10697 case TARGET_NR_splice:
10699 loff_t loff_in, loff_out;
10700 loff_t *ploff_in = NULL, *ploff_out = NULL;
10701 if (arg2) {
10702 if (get_user_u64(loff_in, arg2)) {
10703 goto efault;
10705 ploff_in = &loff_in;
10707 if (arg4) {
10708 if (get_user_u64(loff_out, arg4)) {
10709 goto efault;
10711 ploff_out = &loff_out;
10713 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10714 if (arg2) {
10715 if (put_user_u64(loff_in, arg2)) {
10716 goto efault;
10719 if (arg4) {
10720 if (put_user_u64(loff_out, arg4)) {
10721 goto efault;
10725 break;
10726 #endif
10727 #ifdef TARGET_NR_vmsplice
10728 case TARGET_NR_vmsplice:
10730 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10731 if (vec != NULL) {
10732 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10733 unlock_iovec(vec, arg2, arg3, 0);
10734 } else {
10735 ret = -host_to_target_errno(errno);
10738 break;
10739 #endif
10740 #endif /* CONFIG_SPLICE */
10741 #ifdef CONFIG_EVENTFD
10742 #if defined(TARGET_NR_eventfd)
10743 case TARGET_NR_eventfd:
10744 ret = get_errno(eventfd(arg1, 0));
10745 fd_trans_unregister(ret);
10746 break;
10747 #endif
10748 #if defined(TARGET_NR_eventfd2)
10749 case TARGET_NR_eventfd2:
10751 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10752 if (arg2 & TARGET_O_NONBLOCK) {
10753 host_flags |= O_NONBLOCK;
10755 if (arg2 & TARGET_O_CLOEXEC) {
10756 host_flags |= O_CLOEXEC;
10758 ret = get_errno(eventfd(arg1, host_flags));
10759 fd_trans_unregister(ret);
10760 break;
10762 #endif
10763 #endif /* CONFIG_EVENTFD */
10764 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10765 case TARGET_NR_fallocate:
10766 #if TARGET_ABI_BITS == 32
10767 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10768 target_offset64(arg5, arg6)));
10769 #else
10770 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10771 #endif
10772 break;
10773 #endif
10774 #if defined(CONFIG_SYNC_FILE_RANGE)
10775 #if defined(TARGET_NR_sync_file_range)
10776 case TARGET_NR_sync_file_range:
10777 #if TARGET_ABI_BITS == 32
10778 #if defined(TARGET_MIPS)
10779 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10780 target_offset64(arg5, arg6), arg7));
10781 #else
10782 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10783 target_offset64(arg4, arg5), arg6));
10784 #endif /* !TARGET_MIPS */
10785 #else
10786 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10787 #endif
10788 break;
10789 #endif
10790 #if defined(TARGET_NR_sync_file_range2)
10791 case TARGET_NR_sync_file_range2:
10792 /* This is like sync_file_range but the arguments are reordered */
10793 #if TARGET_ABI_BITS == 32
10794 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10795 target_offset64(arg5, arg6), arg2));
10796 #else
10797 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10798 #endif
10799 break;
10800 #endif
10801 #endif
10802 #if defined(TARGET_NR_signalfd4)
10803 case TARGET_NR_signalfd4:
10804 ret = do_signalfd4(arg1, arg2, arg4);
10805 break;
10806 #endif
10807 #if defined(TARGET_NR_signalfd)
10808 case TARGET_NR_signalfd:
10809 ret = do_signalfd4(arg1, arg2, 0);
10810 break;
10811 #endif
10812 #if defined(CONFIG_EPOLL)
10813 #if defined(TARGET_NR_epoll_create)
10814 case TARGET_NR_epoll_create:
10815 ret = get_errno(epoll_create(arg1));
10816 break;
10817 #endif
10818 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10819 case TARGET_NR_epoll_create1:
10820 ret = get_errno(epoll_create1(arg1));
10821 break;
10822 #endif
10823 #if defined(TARGET_NR_epoll_ctl)
10824 case TARGET_NR_epoll_ctl:
10826 struct epoll_event ep;
10827 struct epoll_event *epp = 0;
10828 if (arg4) {
10829 struct target_epoll_event *target_ep;
10830 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10831 goto efault;
10833 ep.events = tswap32(target_ep->events);
10834 /* The epoll_data_t union is just opaque data to the kernel,
10835 * so we transfer all 64 bits across and need not worry what
10836 * actual data type it is.
10838 ep.data.u64 = tswap64(target_ep->data.u64);
10839 unlock_user_struct(target_ep, arg4, 0);
10840 epp = &ep;
10842 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10843 break;
10845 #endif
10847 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10848 #if defined(TARGET_NR_epoll_wait)
10849 case TARGET_NR_epoll_wait:
10850 #endif
10851 #if defined(TARGET_NR_epoll_pwait)
10852 case TARGET_NR_epoll_pwait:
10853 #endif
10855 struct target_epoll_event *target_ep;
10856 struct epoll_event *ep;
10857 int epfd = arg1;
10858 int maxevents = arg3;
10859 int timeout = arg4;
10861 target_ep = lock_user(VERIFY_WRITE, arg2,
10862 maxevents * sizeof(struct target_epoll_event), 1);
10863 if (!target_ep) {
10864 goto efault;
10867 ep = alloca(maxevents * sizeof(struct epoll_event));
10869 switch (num) {
10870 #if defined(TARGET_NR_epoll_pwait)
10871 case TARGET_NR_epoll_pwait:
10873 target_sigset_t *target_set;
10874 sigset_t _set, *set = &_set;
10876 if (arg5) {
10877 target_set = lock_user(VERIFY_READ, arg5,
10878 sizeof(target_sigset_t), 1);
10879 if (!target_set) {
10880 unlock_user(target_ep, arg2, 0);
10881 goto efault;
10883 target_to_host_sigset(set, target_set);
10884 unlock_user(target_set, arg5, 0);
10885 } else {
10886 set = NULL;
10889 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10890 set, SIGSET_T_SIZE));
10891 break;
10893 #endif
10894 #if defined(TARGET_NR_epoll_wait)
10895 case TARGET_NR_epoll_wait:
10896 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
10897 NULL, 0));
10898 break;
10899 #endif
10900 default:
10901 ret = -TARGET_ENOSYS;
10903 if (!is_error(ret)) {
10904 int i;
10905 for (i = 0; i < ret; i++) {
10906 target_ep[i].events = tswap32(ep[i].events);
10907 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10910 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10911 break;
10913 #endif
10914 #endif
10915 #ifdef TARGET_NR_prlimit64
10916 case TARGET_NR_prlimit64:
10918 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10919 struct target_rlimit64 *target_rnew, *target_rold;
10920 struct host_rlimit64 rnew, rold, *rnewp = 0;
10921 int resource = target_to_host_resource(arg2);
10922 if (arg3) {
10923 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10924 goto efault;
10926 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10927 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10928 unlock_user_struct(target_rnew, arg3, 0);
10929 rnewp = &rnew;
10932 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10933 if (!is_error(ret) && arg4) {
10934 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10935 goto efault;
10937 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10938 target_rold->rlim_max = tswap64(rold.rlim_max);
10939 unlock_user_struct(target_rold, arg4, 1);
10941 break;
10943 #endif
10944 #ifdef TARGET_NR_gethostname
10945 case TARGET_NR_gethostname:
10947 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10948 if (name) {
10949 ret = get_errno(gethostname(name, arg2));
10950 unlock_user(name, arg1, arg2);
10951 } else {
10952 ret = -TARGET_EFAULT;
10954 break;
10956 #endif
10957 #ifdef TARGET_NR_atomic_cmpxchg_32
10958 case TARGET_NR_atomic_cmpxchg_32:
10960 /* should use start_exclusive from main.c */
10961 abi_ulong mem_value;
10962 if (get_user_u32(mem_value, arg6)) {
10963 target_siginfo_t info;
10964 info.si_signo = SIGSEGV;
10965 info.si_errno = 0;
10966 info.si_code = TARGET_SEGV_MAPERR;
10967 info._sifields._sigfault._addr = arg6;
10968 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10969 ret = 0xdeadbeef;
10972 if (mem_value == arg2)
10973 put_user_u32(arg1, arg6);
10974 ret = mem_value;
10975 break;
10977 #endif
10978 #ifdef TARGET_NR_atomic_barrier
10979 case TARGET_NR_atomic_barrier:
10981 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10982 ret = 0;
10983 break;
10985 #endif
10987 #ifdef TARGET_NR_timer_create
10988 case TARGET_NR_timer_create:
10990 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10992 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10994 int clkid = arg1;
10995 int timer_index = next_free_host_timer();
10997 if (timer_index < 0) {
10998 ret = -TARGET_EAGAIN;
10999 } else {
11000 timer_t *phtimer = g_posix_timers + timer_index;
11002 if (arg2) {
11003 phost_sevp = &host_sevp;
11004 ret = target_to_host_sigevent(phost_sevp, arg2);
11005 if (ret != 0) {
11006 break;
11010 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11011 if (ret) {
11012 phtimer = NULL;
11013 } else {
11014 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11015 goto efault;
11019 break;
11021 #endif
11023 #ifdef TARGET_NR_timer_settime
11024 case TARGET_NR_timer_settime:
11026 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11027 * struct itimerspec * old_value */
11028 target_timer_t timerid = get_timer_id(arg1);
11030 if (timerid < 0) {
11031 ret = timerid;
11032 } else if (arg3 == 0) {
11033 ret = -TARGET_EINVAL;
11034 } else {
11035 timer_t htimer = g_posix_timers[timerid];
11036 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11038 target_to_host_itimerspec(&hspec_new, arg3);
11039 ret = get_errno(
11040 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11041 host_to_target_itimerspec(arg2, &hspec_old);
11043 break;
11045 #endif
11047 #ifdef TARGET_NR_timer_gettime
11048 case TARGET_NR_timer_gettime:
11050 /* args: timer_t timerid, struct itimerspec *curr_value */
11051 target_timer_t timerid = get_timer_id(arg1);
11053 if (timerid < 0) {
11054 ret = timerid;
11055 } else if (!arg2) {
11056 ret = -TARGET_EFAULT;
11057 } else {
11058 timer_t htimer = g_posix_timers[timerid];
11059 struct itimerspec hspec;
11060 ret = get_errno(timer_gettime(htimer, &hspec));
11062 if (host_to_target_itimerspec(arg2, &hspec)) {
11063 ret = -TARGET_EFAULT;
11066 break;
11068 #endif
11070 #ifdef TARGET_NR_timer_getoverrun
11071 case TARGET_NR_timer_getoverrun:
11073 /* args: timer_t timerid */
11074 target_timer_t timerid = get_timer_id(arg1);
11076 if (timerid < 0) {
11077 ret = timerid;
11078 } else {
11079 timer_t htimer = g_posix_timers[timerid];
11080 ret = get_errno(timer_getoverrun(htimer));
11082 fd_trans_unregister(ret);
11083 break;
11085 #endif
11087 #ifdef TARGET_NR_timer_delete
11088 case TARGET_NR_timer_delete:
11090 /* args: timer_t timerid */
11091 target_timer_t timerid = get_timer_id(arg1);
11093 if (timerid < 0) {
11094 ret = timerid;
11095 } else {
11096 timer_t htimer = g_posix_timers[timerid];
11097 ret = get_errno(timer_delete(htimer));
11098 g_posix_timers[timerid] = 0;
11100 break;
11102 #endif
11104 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11105 case TARGET_NR_timerfd_create:
11106 ret = get_errno(timerfd_create(arg1,
11107 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11108 break;
11109 #endif
11111 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11112 case TARGET_NR_timerfd_gettime:
11114 struct itimerspec its_curr;
11116 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11118 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11119 goto efault;
11122 break;
11123 #endif
11125 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11126 case TARGET_NR_timerfd_settime:
11128 struct itimerspec its_new, its_old, *p_new;
11130 if (arg3) {
11131 if (target_to_host_itimerspec(&its_new, arg3)) {
11132 goto efault;
11134 p_new = &its_new;
11135 } else {
11136 p_new = NULL;
11139 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11141 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11142 goto efault;
11145 break;
11146 #endif
11148 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11149 case TARGET_NR_ioprio_get:
11150 ret = get_errno(ioprio_get(arg1, arg2));
11151 break;
11152 #endif
11154 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11155 case TARGET_NR_ioprio_set:
11156 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11157 break;
11158 #endif
11160 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11161 case TARGET_NR_setns:
11162 ret = get_errno(setns(arg1, arg2));
11163 break;
11164 #endif
11165 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11166 case TARGET_NR_unshare:
11167 ret = get_errno(unshare(arg1));
11168 break;
11169 #endif
11171 default:
11172 unimplemented:
11173 gemu_log("qemu: Unsupported syscall: %d\n", num);
11174 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11175 unimplemented_nowarn:
11176 #endif
11177 ret = -TARGET_ENOSYS;
11178 break;
11180 fail:
11181 #ifdef DEBUG
11182 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11183 #endif
11184 if(do_strace)
11185 print_syscall_ret(num, ret);
11186 trace_guest_user_syscall_ret(cpu, num, ret);
11187 return ret;
11188 efault:
11189 ret = -TARGET_EFAULT;
11190 goto fail;