linux-user: Check sigsetsize argument to syscalls
[qemu/ar7.git] / linux-user / syscall.c
blob0a99af89128cf4d766bafddb2abb482e35aa2884
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 //#define DEBUG
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 type6 arg6) \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 defined(__s390x__)
197 #define __NR__llseek __NR_lseek
198 #endif
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
203 #endif
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
/* Fallback for hosts whose kernel headers lack __NR_gettid: stand in
 * for the host gettid() and, like the real wrapper, report failure as
 * a host errno value (not a target one). */
static int gettid(void) {
    return -ENOSYS;
}
213 #endif
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
216 #endif
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #endif
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group,int,error_code)
229 #endif
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address,int *,tidptr)
232 #endif
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
235 const struct timespec *,timeout,int *,uaddr2,int,val3)
236 #endif
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
239 unsigned long *, user_mask_ptr);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
244 void *, arg);
245 _syscall2(int, capget, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 _syscall2(int, capset, struct __user_cap_header_struct *, header,
248 struct __user_cap_data_struct *, data);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get, int, which, int, who)
251 #endif
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
254 #endif
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
257 #endif
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
275 #endif
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
278 #endif
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
281 #endif
282 #if defined(O_PATH)
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
284 #endif
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
288 #endif
289 { 0, 0, 0, 0 }
292 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
293 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
294 typedef struct TargetFdTrans {
295 TargetFdDataFunc host_to_target_data;
296 TargetFdDataFunc target_to_host_data;
297 TargetFdAddrFunc target_to_host_addr;
298 } TargetFdTrans;
300 static TargetFdTrans **target_fd_trans;
302 static unsigned int target_fd_max;
304 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
306 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
307 return target_fd_trans[fd]->target_to_host_data;
309 return NULL;
312 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
314 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
315 return target_fd_trans[fd]->host_to_target_data;
317 return NULL;
320 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
322 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
323 return target_fd_trans[fd]->target_to_host_addr;
325 return NULL;
328 static void fd_trans_register(int fd, TargetFdTrans *trans)
330 unsigned int oldmax;
332 if (fd >= target_fd_max) {
333 oldmax = target_fd_max;
334 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
335 target_fd_trans = g_renew(TargetFdTrans *,
336 target_fd_trans, target_fd_max);
337 memset((void *)(target_fd_trans + oldmax), 0,
338 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
340 target_fd_trans[fd] = trans;
343 static void fd_trans_unregister(int fd)
345 if (fd >= 0 && fd < target_fd_max) {
346 target_fd_trans[fd] = NULL;
350 static void fd_trans_dup(int oldfd, int newfd)
352 fd_trans_unregister(newfd);
353 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
354 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd-syscall-style wrapper: on success return the path length
 * including the terminating NUL (the kernel convention); on failure
 * return -1 with errno already set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() has set errno for us */
        return -1;
    }
    return strlen(buf) + 1;
}
367 #ifdef TARGET_NR_utimensat
368 #ifdef CONFIG_UTIMENSAT
/* utimensat wrapper built on the libc API: a NULL pathname means
 * "operate on dirfd itself", which maps to futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    return pathname ? utimensat(dirfd, pathname, times, flags)
                    : futimens(dirfd, times);
}
377 #elif defined(__NR_utimensat)
378 #define __NR_sys_utimensat __NR_utimensat
379 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
380 const struct timespec *,tsp,int,flags)
381 #else
/* Neither the libc utimensat() nor the raw __NR_utimensat syscall is
 * available on this host: always fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
388 #endif
389 #endif /* TARGET_NR_utimensat */
391 #ifdef CONFIG_INOTIFY
392 #include <sys/inotify.h>
394 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin shim over the libc inotify_init(); returns a host fd or -1. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
399 #endif
400 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin shim over the libc inotify_add_watch(); returns a watch
 * descriptor or -1 with errno set. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
405 #endif
406 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin shim over the libc inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
411 #endif
412 #ifdef CONFIG_INOTIFY1
413 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin shim over the libc inotify_init1() (flags variant). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
418 #endif
419 #endif
420 #else
421 /* Userspace can usually survive runtime without inotify */
422 #undef TARGET_NR_inotify_init
423 #undef TARGET_NR_inotify_init1
424 #undef TARGET_NR_inotify_add_watch
425 #undef TARGET_NR_inotify_rm_watch
426 #endif /* CONFIG_INOTIFY */
428 #if defined(TARGET_NR_prlimit64)
429 #ifndef __NR_prlimit64
430 # define __NR_prlimit64 -1
431 #endif
432 #define __NR_sys_prlimit64 __NR_prlimit64
433 /* The glibc rlimit structure may not be that used by the underlying syscall */
434 struct host_rlimit64 {
435 uint64_t rlim_cur;
436 uint64_t rlim_max;
438 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
439 const struct host_rlimit64 *, new_limit,
440 struct host_rlimit64 *, old_limit)
441 #endif
444 #if defined(TARGET_NR_timer_create)
445 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
446 static timer_t g_posix_timers[32] = { 0, } ;
448 static inline int next_free_host_timer(void)
450 int k ;
451 /* FIXME: Does finding the next free slot require a lock? */
452 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
453 if (g_posix_timers[k] == 0) {
454 g_posix_timers[k] = (timer_t) 1;
455 return k;
458 return -1;
460 #endif
462 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
463 #ifdef TARGET_ARM
/* ARM: 64-bit syscall arguments are passed in aligned (even/odd)
 * register pairs only for EABI guests; OABI packs them unaligned. */
464 static inline int regpairs_aligned(void *cpu_env) {
465 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
467 #elif defined(TARGET_MIPS)
468 static inline int regpairs_aligned(void *cpu_env) { return 1; }
469 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
470 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
471 * of registers which translates to the same as ARM/MIPS, because we start with
472 * r3 as arg1 */
473 static inline int regpairs_aligned(void *cpu_env) { return 1; }
474 #else
475 static inline int regpairs_aligned(void *cpu_env) { return 0; }
476 #endif
478 #define ERRNO_TABLE_SIZE 1200
480 /* target_to_host_errno_table[] is initialized from
481 * host_to_target_errno_table[] in syscall_init(). */
482 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
486 * This list is the union of errno values overridden in asm-<arch>/errno.h
487 * minus the errnos that are not actually generic to all archs.
489 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
490 [EAGAIN] = TARGET_EAGAIN,
491 [EIDRM] = TARGET_EIDRM,
492 [ECHRNG] = TARGET_ECHRNG,
493 [EL2NSYNC] = TARGET_EL2NSYNC,
494 [EL3HLT] = TARGET_EL3HLT,
495 [EL3RST] = TARGET_EL3RST,
496 [ELNRNG] = TARGET_ELNRNG,
497 [EUNATCH] = TARGET_EUNATCH,
498 [ENOCSI] = TARGET_ENOCSI,
499 [EL2HLT] = TARGET_EL2HLT,
500 [EDEADLK] = TARGET_EDEADLK,
501 [ENOLCK] = TARGET_ENOLCK,
502 [EBADE] = TARGET_EBADE,
503 [EBADR] = TARGET_EBADR,
504 [EXFULL] = TARGET_EXFULL,
505 [ENOANO] = TARGET_ENOANO,
506 [EBADRQC] = TARGET_EBADRQC,
507 [EBADSLT] = TARGET_EBADSLT,
508 [EBFONT] = TARGET_EBFONT,
509 [ENOSTR] = TARGET_ENOSTR,
510 [ENODATA] = TARGET_ENODATA,
511 [ETIME] = TARGET_ETIME,
512 [ENOSR] = TARGET_ENOSR,
513 [ENONET] = TARGET_ENONET,
514 [ENOPKG] = TARGET_ENOPKG,
515 [EREMOTE] = TARGET_EREMOTE,
516 [ENOLINK] = TARGET_ENOLINK,
517 [EADV] = TARGET_EADV,
518 [ESRMNT] = TARGET_ESRMNT,
519 [ECOMM] = TARGET_ECOMM,
520 [EPROTO] = TARGET_EPROTO,
521 [EDOTDOT] = TARGET_EDOTDOT,
522 [EMULTIHOP] = TARGET_EMULTIHOP,
523 [EBADMSG] = TARGET_EBADMSG,
524 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
525 [EOVERFLOW] = TARGET_EOVERFLOW,
526 [ENOTUNIQ] = TARGET_ENOTUNIQ,
527 [EBADFD] = TARGET_EBADFD,
528 [EREMCHG] = TARGET_EREMCHG,
529 [ELIBACC] = TARGET_ELIBACC,
530 [ELIBBAD] = TARGET_ELIBBAD,
531 [ELIBSCN] = TARGET_ELIBSCN,
532 [ELIBMAX] = TARGET_ELIBMAX,
533 [ELIBEXEC] = TARGET_ELIBEXEC,
534 [EILSEQ] = TARGET_EILSEQ,
535 [ENOSYS] = TARGET_ENOSYS,
536 [ELOOP] = TARGET_ELOOP,
537 [ERESTART] = TARGET_ERESTART,
538 [ESTRPIPE] = TARGET_ESTRPIPE,
539 [ENOTEMPTY] = TARGET_ENOTEMPTY,
540 [EUSERS] = TARGET_EUSERS,
541 [ENOTSOCK] = TARGET_ENOTSOCK,
542 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
543 [EMSGSIZE] = TARGET_EMSGSIZE,
544 [EPROTOTYPE] = TARGET_EPROTOTYPE,
545 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
546 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
547 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
548 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
549 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
550 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
551 [EADDRINUSE] = TARGET_EADDRINUSE,
552 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
553 [ENETDOWN] = TARGET_ENETDOWN,
554 [ENETUNREACH] = TARGET_ENETUNREACH,
555 [ENETRESET] = TARGET_ENETRESET,
556 [ECONNABORTED] = TARGET_ECONNABORTED,
557 [ECONNRESET] = TARGET_ECONNRESET,
558 [ENOBUFS] = TARGET_ENOBUFS,
559 [EISCONN] = TARGET_EISCONN,
560 [ENOTCONN] = TARGET_ENOTCONN,
561 [EUCLEAN] = TARGET_EUCLEAN,
562 [ENOTNAM] = TARGET_ENOTNAM,
563 [ENAVAIL] = TARGET_ENAVAIL,
564 [EISNAM] = TARGET_EISNAM,
565 [EREMOTEIO] = TARGET_EREMOTEIO,
566 [ESHUTDOWN] = TARGET_ESHUTDOWN,
567 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
568 [ETIMEDOUT] = TARGET_ETIMEDOUT,
569 [ECONNREFUSED] = TARGET_ECONNREFUSED,
570 [EHOSTDOWN] = TARGET_EHOSTDOWN,
571 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
572 [EALREADY] = TARGET_EALREADY,
573 [EINPROGRESS] = TARGET_EINPROGRESS,
574 [ESTALE] = TARGET_ESTALE,
575 [ECANCELED] = TARGET_ECANCELED,
576 [ENOMEDIUM] = TARGET_ENOMEDIUM,
577 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
578 #ifdef ENOKEY
579 [ENOKEY] = TARGET_ENOKEY,
580 #endif
581 #ifdef EKEYEXPIRED
582 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
583 #endif
584 #ifdef EKEYREVOKED
585 [EKEYREVOKED] = TARGET_EKEYREVOKED,
586 #endif
587 #ifdef EKEYREJECTED
588 [EKEYREJECTED] = TARGET_EKEYREJECTED,
589 #endif
590 #ifdef EOWNERDEAD
591 [EOWNERDEAD] = TARGET_EOWNERDEAD,
592 #endif
593 #ifdef ENOTRECOVERABLE
594 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
595 #endif
598 static inline int host_to_target_errno(int err)
600 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
601 host_to_target_errno_table[err]) {
602 return host_to_target_errno_table[err];
604 return err;
607 static inline int target_to_host_errno(int err)
609 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
610 target_to_host_errno_table[err]) {
611 return target_to_host_errno_table[err];
613 return err;
616 static inline abi_long get_errno(abi_long ret)
618 if (ret == -1)
619 return -host_to_target_errno(errno);
620 else
621 return ret;
/* A syscall result in the top 4096 values of the address space is a
 * negated errno (the kernel return convention). */
624 static inline int is_error(abi_long ret)
626 return (abi_ulong)ret >= (abi_ulong)(-4096);
629 const char *target_strerror(int err)
631 if (err == TARGET_ERESTARTSYS) {
632 return "To be restarted";
634 if (err == TARGET_QEMU_ESIGRETURN) {
635 return "Successful exit from sigreturn";
638 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
639 return NULL;
641 return strerror(target_to_host_errno(err));
644 #define safe_syscall0(type, name) \
645 static type safe_##name(void) \
647 return safe_syscall(__NR_##name); \
650 #define safe_syscall1(type, name, type1, arg1) \
651 static type safe_##name(type1 arg1) \
653 return safe_syscall(__NR_##name, arg1); \
656 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
657 static type safe_##name(type1 arg1, type2 arg2) \
659 return safe_syscall(__NR_##name, arg1, arg2); \
662 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
663 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
665 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
668 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
669 type4, arg4) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
675 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
676 type4, arg4, type5, arg5) \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
678 type5 arg5) \
680 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
683 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
684 type4, arg4, type5, arg5, type6, arg6) \
685 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
686 type5 arg5, type6 arg6) \
688 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
691 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
692 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
693 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
694 int, flags, mode_t, mode)
695 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
696 struct rusage *, rusage)
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698 int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
701 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
702 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
703 struct timespec *, tsp, const sigset_t *, sigmask,
704 size_t, sigsetsize)
705 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
706 int, maxevents, int, timeout, const sigset_t *, sigmask,
707 size_t, sigsetsize)
708 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
709 const struct timespec *,timeout,int *,uaddr2,int,val3)
710 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
711 safe_syscall2(int, kill, pid_t, pid, int, sig)
712 safe_syscall2(int, tkill, int, tid, int, sig)
713 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
714 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
715 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
716 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
717 socklen_t, addrlen)
718 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
719 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
720 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
721 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
722 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
723 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
724 safe_syscall2(int, flock, int, fd, int, operation)
725 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
726 const struct timespec *, uts, size_t, sigsetsize)
727 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
728 int, flags)
729 safe_syscall2(int, nanosleep, const struct timespec *, req,
730 struct timespec *, rem)
731 #ifdef TARGET_NR_clock_nanosleep
732 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
733 const struct timespec *, req, struct timespec *, rem)
734 #endif
735 #ifdef __NR_msgsnd
736 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
737 int, flags)
738 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
739 long, msgtype, int, flags)
740 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
741 unsigned, nsops, const struct timespec *, timeout)
742 #else
743 /* This host kernel architecture uses a single ipc syscall; fake up
744 * wrappers for the sub-operations to hide this implementation detail.
745 * Annoyingly we can't include linux/ipc.h to get the constant definitions
746 * for the call parameter because some structs in there conflict with the
747 * sys/ipc.h ones. So we just define them here, and rely on them being
748 * the same for all host architectures.
750 #define Q_SEMTIMEDOP 4
751 #define Q_MSGSND 11
752 #define Q_MSGRCV 12
753 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
755 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
756 void *, ptr, long, fifth)
/* msgsnd via the multiplexed ipc() syscall (hosts without __NR_msgsnd). */
757 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
759 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
/* msgrcv via the multiplexed ipc() syscall.  IPC version 1 passes the
 * message type directly in the "fifth" argument. */
761 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
763 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
/* semtimedop via the multiplexed ipc() syscall; the timeout rides in
 * the "ptr"-shaped fifth slot cast to long. */
765 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
766 const struct timespec *timeout)
768 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
769 (long)timeout);
771 #endif
772 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
773 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
774 size_t, len, unsigned, prio, const struct timespec *, timeout)
775 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
776 size_t, len, unsigned *, prio, const struct timespec *, timeout)
777 #endif
778 /* We do ioctl like this rather than via safe_syscall3 to preserve the
779 * "third argument might be integer or pointer or not present" behaviour of
780 * the libc function.
782 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
783 /* Similarly for fcntl. Note that callers must always:
784 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
785 * use the flock64 struct rather than unsuffixed flock
786 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
788 #ifdef __NR_fcntl64
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
790 #else
791 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
792 #endif
794 static inline int host_to_target_sock_type(int host_type)
796 int target_type;
798 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
799 case SOCK_DGRAM:
800 target_type = TARGET_SOCK_DGRAM;
801 break;
802 case SOCK_STREAM:
803 target_type = TARGET_SOCK_STREAM;
804 break;
805 default:
806 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
807 break;
810 #if defined(SOCK_CLOEXEC)
811 if (host_type & SOCK_CLOEXEC) {
812 target_type |= TARGET_SOCK_CLOEXEC;
814 #endif
816 #if defined(SOCK_NONBLOCK)
817 if (host_type & SOCK_NONBLOCK) {
818 target_type |= TARGET_SOCK_NONBLOCK;
820 #endif
822 return target_type;
825 static abi_ulong target_brk;
826 static abi_ulong target_original_brk;
827 static abi_ulong brk_page;
829 void target_set_brk(abi_ulong new_brk)
831 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
832 brk_page = HOST_PAGE_ALIGN(target_brk);
835 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
836 #define DEBUGF_BRK(message, args...)
838 /* do_brk() must return target values and target errnos. */
839 abi_long do_brk(abi_ulong new_brk)
841 abi_long mapped_addr;
842 int new_alloc_size;
844 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
846 if (!new_brk) {
847 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
848 return target_brk;
850 if (new_brk < target_original_brk) {
851 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
852 target_brk);
853 return target_brk;
856 /* If the new brk is less than the highest page reserved to the
857 * target heap allocation, set it and we're almost done... */
858 if (new_brk <= brk_page) {
859 /* Heap contents are initialized to zero, as for anonymous
860 * mapped pages. */
861 if (new_brk > target_brk) {
862 memset(g2h(target_brk), 0, new_brk - target_brk);
864 target_brk = new_brk;
865 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
866 return target_brk;
869 /* We need to allocate more memory after the brk... Note that
870 * we don't use MAP_FIXED because that will map over the top of
871 * any existing mapping (like the one with the host libc or qemu
872 * itself); instead we treat "mapped but at wrong address" as
873 * a failure and unmap again.
875 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
876 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
877 PROT_READ|PROT_WRITE,
878 MAP_ANON|MAP_PRIVATE, 0, 0));
880 if (mapped_addr == brk_page) {
881 /* Heap contents are initialized to zero, as for anonymous
882 * mapped pages. Technically the new pages are already
883 * initialized to zero since they *are* anonymous mapped
884 * pages, however we have to take care with the contents that
885 * come from the remaining part of the previous page: it may
886 * contains garbage data due to a previous heap usage (grown
887 * then shrunken). */
888 memset(g2h(target_brk), 0, brk_page - target_brk);
890 target_brk = new_brk;
891 brk_page = HOST_PAGE_ALIGN(target_brk);
892 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
893 target_brk);
894 return target_brk;
895 } else if (mapped_addr != -1) {
896 /* Mapped but at wrong address, meaning there wasn't actually
897 * enough space for this brk.
899 target_munmap(mapped_addr, new_alloc_size);
900 mapped_addr = -1;
901 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
903 else {
904 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
907 #if defined(TARGET_ALPHA)
908 /* We (partially) emulate OSF/1 on Alpha, which requires we
909 return a proper errno, not an unchanged brk value. */
910 return -TARGET_ENOMEM;
911 #endif
912 /* For everything else, return the previous break. */
913 return target_brk;
916 static inline abi_long copy_from_user_fdset(fd_set *fds,
917 abi_ulong target_fds_addr,
918 int n)
920 int i, nw, j, k;
921 abi_ulong b, *target_fds;
923 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
924 if (!(target_fds = lock_user(VERIFY_READ,
925 target_fds_addr,
926 sizeof(abi_ulong) * nw,
927 1)))
928 return -TARGET_EFAULT;
930 FD_ZERO(fds);
931 k = 0;
932 for (i = 0; i < nw; i++) {
933 /* grab the abi_ulong */
934 __get_user(b, &target_fds[i]);
935 for (j = 0; j < TARGET_ABI_BITS; j++) {
936 /* check the bit inside the abi_ulong */
937 if ((b >> j) & 1)
938 FD_SET(k, fds);
939 k++;
943 unlock_user(target_fds, target_fds_addr, 0);
945 return 0;
948 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
949 abi_ulong target_fds_addr,
950 int n)
952 if (target_fds_addr) {
953 if (copy_from_user_fdset(fds, target_fds_addr, n))
954 return -TARGET_EFAULT;
955 *fds_ptr = fds;
956 } else {
957 *fds_ptr = NULL;
959 return 0;
962 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
963 const fd_set *fds,
964 int n)
966 int i, nw, j, k;
967 abi_long v;
968 abi_ulong *target_fds;
970 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
971 if (!(target_fds = lock_user(VERIFY_WRITE,
972 target_fds_addr,
973 sizeof(abi_ulong) * nw,
974 0)))
975 return -TARGET_EFAULT;
977 k = 0;
978 for (i = 0; i < nw; i++) {
979 v = 0;
980 for (j = 0; j < TARGET_ABI_BITS; j++) {
981 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
982 k++;
984 __put_user(v, &target_fds[i]);
987 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
989 return 0;
/* Host clock tick rate (USER_HZ): Alpha is the only Linux host using
 * 1024 ticks/sec; everything else uses 100.
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t value from host ticks to target ticks.  The
 * multiply is done in int64_t so large uptimes don't overflow before
 * the division.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage into guest memory at target_addr, field by
 * field with byte-swapping as needed.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be locked for writing.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1037 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1039 abi_ulong target_rlim_swap;
1040 rlim_t result;
1042 target_rlim_swap = tswapal(target_rlim);
1043 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1044 return RLIM_INFINITY;
1046 result = target_rlim_swap;
1047 if (target_rlim_swap != (rlim_t)result)
1048 return RLIM_INFINITY;
1050 return result;
1053 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1055 abi_ulong target_rlim_swap;
1056 abi_ulong result;
1058 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1059 target_rlim_swap = TARGET_RLIM_INFINITY;
1060 else
1061 target_rlim_swap = rlim;
1062 result = tswapal(target_rlim_swap);
1064 return result;
/* Map a guest RLIMIT_* resource code to the host's value.  Unknown
 * codes are passed through unchanged and left for the host syscall to
 * reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
/* Read a guest struct timeval at target_tv_addr into the host *tv.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Write the host struct timeval *tv into guest memory at target_tv_addr.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Read a guest struct timezone at target_tz_addr into the host *tz.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a guest struct mq_attr at target_mq_attr_addr into the host
 * *attr (POSIX message queue attributes).  Returns 0 on success or
 * -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
/* Write the host struct mq_attr *attr into guest memory at
 * target_mq_attr_addr.  Returns 0 on success or -TARGET_EFAULT on a
 * bad guest address.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Emulates the guest select() by converting the three fd sets and the
 * timeout from guest memory, calling pselect6 on the host (via the
 * safe_ wrapper so pending guest signals are handled correctly), and
 * copying the possibly-modified sets and remaining timeout back.
 * NULL guest pointers mean "not supplied", as for native select().
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* pselect6 takes a timespec, so convert the guest's timeval. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux select() updates the timeout with the time remaining;
         * propagate that back to the guest. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
/* Create a pipe with flags via the host pipe2(); reports -ENOSYS when
 * the host libc was built without pipe2 support (CONFIG_PIPE2 unset).
 * Returns the raw host result (0 or -1 with errno set).
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Common implementation for the guest pipe() and pipe2() syscalls.
 * Creates the host pipe, then delivers the two fds either through the
 * target's register-return convention (some targets, plain pipe only)
 * or by writing them to the guest array at pipedes.
 * Returns fd[0], 0, or a target errno.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd is returned in a4; the syscall returns fd[0]. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: write both fds into the guest int array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest IP multicast request (struct ip_mreq or ip_mreqn,
 * distinguished by len) into the host struct ip_mreqn.  The two
 * in_addr fields are already in network byte order so they are copied
 * untouched; only the optional ifindex needs swapping.
 * Returns 0 or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    /* Only the larger ip_mreqn form carries an interface index. */
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr at target_addr (len bytes) into the host
 * struct sockaddr *addr, handling per-family quirks (AF_UNIX path
 * termination, AF_NETLINK/AF_PACKET field swaps).  An fd-specific
 * translator registered via fd_trans takes precedence when present.
 * Returns 0 or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* NOTE(review): cp[len] reads one byte past the locked
             * region; presumably safe because lock_user rounds up, but
             * worth confirming.  */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host struct sockaddr into guest memory at target_addr,
 * byte-swapping the family and the AF_NETLINK/AF_PACKET-specific
 * fields.  Returns 0 or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    if (addr->sa_family == AF_NETLINK) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the control-message (cmsg) chain attached to a guest msghdr
 * into the host msghdr's pre-allocated control buffer, walking both
 * chains in lockstep.  SCM_RIGHTS fd arrays and SCM_CREDENTIALS are
 * converted field-wise; anything else is copied verbatim with a log
 * message.  On return msgh->msg_controllen holds the host bytes used.
 * Returns 0 or -TARGET_EFAULT on a bad guest control pointer.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length: guest cmsg_len minus the guest header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Swap each passed file descriptor individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown type: pass the bytes through unconverted. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host msghdr's cmsg chain back into the guest msghdr's
 * control buffer.  Mirrors Linux put_cmsg() semantics: headers are
 * never truncated, payloads may be, and truncation is reported to the
 * guest via MSG_CTRUNC.  SCM_RIGHTS, SO_TIMESTAMP and SCM_CREDENTIALS
 * get field-wise conversion; unknown types are copied (and padded)
 * verbatim with a log message.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length (host cmsg_len minus host header). */
        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fallthrough */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* A truncated timestamp is unusable; bail out. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Raw copy, zero-padded if the target payload is larger. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place.
 * The swap is symmetric, so this serves both host-to-target and
 * target-to-host directions.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1633 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1634 size_t len,
1635 abi_long (*host_to_target_nlmsg)
1636 (struct nlmsghdr *))
1638 uint32_t nlmsg_len;
1639 abi_long ret;
1641 while (len > sizeof(struct nlmsghdr)) {
1643 nlmsg_len = nlh->nlmsg_len;
1644 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1645 nlmsg_len > len) {
1646 break;
1649 switch (nlh->nlmsg_type) {
1650 case NLMSG_DONE:
1651 tswap_nlmsghdr(nlh);
1652 return 0;
1653 case NLMSG_NOOP:
1654 break;
1655 case NLMSG_ERROR:
1657 struct nlmsgerr *e = NLMSG_DATA(nlh);
1658 e->error = tswap32(e->error);
1659 tswap_nlmsghdr(&e->msg);
1660 tswap_nlmsghdr(nlh);
1661 return 0;
1663 default:
1664 ret = host_to_target_nlmsg(nlh);
1665 if (ret < 0) {
1666 tswap_nlmsghdr(nlh);
1667 return ret;
1669 break;
1671 tswap_nlmsghdr(nlh);
1672 len -= NLMSG_ALIGN(nlmsg_len);
1673 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
1675 return 0;
1678 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1679 size_t len,
1680 abi_long (*target_to_host_nlmsg)
1681 (struct nlmsghdr *))
1683 int ret;
1685 while (len > sizeof(struct nlmsghdr)) {
1686 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1687 tswap32(nlh->nlmsg_len) > len) {
1688 break;
1690 tswap_nlmsghdr(nlh);
1691 switch (nlh->nlmsg_type) {
1692 case NLMSG_DONE:
1693 return 0;
1694 case NLMSG_NOOP:
1695 break;
1696 case NLMSG_ERROR:
1698 struct nlmsgerr *e = NLMSG_DATA(nlh);
1699 e->error = tswap32(e->error);
1700 tswap_nlmsghdr(&e->msg);
1701 return 0;
1703 default:
1704 ret = target_to_host_nlmsg(nlh);
1705 if (ret < 0) {
1706 return ret;
1709 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1710 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1712 return 0;
1715 #ifdef CONFIG_RTNETLINK
1716 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
1717 size_t len, void *context,
1718 abi_long (*host_to_target_nlattr)
1719 (struct nlattr *,
1720 void *context))
1722 unsigned short nla_len;
1723 abi_long ret;
1725 while (len > sizeof(struct nlattr)) {
1726 nla_len = nlattr->nla_len;
1727 if (nla_len < sizeof(struct nlattr) ||
1728 nla_len > len) {
1729 break;
1731 ret = host_to_target_nlattr(nlattr, context);
1732 nlattr->nla_len = tswap16(nlattr->nla_len);
1733 nlattr->nla_type = tswap16(nlattr->nla_type);
1734 if (ret < 0) {
1735 return ret;
1737 len -= NLA_ALIGN(nla_len);
1738 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
1740 return 0;
1743 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1744 size_t len,
1745 abi_long (*host_to_target_rtattr)
1746 (struct rtattr *))
1748 unsigned short rta_len;
1749 abi_long ret;
1751 while (len > sizeof(struct rtattr)) {
1752 rta_len = rtattr->rta_len;
1753 if (rta_len < sizeof(struct rtattr) ||
1754 rta_len > len) {
1755 break;
1757 ret = host_to_target_rtattr(rtattr);
1758 rtattr->rta_len = tswap16(rtattr->rta_len);
1759 rtattr->rta_type = tswap16(rtattr->rta_type);
1760 if (ret < 0) {
1761 return ret;
1763 len -= RTA_ALIGN(rta_len);
1764 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1766 return 0;
1769 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/* Byte-swap the payload of one IFLA_BR_* bridge attribute in place,
 * based on that attribute's known payload width.  Zero-width, uint8_t
 * and raw-byte payloads need no swapping.  Unknown types are logged
 * and left untouched.  Always returns 0.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case IFLA_BR_VLAN_FILTERING:
    case IFLA_BR_TOPOLOGY_CHANGE:
    case IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case IFLA_BR_MCAST_ROUTER:
    case IFLA_BR_MCAST_SNOOPING:
    case IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case IFLA_BR_MCAST_QUERIER:
    case IFLA_BR_NF_CALL_IPTABLES:
    case IFLA_BR_NF_CALL_IP6TABLES:
    case IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case IFLA_BR_PRIORITY:
    case IFLA_BR_VLAN_PROTOCOL:
    case IFLA_BR_GROUP_FWD_MASK:
    case IFLA_BR_ROOT_PORT:
    case IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case IFLA_BR_FORWARD_DELAY:
    case IFLA_BR_HELLO_TIME:
    case IFLA_BR_MAX_AGE:
    case IFLA_BR_AGEING_TIME:
    case IFLA_BR_STP_STATE:
    case IFLA_BR_ROOT_PATH_COST:
    case IFLA_BR_MCAST_HASH_ELASTICITY:
    case IFLA_BR_MCAST_HASH_MAX:
    case IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case IFLA_BR_HELLO_TIMER:
    case IFLA_BR_TCN_TIMER:
    case IFLA_BR_GC_TIMER:
    case IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case IFLA_BR_MCAST_QUERIER_INTVL:
    case IFLA_BR_MCAST_QUERY_INTVL:
    case IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case IFLA_BR_ROOT_ID:
    case IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one IFLA_BRPORT_* bridge-port attribute in
 * place, based on that attribute's known payload width.  Unknown types
 * are logged and left untouched.  Always returns 0.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case IFLA_BRPORT_STATE:
    case IFLA_BRPORT_MODE:
    case IFLA_BRPORT_GUARD:
    case IFLA_BRPORT_PROTECT:
    case IFLA_BRPORT_FAST_LEAVE:
    case IFLA_BRPORT_LEARNING:
    case IFLA_BRPORT_UNICAST_FLOOD:
    case IFLA_BRPORT_PROXYARP:
    case IFLA_BRPORT_LEARNING_SYNC:
    case IFLA_BRPORT_PROXYARP_WIFI:
    case IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case IFLA_BRPORT_CONFIG_PENDING:
    case IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case IFLA_BRPORT_PRIORITY:
    case IFLA_BRPORT_DESIGNATED_PORT:
    case IFLA_BRPORT_DESIGNATED_COST:
    case IFLA_BRPORT_ID:
    case IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case IFLA_BRPORT_ROOT_ID:
    case IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Context threaded through the IFLA_LINKINFO attribute walk: records
 * the kind names (IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND) so the later
 * IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA attributes can be dispatched
 * to the right nested converter.
 */
struct linkinfo_context {
    int len;            /* length of name (kind string) */
    char *name;         /* points into the IFLA_INFO_KIND payload */
    int slave_len;      /* length of slave_name */
    char *slave_name;   /* points into the IFLA_INFO_SLAVE_KIND payload */
};
/* Convert one IFLA_INFO_* attribute inside IFLA_LINKINFO.  The KIND
 * attributes are remembered in the context; the DATA attributes are
 * then recursed into with the converter matching that kind (currently
 * only "bridge" is supported).  Relies on the kernel emitting KIND
 * before DATA.  Always returns 0 except for nested-walk errors.
 */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case IFLA_INFO_DATA:
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            /* NOTE(review): passes nla_len (which includes NLA_HDRLEN)
             * as the nested payload length — looks like an over-count
             * by the header size; confirm against the attribute
             * layout. */
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}
/* Byte-swap one AF_INET attribute nested under IFLA_AF_SPEC:
 * IFLA_INET_CONF is an array of uint32_t, swapped element by element.
 * Unknown types are logged and left untouched.  Always returns 0.
 */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    case IFLA_INET_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/* Byte-swap one AF_INET6 attribute nested under IFLA_AF_SPEC, based on
 * that attribute's known payload layout (scalar, array, or
 * ifla_cacheinfo).  Unknown types are logged and left untouched.
 * Always returns 0.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case IFLA_INET6_STATS:
    case IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/* Dispatch one IFLA_AF_SPEC sub-attribute: the nla_type here is an
 * address family, and the payload is a nested attribute list converted
 * by the per-family handler.  Unknown families are logged and left
 * untouched.  Returns 0 or the nested walk's error.
 */
static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    switch (nlattr->nla_type) {
    case AF_INET:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet_nlattr);
    case AF_INET6:
        return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
                                              NULL,
                                             host_to_target_data_inet6_nlattr);
    default:
        gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
2048 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2050 uint32_t *u32;
2051 struct rtnl_link_stats *st;
2052 struct rtnl_link_stats64 *st64;
2053 struct rtnl_link_ifmap *map;
2054 struct linkinfo_context li_context;
2056 switch (rtattr->rta_type) {
2057 /* binary stream */
2058 case IFLA_ADDRESS:
2059 case IFLA_BROADCAST:
2060 /* string */
2061 case IFLA_IFNAME:
2062 case IFLA_QDISC:
2063 break;
2064 /* uin8_t */
2065 case IFLA_OPERSTATE:
2066 case IFLA_LINKMODE:
2067 case IFLA_CARRIER:
2068 case IFLA_PROTO_DOWN:
2069 break;
2070 /* uint32_t */
2071 case IFLA_MTU:
2072 case IFLA_LINK:
2073 case IFLA_WEIGHT:
2074 case IFLA_TXQLEN:
2075 case IFLA_CARRIER_CHANGES:
2076 case IFLA_NUM_RX_QUEUES:
2077 case IFLA_NUM_TX_QUEUES:
2078 case IFLA_PROMISCUITY:
2079 case IFLA_EXT_MASK:
2080 case IFLA_LINK_NETNSID:
2081 case IFLA_GROUP:
2082 case IFLA_MASTER:
2083 case IFLA_NUM_VF:
2084 u32 = RTA_DATA(rtattr);
2085 *u32 = tswap32(*u32);
2086 break;
2087 /* struct rtnl_link_stats */
2088 case IFLA_STATS:
2089 st = RTA_DATA(rtattr);
2090 st->rx_packets = tswap32(st->rx_packets);
2091 st->tx_packets = tswap32(st->tx_packets);
2092 st->rx_bytes = tswap32(st->rx_bytes);
2093 st->tx_bytes = tswap32(st->tx_bytes);
2094 st->rx_errors = tswap32(st->rx_errors);
2095 st->tx_errors = tswap32(st->tx_errors);
2096 st->rx_dropped = tswap32(st->rx_dropped);
2097 st->tx_dropped = tswap32(st->tx_dropped);
2098 st->multicast = tswap32(st->multicast);
2099 st->collisions = tswap32(st->collisions);
2101 /* detailed rx_errors: */
2102 st->rx_length_errors = tswap32(st->rx_length_errors);
2103 st->rx_over_errors = tswap32(st->rx_over_errors);
2104 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2105 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2106 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2107 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2109 /* detailed tx_errors */
2110 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2111 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2112 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2113 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2114 st->tx_window_errors = tswap32(st->tx_window_errors);
2116 /* for cslip etc */
2117 st->rx_compressed = tswap32(st->rx_compressed);
2118 st->tx_compressed = tswap32(st->tx_compressed);
2119 break;
2120 /* struct rtnl_link_stats64 */
2121 case IFLA_STATS64:
2122 st64 = RTA_DATA(rtattr);
2123 st64->rx_packets = tswap64(st64->rx_packets);
2124 st64->tx_packets = tswap64(st64->tx_packets);
2125 st64->rx_bytes = tswap64(st64->rx_bytes);
2126 st64->tx_bytes = tswap64(st64->tx_bytes);
2127 st64->rx_errors = tswap64(st64->rx_errors);
2128 st64->tx_errors = tswap64(st64->tx_errors);
2129 st64->rx_dropped = tswap64(st64->rx_dropped);
2130 st64->tx_dropped = tswap64(st64->tx_dropped);
2131 st64->multicast = tswap64(st64->multicast);
2132 st64->collisions = tswap64(st64->collisions);
2134 /* detailed rx_errors: */
2135 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2136 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2137 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2138 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2139 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2140 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2142 /* detailed tx_errors */
2143 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2144 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2145 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2146 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2147 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2149 /* for cslip etc */
2150 st64->rx_compressed = tswap64(st64->rx_compressed);
2151 st64->tx_compressed = tswap64(st64->tx_compressed);
2152 break;
2153 /* struct rtnl_link_ifmap */
2154 case IFLA_MAP:
2155 map = RTA_DATA(rtattr);
2156 map->mem_start = tswap64(map->mem_start);
2157 map->mem_end = tswap64(map->mem_end);
2158 map->base_addr = tswap64(map->base_addr);
2159 map->irq = tswap16(map->irq);
2160 break;
2161 /* nested */
2162 case IFLA_LINKINFO:
2163 memset(&li_context, 0, sizeof(li_context));
2164 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2165 &li_context,
2166 host_to_target_data_linkinfo_nlattr);
2167 case IFLA_AF_SPEC:
2168 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2169 NULL,
2170 host_to_target_data_spec_nlattr);
2171 default:
2172 gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
2173 break;
2175 return 0;
/*
 * Byte-swap one IFA_* (address) attribute in place, host -> target order.
 * Called for each rtattr of an RTM_*ADDR netlink message.  Attributes are
 * grouped by payload type; unknown types are logged and passed through
 * unmodified.  Always returns 0.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): IFA_BROADCAST looks like a raw address (presumably
         * network byte order, like IFA_ADDRESS above) rather than a u32
         * scalar — confirm whether swapping it here is correct. */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2212 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2214 uint32_t *u32;
2215 switch (rtattr->rta_type) {
2216 /* binary: depends on family type */
2217 case RTA_GATEWAY:
2218 case RTA_DST:
2219 case RTA_PREFSRC:
2220 break;
2221 /* u32 */
2222 case RTA_PRIORITY:
2223 case RTA_TABLE:
2224 case RTA_OIF:
2225 u32 = RTA_DATA(rtattr);
2226 *u32 = tswap32(*u32);
2227 break;
2228 default:
2229 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2230 break;
2232 return 0;
2235 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2236 uint32_t rtattr_len)
2238 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2239 host_to_target_data_link_rtattr);
2242 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2243 uint32_t rtattr_len)
2245 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2246 host_to_target_data_addr_rtattr);
2249 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2250 uint32_t rtattr_len)
2252 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2253 host_to_target_data_route_rtattr);
/*
 * Convert the payload of one NETLINK_ROUTE message from host to target
 * representation, in place.  The fixed-size header (ifinfomsg/ifaddrmsg/
 * rtmsg) is swapped field by field, then the trailing rtattr area is
 * handed to the matching per-attribute walker.
 *
 * nlmsg_len is copied up front so the rtattr-area length computation is
 * not affected by any later in-place swapping of the nlmsghdr (presumably
 * done by the caller after this returns — confirm against
 * host_to_target_for_each_nlmsg).
 *
 * Returns 0 on success, -TARGET_EINVAL for unsupported message types.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* Only touch the body if it is large enough to hold the header */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
2304 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2305 size_t len)
2307 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Iterate over a target-provided rtattr buffer, converting each header
 * to host byte order and invoking the supplied callback on the attribute.
 *
 * The length/type fields arrive in target byte order, so the sanity
 * checks must use tswap16() on rta_len BEFORE the in-place swap; once
 * validated, the header is swapped and the (now host-order) rta_len is
 * used to advance to the next aligned attribute.  Malformed lengths
 * terminate the walk silently; callback errors abort it with that error.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* validate against the target-order value before swapping */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        /* advance by the aligned, now host-order, attribute length */
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2335 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2337 switch (rtattr->rta_type) {
2338 default:
2339 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
2340 break;
2342 return 0;
2345 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2347 switch (rtattr->rta_type) {
2348 /* binary: depends on family type */
2349 case IFA_LOCAL:
2350 case IFA_ADDRESS:
2351 break;
2352 default:
2353 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2354 break;
2356 return 0;
2359 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2361 uint32_t *u32;
2362 switch (rtattr->rta_type) {
2363 /* binary: depends on family type */
2364 case RTA_DST:
2365 case RTA_SRC:
2366 case RTA_GATEWAY:
2367 break;
2368 /* u32 */
2369 case RTA_OIF:
2370 u32 = RTA_DATA(rtattr);
2371 *u32 = tswap32(*u32);
2372 break;
2373 default:
2374 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2375 break;
2377 return 0;
2380 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2381 uint32_t rtattr_len)
2383 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2384 target_to_host_data_link_rtattr);
2387 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2388 uint32_t rtattr_len)
2390 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2391 target_to_host_data_addr_rtattr);
2394 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2395 uint32_t rtattr_len)
2397 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2398 target_to_host_data_route_rtattr);
/*
 * Convert the payload of one target NETLINK_ROUTE request to host
 * representation, in place.  The GET* dump requests carry no body that
 * needs conversion and are passed through; NEW*/DEL* requests get their
 * fixed header swapped and their rtattr area walked.  nlh->nlmsg_len is
 * already in host byte order here (presumably swapped by the caller
 * before this is invoked — confirm against target_to_host_for_each_nlmsg).
 *
 * Returns 0 on success, -TARGET_EOPNOTSUPP for unsupported types.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2449 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2451 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2453 #endif /* CONFIG_RTNETLINK */
2455 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2457 switch (nlh->nlmsg_type) {
2458 default:
2459 gemu_log("Unknown host audit message type %d\n",
2460 nlh->nlmsg_type);
2461 return -TARGET_EINVAL;
2463 return 0;
2466 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2467 size_t len)
2469 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2472 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2474 switch (nlh->nlmsg_type) {
2475 case AUDIT_USER:
2476 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2477 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2478 break;
2479 default:
2480 gemu_log("Unknown target audit message type %d\n",
2481 nlh->nlmsg_type);
2482 return -TARGET_EINVAL;
2485 return 0;
2488 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2490 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulates setsockopt(2) for the target: validates and fetches the
 * option value from target memory (swapping to host representation
 * where needed), translates TARGET_SOL_SOCKET/TARGET_SO_* constants to
 * their host equivalents, then issues the host setsockopt().
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These accept either a full int or a single byte from the
             * target; a short option defaults the upper bytes to zero. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept either the short (ip_mreq) or long (ip_mreqn) form */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* ip_mreq_source holds raw network-order addresses: no
             * byte swapping needed, pass the bytes straight through */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

                /* shared tail for SO_RCVTIMEO/SO_SNDTIMEO: copy a
                 * target_timeval and set the chosen host option */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* build a host-order copy of the BPF program; jt/jf are
                 * single bytes and need no swapping */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* silently truncate over-long names to IFNAMSIZ-1 bytes */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                /* re-copy into a local buffer so we can guarantee NUL
                 * termination regardless of what the target passed */
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
        /* Options with 'int' argument.  All of these just translate the
         * option constant and fall through to the shared int handler
         * after the switch. */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
        /* NOTE(review): this break is unreachable (the case above already
         * breaks); dead code left in place. */
        break;
        default:
            goto unimplemented;
        }
        /* shared handler for all 'int' valued SOL_SOCKET options */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2): translates TARGET_SOL_SOCKET/TARGET_SO_*
 * constants, performs the host getsockopt(), and copies the result back
 * to target memory in target byte order, updating *optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* never copy back more than the target asked for */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  These all translate the option
         * constant and jump into the shared SOL_TCP int handler below. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) happens to equal sizeof(val) (both 4
         * bytes) but sizeof(val) would express the intent — the buffer
         * passed to getsockopt() is &val. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* a 4-byte target buffer gets the full int, anything shorter a
         * single byte */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* small byte-sized value into a short buffer: write one byte */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * Build a host iovec array from a target iovec array at target_addr,
 * locking each referenced buffer into host memory.
 *
 * Returns the array (freed/unlocked later via unlock_iovec), or NULL
 * with errno set on failure.  count == 0 returns NULL with errno == 0 —
 * callers must distinguish that from the error case.
 *
 * Partial-write semantics: only a bad FIRST buffer is a fault; later
 * bad buffers are recorded as NULL base / zero length, and once one is
 * seen every subsequent entry is also zeroed (bad_address is sticky),
 * so a host writev/readv performs the prefix the target could access.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* clamp the running total to the maximum transferable size */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* unwind: unlock every buffer locked so far (entries with a
     * non-positive target length never got locked) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/*
 * Release an iovec array produced by lock_iovec: unlock every guest
 * buffer (copying data back to the target when 'copy' is set, i.e. after
 * a read-style operation) and free the host array.  The walk stops at
 * the first negative target length, mirroring lock_iovec's fail point.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/*
 * Translate a target socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host's encoding, in place.
 *
 * Returns 0 on success, or -TARGET_EINVAL when a requested flag cannot
 * be represented on this host (no SOCK_CLOEXEC, or neither SOCK_NONBLOCK
 * nor O_NONBLOCK — the O_NONBLOCK case is patched up later by
 * sock_flags_fixup()).
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* other base types are assumed to share the host numbering */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.
 * On hosts without SOCK_NONBLOCK, a requested TARGET_SOCK_NONBLOCK is
 * emulated by flipping O_NONBLOCK on the new fd.  Returns the fd, or
 * -TARGET_EINVAL (after closing the fd) if the fcntl fails.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int fl = fcntl(fd, F_GETFL);

        if (fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3119 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3120 abi_ulong target_addr,
3121 socklen_t len)
3123 struct sockaddr *addr = host_addr;
3124 struct target_sockaddr *target_saddr;
3126 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3127 if (!target_saddr) {
3128 return -TARGET_EFAULT;
3131 memcpy(addr, target_saddr, len);
3132 addr->sa_family = tswap16(target_saddr->sa_family);
3133 /* spkt_protocol is big-endian */
3135 unlock_user(target_saddr, target_addr, 0);
3136 return 0;
/* fd translator for SOCK_PACKET sockets: only sockaddr conversion needed */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3143 #ifdef CONFIG_RTNETLINK
3144 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3146 abi_long ret;
3148 ret = target_to_host_nlmsg_route(buf, len);
3149 if (ret < 0) {
3150 return ret;
3153 return len;
3156 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3158 abi_long ret;
3160 ret = host_to_target_nlmsg_route(buf, len);
3161 if (ret < 0) {
3162 return ret;
3165 return len;
/* fd translator for NETLINK_ROUTE sockets: byte-swap message payloads
 * in both directions */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
3172 #endif /* CONFIG_RTNETLINK */
3174 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3176 abi_long ret;
3178 ret = target_to_host_nlmsg_audit(buf, len);
3179 if (ret < 0) {
3180 return ret;
3183 return len;
3186 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3188 abi_long ret;
3190 ret = host_to_target_nlmsg_audit(buf, len);
3191 if (ret < 0) {
3192 return ret;
3195 return len;
/* fd translator for NETLINK_AUDIT sockets: validate/convert message
 * payloads in both directions */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
/* do_socket() Must return target values and target errnos.
 *
 * Emulates socket(2): translates the target type flags, restricts
 * PF_NETLINK to the protocols we can translate, creates the host
 * socket, and registers an fd translator where the protocol needs
 * per-message conversion.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* only netlink protocols with a registered translator (or none
     * needed) are allowed through */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    /* packet sockets take a big-endian protocol number; tswap16 here
     * converts the target's native-order value accordingly */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* unreachable: filtered by the PF_NETLINK check above */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3257 /* do_bind() Must return target values and target errnos. */
3258 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3259 socklen_t addrlen)
3261 void *addr;
3262 abi_long ret;
3264 if ((int)addrlen < 0) {
3265 return -TARGET_EINVAL;
3268 addr = alloca(addrlen+1);
3270 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3271 if (ret)
3272 return ret;
3274 return get_errno(bind(sockfd, addr, addrlen));
3277 /* do_connect() Must return target values and target errnos. */
3278 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3279 socklen_t addrlen)
3281 void *addr;
3282 abi_long ret;
3284 if ((int)addrlen < 0) {
3285 return -TARGET_EINVAL;
3288 addr = alloca(addrlen+1);
3290 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3291 if (ret)
3292 return ret;
3294 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * target_msghdr.  Builds a host msghdr (name, control area, iovec),
 * performs the transfer, and for receives converts the results back.
 *
 * The control buffer is sized at twice the target's msg_controllen to
 * leave room for host cmsg layouts that are larger than the target's.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* NOTE(review): the translator path only converts and sends
             * iov[0]; multi-element iovecs would be truncated here. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            /* translate the received payload (fd translator) or the
             * control messages, then the peer address */
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                /* on success, report the number of bytes received */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
/* Common wrapper for the sendmsg/recvmsg syscalls: lock the guest
 * target_msghdr, hand it to do_sendrecvmsg_locked(), then unlock.
 * @send is 1 for sendmsg, 0 for recvmsg; it controls both the access
 * check direction and whether the struct is copied back to the guest.
 * Returns a target errno on failure.
 */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    /* For send we only read the guest header (copy in, no copy back);
     * for recv we must write updated lengths/flags back on unlock. */
    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked()
 * for each target_mmsghdr in the guest vector.  Returns the number of
 * datagrams processed, or a target errno if none succeeded.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp oversized vectors, as the kernel does. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Per-message byte count goes back to the guest in msg_len. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the i entries we actually touched need copying back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* Translate target SOCK_CLOEXEC/SOCK_NONBLOCK bits to host values. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    /* NULL addr: caller doesn't want the peer address at all. */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* NOTE(review): EINVAL here (not EFAULT) appears intentional to
     * mirror kernel accept() behaviour for a bad addr buffer -- confirm
     * against the kernel sources before changing. */
    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3486 /* do_getpeername() Must return target values and target errnos. */
3487 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3488 abi_ulong target_addrlen_addr)
3490 socklen_t addrlen;
3491 void *addr;
3492 abi_long ret;
3494 if (get_user_u32(addrlen, target_addrlen_addr))
3495 return -TARGET_EFAULT;
3497 if ((int)addrlen < 0) {
3498 return -TARGET_EINVAL;
3501 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3502 return -TARGET_EFAULT;
3504 addr = alloca(addrlen);
3506 ret = get_errno(getpeername(fd, addr, &addrlen));
3507 if (!is_error(ret)) {
3508 host_to_target_sockaddr(target_addr, addr, addrlen);
3509 if (put_user_u32(addrlen, target_addrlen_addr))
3510 ret = -TARGET_EFAULT;
3512 return ret;
3515 /* do_getsockname() Must return target values and target errnos. */
3516 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3517 abi_ulong target_addrlen_addr)
3519 socklen_t addrlen;
3520 void *addr;
3521 abi_long ret;
3523 if (get_user_u32(addrlen, target_addrlen_addr))
3524 return -TARGET_EFAULT;
3526 if ((int)addrlen < 0) {
3527 return -TARGET_EINVAL;
3530 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3531 return -TARGET_EFAULT;
3533 addr = alloca(addrlen);
3535 ret = get_errno(getsockname(fd, addr, &addrlen));
3536 if (!is_error(ret)) {
3537 host_to_target_sockaddr(target_addr, addr, addrlen);
3538 if (put_user_u32(addrlen, target_addrlen_addr))
3539 ret = -TARGET_EFAULT;
3541 return ret;
3544 /* do_socketpair() Must return target values and target errnos. */
3545 static abi_long do_socketpair(int domain, int type, int protocol,
3546 abi_ulong target_tab_addr)
3548 int tab[2];
3549 abi_long ret;
3551 target_to_host_sock_type(&type);
3553 ret = get_errno(socketpair(domain, type, protocol, tab));
3554 if (!is_error(ret)) {
3555 if (put_user_s32(tab[0], target_tab_addr)
3556 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3557 ret = -TARGET_EFAULT;
3559 return ret;
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    /* copy_msg holds the original locked pointer while host_msg is
     * temporarily replaced by a translated copy (see fd_trans below). */
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /* Some fd types (e.g. netlink) need their payload translated from
     * target to host format; work on a private copy so the guest's
     * buffer is never modified. */
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1: target_to_host_sockaddr may need a trailing NUL for
         * AF_UNIX-style paths -- TODO confirm against that helper. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Undo the translation swap so unlock_user releases the real
     * locked pointer, then free the temporary copy. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* Caller wants the source address: read the in/out length. */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        /* Translate received payload in place for special fd types
         * (ret becomes the translated length or an error). */
        if (fd_trans_host_to_target_data(fd)) {
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the full buffer back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Failure: release the lock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 * Demultiplexes the legacy socketcall(2) syscall: @num selects the
 * operation, @vptr points at a guest array of abi_long arguments
 * whose count is given by the ac[] table below.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
#define N_SHM_REGIONS 32

/* Bookkeeping for guest shmat() mappings so do_shmdt() can clear the
 * page flags for the right range. */
static struct shm_region {
    abi_ulong start;   /* guest address of the attached segment */
    abi_ulong size;    /* segment size in bytes */
    bool in_use;
} shm_regions[N_SHM_REGIONS];

/* Target-ABI layout of struct semid_ds as seen in guest memory. */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;   /* 32-bit ABIs carry padding after the times */
#endif
    abi_ulong sem_ctime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
/* Copy a guest struct ipc_perm (embedded at the head of a guest
 * semid_ds) into host format, byte-swapping each field.  Returns
 * -TARGET_EFAULT if the guest struct cannot be read.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    __get_user(host_ip->__key, &target_ip->__key);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit on others. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm
 * into the guest semid_ds at @target_addr, byte-swapping each field.
 * Returns -TARGET_EFAULT if the guest struct cannot be written.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq widths differ per target ABI; see the
     * target_to_host direction for the same #if split. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest semid_ds to host format.  The embedded ipc_perm is
 * handled by target_to_host_ipc_perm() (which takes its own lock on
 * the same guest struct).
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): early return here skips unlock_user_struct; the
     * read lock's copy is leaked on this path -- verify lock_user
     * semantics before relying on it. */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Convert a host semid_ds back into the guest struct at @target_addr. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Target-ABI layout of struct seminfo (IPC_INFO/SEM_INFO result). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host struct seminfo field-by-field into guest memory,
 * byte-swapping via __put_user.  Returns -TARGET_EFAULT on bad addr.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl() argument union (glibc does not export one). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side view of the same union: all pointer members are guest
 * addresses (abi_ulong), so only 'val' needs special byte-swap care. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* For SETALL: allocate a host array of semaphore values and fill it
 * from the guest array at @target_addr.  The element count is queried
 * from the kernel via IPC_STAT.  On success the caller owns
 * *host_array and must eventually free it (host_to_target_semarray
 * does so).  Returns a target errno on failure.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Ask the kernel how many semaphores are in this set. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3937 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3938 unsigned short **host_array)
3940 int nsems;
3941 unsigned short *array;
3942 union semun semun;
3943 struct semid_ds semid_ds;
3944 int i, ret;
3946 semun.buf = &semid_ds;
3948 ret = semctl(semid, 0, IPC_STAT, semun);
3949 if (ret == -1)
3950 return get_errno(ret);
3952 nsems = semid_ds.sem_nsems;
3954 array = lock_user(VERIFY_WRITE, target_addr,
3955 nsems*sizeof(unsigned short), 0);
3956 if (!array)
3957 return -TARGET_EFAULT;
3959 for(i=0; i<nsems; i++) {
3960 __put_user((*host_array)[i], &array[i]);
3962 g_free(*host_array);
3963 unlock_user(array, target_addr, 1);
3965 return 0;
/* Emulate semctl(2).  @target_arg is the raw guest 'union semun'
 * word; how it is interpreted depends on @cmd.  Returns a target
 * errno on failure.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and similar flag bits from the command. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Round-trip the whole value array through a host copy. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* Round-trip a semid_ds through a host copy. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands take no semun argument. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Target-ABI layout of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

/* Copy @nsops guest sembuf entries into the host array @host_sembuf,
 * byte-swapping each field.  Returns -TARGET_EFAULT on a bad guest
 * address.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
4067 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4069 struct sembuf sops[nsops];
4071 if (target_to_host_sembuf(sops, ptr, nsops))
4072 return -TARGET_EFAULT;
4074 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Target-ABI layout of struct msqid_ds as seen in guest memory. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;   /* 32-bit ABIs pad each time field to 64 bits */
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

/* Convert a guest msqid_ds to host format (for IPC_SET etc.). */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

/* Convert a host msqid_ds back into guest memory (for IPC_STAT etc.). */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
/* Target-ABI layout of struct msginfo (IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host struct msginfo field-by-field into guest memory. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2).  @ptr is a guest pointer whose interpretation
 * depends on @cmd.  Returns a target errno on failure.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and similar flag bits. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip the msqid_ds through a host copy. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel ABI: INFO commands return a msginfo through the
         * msqid_ds pointer argument, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Target-ABI layout of struct msgbuf: the type word followed by a
 * variable-length text payload. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

/* Emulate msgsnd(2): copy the guest message into a host msgbuf and
 * send it.  Returns a target errno on failure.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding mtext. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host msgbuf, then copy type and
 * payload back to the guest.  Returns the received byte count or a
 * target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Lock the mtext region separately so partial pages beyond the
         * fixed header are validated for the actual received length. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): mtype is stored even when msgrcv failed, in which
     * case host_mb->mtype is uninitialized -- the kernel leaves the
     * buffer untouched on failure; consider guarding with
     * !is_error(ret). */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Convert a guest shmid_ds to host format (for IPC_SET etc.). */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

/* Convert a host shmid_ds back into guest memory (for IPC_STAT etc.). */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Target-ABI layout of struct shminfo (IPC_INFO result). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/* Copy a host struct shminfo field-by-field into guest memory. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Target-ABI layout of struct shm_info (SHM_INFO result). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Copy a host struct shm_info field-by-field into guest memory. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2).  @buf is a guest pointer whose interpretation
 * depends on @cmd.  Returns a target errno on failure.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and similar flag bits. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip the shmid_ds through a host copy. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel ABI: INFO commands return their structs through the
         * shmid_ds pointer argument, hence the casts. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2): attach the segment into the guest address space,
 * update the page flags, and record the mapping in shm_regions[] so
 * do_shmdt() can find it.  Returns the guest attach address or a
 * target errno.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Hold the mmap lock across VMA search + attach so no other
     * thread can grab the chosen range in between. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* Guest didn't specify an address: pick a free guest VMA. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma's range may already be backed
             * by a host reservation; force the attach over it. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember this attach so do_shmdt() can clear the page flags.
     * NOTE(review): if all N_SHM_REGIONS slots are busy the mapping is
     * silently not recorded. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Emulate shmdt(2): look up the recorded attach at @shmaddr, clear its
 * guest page flags and region slot, then detach on the host side.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    /* Host detach happens regardless of whether we had it recorded. */
    return get_errno(shmdt(g2h(shmaddr)));
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Emulate the multiplexed ipc(2) syscall used by some architectures
 * (e.g. i386) instead of separate sem*/msg*/shm* syscalls.
 *
 * The high 16 bits of @call carry a "version" used by libc to select
 * between old and new calling conventions for some sub-calls; the low
 * 16 bits select the operation.  Arguments first..fifth are forwarded
 * to the per-operation helpers, which do the guest<->host conversion.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv: ptr points at a kludge struct
                 * bundling the message buffer pointer and msgtyp. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            /* New-style: msgtyp is passed directly in fifth. */
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* Attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/*
 * syscall_types.h is included twice with different expansions of the
 * STRUCT()/STRUCT_SPECIAL() macros:
 *  1. to build an enum of STRUCT_<name> identifiers (ending in STRUCT_MAX),
 *  2. to emit, for each regular struct, an argtype array describing its
 *     field layout for the thunk conversion machinery.
 * STRUCT_SPECIAL entries get an enum value but no layout table; they are
 * converted by hand-written code (e.g. termios below).
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a special-case ioctl handler (see IOCTL_SPECIAL below). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/*
 * One entry of the ioctl translation table (ioctl_entries[]).
 */
struct IOCTLEntry {
    int target_cmd;             /* guest-side ioctl request number */
    unsigned int host_cmd;      /* corresponding host request number */
    const char *name;           /* stringified command, for logging */
    int access;                 /* IOC_R / IOC_W / IOC_RW direction flags */
    do_ioctl_fn *do_ioctl;      /* non-NULL: custom handler overrides the
                                 * generic thunk-based conversion */
    const argtype arg_type[5];  /* thunk description of the argument */
};

/* Direction of the ioctl's third argument, from the guest's viewpoint. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)
/* Size of the on-stack scratch buffer used to convert ioctl arguments. */
#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))
/*
 * Special-case handler for FS_IOC_FIEMAP.
 *
 * The argument is a struct fiemap immediately followed by a guest-sized
 * array of struct fiemap_extent (fm_extent_count entries), which the
 * kernel fills in.  Because the total size is variable, the generic
 * thunk path cannot handle it.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert just the fixed struct fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so the size computation below can't overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            /* NOTE(review): assumes the kernel keeps fm_mapped_extents
             * <= fm_extent_count here; otherwise the write below could
             * exceed the guest buffer — confirm against fiemap docs. */
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4707 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4708 int fd, int cmd, abi_long arg)
4710 const argtype *arg_type = ie->arg_type;
4711 int target_size;
4712 void *argptr;
4713 int ret;
4714 struct ifconf *host_ifconf;
4715 uint32_t outbufsz;
4716 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4717 int target_ifreq_size;
4718 int nb_ifreq;
4719 int free_buf = 0;
4720 int i;
4721 int target_ifc_len;
4722 abi_long target_ifc_buf;
4723 int host_ifc_len;
4724 char *host_ifc_buf;
4726 assert(arg_type[0] == TYPE_PTR);
4727 assert(ie->access == IOC_RW);
4729 arg_type++;
4730 target_size = thunk_type_size(arg_type, 0);
4732 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4733 if (!argptr)
4734 return -TARGET_EFAULT;
4735 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4736 unlock_user(argptr, arg, 0);
4738 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4739 target_ifc_len = host_ifconf->ifc_len;
4740 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4742 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4743 nb_ifreq = target_ifc_len / target_ifreq_size;
4744 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4746 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4747 if (outbufsz > MAX_STRUCT_SIZE) {
4748 /* We can't fit all the extents into the fixed size buffer.
4749 * Allocate one that is large enough and use it instead.
4751 host_ifconf = malloc(outbufsz);
4752 if (!host_ifconf) {
4753 return -TARGET_ENOMEM;
4755 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4756 free_buf = 1;
4758 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4760 host_ifconf->ifc_len = host_ifc_len;
4761 host_ifconf->ifc_buf = host_ifc_buf;
4763 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4764 if (!is_error(ret)) {
4765 /* convert host ifc_len to target ifc_len */
4767 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4768 target_ifc_len = nb_ifreq * target_ifreq_size;
4769 host_ifconf->ifc_len = target_ifc_len;
4771 /* restore target ifc_buf */
4773 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4775 /* copy struct ifconf to target user */
4777 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4778 if (!argptr)
4779 return -TARGET_EFAULT;
4780 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4781 unlock_user(argptr, arg, target_size);
4783 /* copy ifreq[] to target user */
4785 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4786 for (i = 0; i < nb_ifreq ; i++) {
4787 thunk_convert(argptr + i * target_ifreq_size,
4788 host_ifc_buf + i * sizeof(struct ifreq),
4789 ifreq_arg_type, THUNK_TARGET);
4791 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4794 if (free_buf) {
4795 free(host_ifconf);
4798 return ret;
4801 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4802 int cmd, abi_long arg)
4804 void *argptr;
4805 struct dm_ioctl *host_dm;
4806 abi_long guest_data;
4807 uint32_t guest_data_size;
4808 int target_size;
4809 const argtype *arg_type = ie->arg_type;
4810 abi_long ret;
4811 void *big_buf = NULL;
4812 char *host_data;
4814 arg_type++;
4815 target_size = thunk_type_size(arg_type, 0);
4816 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4817 if (!argptr) {
4818 ret = -TARGET_EFAULT;
4819 goto out;
4821 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4822 unlock_user(argptr, arg, 0);
4824 /* buf_temp is too small, so fetch things into a bigger buffer */
4825 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4826 memcpy(big_buf, buf_temp, target_size);
4827 buf_temp = big_buf;
4828 host_dm = big_buf;
4830 guest_data = arg + host_dm->data_start;
4831 if ((guest_data - arg) < 0) {
4832 ret = -EINVAL;
4833 goto out;
4835 guest_data_size = host_dm->data_size - host_dm->data_start;
4836 host_data = (char*)host_dm + host_dm->data_start;
4838 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4839 switch (ie->host_cmd) {
4840 case DM_REMOVE_ALL:
4841 case DM_LIST_DEVICES:
4842 case DM_DEV_CREATE:
4843 case DM_DEV_REMOVE:
4844 case DM_DEV_SUSPEND:
4845 case DM_DEV_STATUS:
4846 case DM_DEV_WAIT:
4847 case DM_TABLE_STATUS:
4848 case DM_TABLE_CLEAR:
4849 case DM_TABLE_DEPS:
4850 case DM_LIST_VERSIONS:
4851 /* no input data */
4852 break;
4853 case DM_DEV_RENAME:
4854 case DM_DEV_SET_GEOMETRY:
4855 /* data contains only strings */
4856 memcpy(host_data, argptr, guest_data_size);
4857 break;
4858 case DM_TARGET_MSG:
4859 memcpy(host_data, argptr, guest_data_size);
4860 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4861 break;
4862 case DM_TABLE_LOAD:
4864 void *gspec = argptr;
4865 void *cur_data = host_data;
4866 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4867 int spec_size = thunk_type_size(arg_type, 0);
4868 int i;
4870 for (i = 0; i < host_dm->target_count; i++) {
4871 struct dm_target_spec *spec = cur_data;
4872 uint32_t next;
4873 int slen;
4875 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4876 slen = strlen((char*)gspec + spec_size) + 1;
4877 next = spec->next;
4878 spec->next = sizeof(*spec) + slen;
4879 strcpy((char*)&spec[1], gspec + spec_size);
4880 gspec += next;
4881 cur_data += spec->next;
4883 break;
4885 default:
4886 ret = -TARGET_EINVAL;
4887 unlock_user(argptr, guest_data, 0);
4888 goto out;
4890 unlock_user(argptr, guest_data, 0);
4892 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4893 if (!is_error(ret)) {
4894 guest_data = arg + host_dm->data_start;
4895 guest_data_size = host_dm->data_size - host_dm->data_start;
4896 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4897 switch (ie->host_cmd) {
4898 case DM_REMOVE_ALL:
4899 case DM_DEV_CREATE:
4900 case DM_DEV_REMOVE:
4901 case DM_DEV_RENAME:
4902 case DM_DEV_SUSPEND:
4903 case DM_DEV_STATUS:
4904 case DM_TABLE_LOAD:
4905 case DM_TABLE_CLEAR:
4906 case DM_TARGET_MSG:
4907 case DM_DEV_SET_GEOMETRY:
4908 /* no return data */
4909 break;
4910 case DM_LIST_DEVICES:
4912 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4913 uint32_t remaining_data = guest_data_size;
4914 void *cur_data = argptr;
4915 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4916 int nl_size = 12; /* can't use thunk_size due to alignment */
4918 while (1) {
4919 uint32_t next = nl->next;
4920 if (next) {
4921 nl->next = nl_size + (strlen(nl->name) + 1);
4923 if (remaining_data < nl->next) {
4924 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4925 break;
4927 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4928 strcpy(cur_data + nl_size, nl->name);
4929 cur_data += nl->next;
4930 remaining_data -= nl->next;
4931 if (!next) {
4932 break;
4934 nl = (void*)nl + next;
4936 break;
4938 case DM_DEV_WAIT:
4939 case DM_TABLE_STATUS:
4941 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4942 void *cur_data = argptr;
4943 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4944 int spec_size = thunk_type_size(arg_type, 0);
4945 int i;
4947 for (i = 0; i < host_dm->target_count; i++) {
4948 uint32_t next = spec->next;
4949 int slen = strlen((char*)&spec[1]) + 1;
4950 spec->next = (cur_data - argptr) + spec_size + slen;
4951 if (guest_data_size < spec->next) {
4952 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4953 break;
4955 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4956 strcpy(cur_data + spec_size, (char*)&spec[1]);
4957 cur_data = argptr + spec->next;
4958 spec = (void*)host_dm + host_dm->data_start + next;
4960 break;
4962 case DM_TABLE_DEPS:
4964 void *hdata = (void*)host_dm + host_dm->data_start;
4965 int count = *(uint32_t*)hdata;
4966 uint64_t *hdev = hdata + 8;
4967 uint64_t *gdev = argptr + 8;
4968 int i;
4970 *(uint32_t*)argptr = tswap32(count);
4971 for (i = 0; i < count; i++) {
4972 *gdev = tswap64(*hdev);
4973 gdev++;
4974 hdev++;
4976 break;
4978 case DM_LIST_VERSIONS:
4980 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4981 uint32_t remaining_data = guest_data_size;
4982 void *cur_data = argptr;
4983 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4984 int vers_size = thunk_type_size(arg_type, 0);
4986 while (1) {
4987 uint32_t next = vers->next;
4988 if (next) {
4989 vers->next = vers_size + (strlen(vers->name) + 1);
4991 if (remaining_data < vers->next) {
4992 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4993 break;
4995 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4996 strcpy(cur_data + vers_size, vers->name);
4997 cur_data += vers->next;
4998 remaining_data -= vers->next;
4999 if (!next) {
5000 break;
5002 vers = (void*)vers + next;
5004 break;
5006 default:
5007 unlock_user(argptr, guest_data, 0);
5008 ret = -TARGET_EINVAL;
5009 goto out;
5011 unlock_user(argptr, guest_data, guest_data_size);
5013 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5014 if (!argptr) {
5015 ret = -TARGET_EFAULT;
5016 goto out;
5018 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5019 unlock_user(argptr, arg, target_size);
5021 out:
5022 g_free(big_buf);
5023 return ret;
5026 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5027 int cmd, abi_long arg)
5029 void *argptr;
5030 int target_size;
5031 const argtype *arg_type = ie->arg_type;
5032 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5033 abi_long ret;
5035 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5036 struct blkpg_partition host_part;
5038 /* Read and convert blkpg */
5039 arg_type++;
5040 target_size = thunk_type_size(arg_type, 0);
5041 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5042 if (!argptr) {
5043 ret = -TARGET_EFAULT;
5044 goto out;
5046 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5047 unlock_user(argptr, arg, 0);
5049 switch (host_blkpg->op) {
5050 case BLKPG_ADD_PARTITION:
5051 case BLKPG_DEL_PARTITION:
5052 /* payload is struct blkpg_partition */
5053 break;
5054 default:
5055 /* Unknown opcode */
5056 ret = -TARGET_EINVAL;
5057 goto out;
5060 /* Read and convert blkpg->data */
5061 arg = (abi_long)(uintptr_t)host_blkpg->data;
5062 target_size = thunk_type_size(part_arg_type, 0);
5063 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5064 if (!argptr) {
5065 ret = -TARGET_EFAULT;
5066 goto out;
5068 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5069 unlock_user(argptr, arg, 0);
5071 /* Swizzle the data pointer to our local copy and call! */
5072 host_blkpg->data = &host_part;
5073 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5075 out:
5076 return ret;
5079 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5080 int fd, int cmd, abi_long arg)
5082 const argtype *arg_type = ie->arg_type;
5083 const StructEntry *se;
5084 const argtype *field_types;
5085 const int *dst_offsets, *src_offsets;
5086 int target_size;
5087 void *argptr;
5088 abi_ulong *target_rt_dev_ptr;
5089 unsigned long *host_rt_dev_ptr;
5090 abi_long ret;
5091 int i;
5093 assert(ie->access == IOC_W);
5094 assert(*arg_type == TYPE_PTR);
5095 arg_type++;
5096 assert(*arg_type == TYPE_STRUCT);
5097 target_size = thunk_type_size(arg_type, 0);
5098 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5099 if (!argptr) {
5100 return -TARGET_EFAULT;
5102 arg_type++;
5103 assert(*arg_type == (int)STRUCT_rtentry);
5104 se = struct_entries + *arg_type++;
5105 assert(se->convert[0] == NULL);
5106 /* convert struct here to be able to catch rt_dev string */
5107 field_types = se->field_types;
5108 dst_offsets = se->field_offsets[THUNK_HOST];
5109 src_offsets = se->field_offsets[THUNK_TARGET];
5110 for (i = 0; i < se->nb_fields; i++) {
5111 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5112 assert(*field_types == TYPE_PTRVOID);
5113 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5114 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5115 if (*target_rt_dev_ptr != 0) {
5116 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5117 tswapal(*target_rt_dev_ptr));
5118 if (!*host_rt_dev_ptr) {
5119 unlock_user(argptr, arg, 0);
5120 return -TARGET_EFAULT;
5122 } else {
5123 *host_rt_dev_ptr = 0;
5125 field_types++;
5126 continue;
5128 field_types = thunk_convert(buf_temp + dst_offsets[i],
5129 argptr + src_offsets[i],
5130 field_types, THUNK_HOST);
5132 unlock_user(argptr, arg, 0);
5134 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5135 if (*host_rt_dev_ptr != 0) {
5136 unlock_user((void *)*host_rt_dev_ptr,
5137 *target_rt_dev_ptr, 0);
5139 return ret;
5142 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5143 int fd, int cmd, abi_long arg)
5145 int sig = target_to_host_signal(arg);
5146 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/*
 * The ioctl translation table, generated from ioctls.h.  IOCTL()
 * entries use the generic thunk-based argument conversion;
 * IOCTL_SPECIAL() entries name a custom do_ioctl_fn handler.
 * The table is terminated by an all-zero sentinel entry.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl dispatcher: look @cmd up in ioctl_entries[], then
 * either delegate to the entry's custom handler or perform the
 * standard argument conversion driven by the entry's thunk type
 * description and access direction.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the table; the zero sentinel marks the end. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* Entry has a dedicated handler (IOCTL_SPECIAL). */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Argument passed through unchanged (integer or opaque pointer). */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer to a struct: convert via buf_temp according to the
         * declared direction. */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes: run the ioctl, then copy out to the guest. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads: copy in from the guest, then run the ioctl. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Both directions: copy in, run, copy out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/*
 * Termios flag translation tables: each row maps a (mask, value) pair
 * in the guest's termios flag encoding to the corresponding pair in
 * the host encoding.  Used by target_to_host_bitmask() and
 * host_to_target_bitmask() in the termios converters below.
 * Each table is terminated by an all-zero row.
 */

/* Input mode flags (c_iflag). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};

/* Output mode flags (c_oflag), including the multi-bit delay fields. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};

/* Control mode flags (c_cflag): baud rate, character size, parity, etc. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};

/* Local mode flags (c_lflag): echo, canonical mode, signals, etc. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/*
 * Convert a guest struct target_termios (@src) into a host
 * struct host_termios (@dst): translate each flag word through the
 * bitmask tables above and remap the control-character array indices.
 * Registered as the STRUCT_termios special converter (see
 * struct_termios_def below).
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Host indices not covered by a TARGET_V* mapping stay zeroed. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Inverse of target_to_host_termios(): convert a host struct
 * host_termios (@src) into a guest struct target_termios (@dst).
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Guest indices not covered by a V* mapping stay zeroed. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/*
 * StructEntry for STRUCT_termios: termios is a STRUCT_SPECIAL type, so
 * conversion is done by the hand-written functions above instead of a
 * thunk layout table.  convert[0] is host->target, convert[1] is
 * target->host; size/align follow the same [target, host] ordering.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5421 static bitmask_transtbl mmap_flags_tbl[] = {
5422 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5423 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5424 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5425 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5426 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5427 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5428 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5429 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5430 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5431 MAP_NORESERVE },
5432 { 0, 0, 0, 0 }
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated guest LDT; allocated lazily by write_ldt(). */
static uint8_t *ldt_table;

/*
 * Emulate modify_ldt(0, ...): copy up to @bytecount bytes of the
 * emulated LDT into the guest buffer at @ptr.  Returns the number of
 * bytes copied (0 if no LDT has been created yet), or -TARGET_EFAULT
 * if the guest buffer is invalid.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/*
 * Emulate modify_ldt(1 or 0x11, ...): install one LDT descriptor from
 * the guest's struct user_desc at @ptr into the emulated LDT.
 *
 * @oldmode selects the legacy modify_ldt(1) semantics (no 'useable'
 * bit, "empty" descriptors encoded differently).  The flags word is
 * unpacked and repacked into the two 32-bit halves of an x86 segment
 * descriptor exactly as the Linux kernel does.
 *
 * Returns 0 on success, -TARGET_EINVAL for malformed requests or
 * -TARGET_EFAULT/-TARGET_ENOMEM on memory errors.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed flags word (same layout as struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    /* 64-bit code segment bit only exists for 64-bit ABIs. */
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/attribute bits into the two descriptor words,
     * mirroring the kernel's LDT_entry_a/LDT_entry_b macros. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5551 /* specific and weird i386 syscalls */
5552 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5553 unsigned long bytecount)
5555 abi_long ret;
5557 switch (func) {
5558 case 0:
5559 ret = read_ldt(ptr, bytecount);
5560 break;
5561 case 1:
5562 ret = write_ldt(env, ptr, bytecount, 1);
5563 break;
5564 case 0x11:
5565 ret = write_ldt(env, ptr, bytecount, 0);
5566 break;
5567 default:
5568 ret = -TARGET_ENOSYS;
5569 break;
5571 return ret;
5574 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor into the guest
 * GDT.  If the guest passes entry_number == -1, pick the first free
 * TLS slot and write the chosen index back to the guest struct.
 * Returns 0, -TARGET_EFAULT, or -TARGET_EINVAL for an out-of-range slot.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Auto-allocate: scan the TLS range of the GDT for a free
           (all-zero) descriptor and report it back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (kernel user_desc layout). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode the two descriptor words; 0x7000 sets DPL=3 and the
       S (non-system) bit.  Unlike write_ldt(), "useable" is always
       honoured here. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
/*
 * Emulate get_thread_area(2): decode the GDT descriptor selected by the
 * guest's entry_number back into a target_modify_ldt_ldt_s.
 * Inverse of do_set_thread_area()'s descriptor encoding.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual attribute bits from the high word. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Repack into the user_desc flags layout. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
5706 #endif /* TARGET_I386 && TARGET_ABI32 */
5708 #ifndef TARGET_ABI32
5709 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5711 abi_long ret = 0;
5712 abi_ulong val;
5713 int idx;
5715 switch(code) {
5716 case TARGET_ARCH_SET_GS:
5717 case TARGET_ARCH_SET_FS:
5718 if (code == TARGET_ARCH_SET_GS)
5719 idx = R_GS;
5720 else
5721 idx = R_FS;
5722 cpu_x86_load_seg(env, idx, 0);
5723 env->segs[idx].base = addr;
5724 break;
5725 case TARGET_ARCH_GET_GS:
5726 case TARGET_ARCH_GET_FS:
5727 if (code == TARGET_ARCH_GET_GS)
5728 idx = R_GS;
5729 else
5730 idx = R_FS;
5731 val = env->segs[idx].base;
5732 if (put_user(val, addr, abi_ulong))
5733 ret = -TARGET_EFAULT;
5734 break;
5735 default:
5736 ret = -TARGET_EINVAL;
5737 break;
5739 return ret;
5741 #endif
5743 #endif /* defined(TARGET_I386) */
/* Host stack size for guest threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000

/* Serializes thread creation; the child also briefly takes it so the
 * parent can finish TLS setup before the child starts running. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake data passed from do_fork() to clone_func(). */
typedef struct {
    CPUArchState *env;        /* CPU state for the new thread */
    pthread_mutex_t mutex;    /* with cond: parent waits for child startup */
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;             /* filled in by the child via gettid() */
    abi_ulong child_tidptr;   /* CLONE_CHILD_SETTID address, or 0 */
    abi_ulong parent_tidptr;  /* CLONE_PARENT_SETTID address, or 0 */
    sigset_t sigmask;         /* parent's signal mask, restored in child */
} new_thread_info;
/*
 * pthread start routine for guest threads created by do_fork(CLONE_VM).
 * Publishes the new tid, restores signals, signals the waiting parent,
 * then enters the guest CPU loop.  Never returns normally.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals (the parent blocked them all before pthread_create). */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a host
 * pthread sharing this process's guest memory; anything else becomes a
 * plain fork().  vfork() is emulated with fork() (CLONE_VFORK|CLONE_VM
 * are stripped), so the parent does not actually wait for the child.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        /* Keep the NPTL flags for our own bookkeeping but do not pass
           them to the host: we implement their effects ourselves. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/*
 * Map a guest fcntl(2) command to the host command.  Locking commands
 * are always routed to the 64-bit host variants (F_*LK64) so that
 * 32-bit guests get full 64-bit file-range locking on the host.
 * Returns -TARGET_EINVAL for commands we do not translate.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Unreachable: every path above returns; kept to silence compilers
       that cannot prove the switch is exhaustive. */
    return -TARGET_EINVAL;
/* Build a bitmask_transtbl row mapping TARGET_x <-> x.  The -1 masks
 * mean "match the whole value", since flock l_type is an enum-style
 * value rather than a bit flag. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* Translation table for struct flock's l_type field. */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/*
 * Convert a guest 32/abi-sized struct flock into a host struct flock64.
 * Only l_type needs table translation; the other fields are plain
 * integer swaps.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/*
 * Inverse of copy_from_user_flock(): write a host struct flock64 back
 * to the guest's struct flock.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Function-pointer types so do_fcntl64 can pick the right flock64
 * converter pair (ARM EABI needs a differently-padded layout). */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI variant: identical logic to copy_from_user_flock64() but
 * reads the EABI-aligned target_eabi_flock64 layout. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* ARM EABI variant of copy_to_user_flock64(). */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
/*
 * Convert a guest struct flock64 into the host struct flock64.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/*
 * Write a host struct flock64 back to the guest's struct flock64.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/*
 * Emulate fcntl(2).  Translates the command and, where needed, the
 * argument structure (struct flock, f_owner_ex, flag bitmasks) between
 * guest and host representations, then issues the host fcntl.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* GETLK fills in the conflicting lock: copy it back. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* O_* flag values differ per target: translate the result. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Commands whose argument is a plain integer: pass through. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* On UID16 targets, clamp a full-range id into 16 bits the way the
 * kernel does: anything above 65535 becomes the overflow id 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}

/* Widen a 16-bit id, preserving the special -1 ("no change") value. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}

/* Byte-swap an id at the width the target ABI uses for uids/gids. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid targets: all conversions are the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread wrappers; these bypass libc's set*id() broadcast. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialization for the syscall layer: registers thunk
 * struct descriptions, builds the reverse errno table, and patches
 * ioctl size fields that could not be computed at compile time.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset passed as two 32-bit syscall
 * arguments; which register holds the high half depends on the guest
 * endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t hi = word0, lo = word1;
#else
    uint64_t hi = word1, lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64: on 32-bit ABIs the length arrives split across two
 * registers; some targets additionally require the pair to start on an
 * even register, inserting a dummy argument (regpairs_aligned).
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        /* skip the alignment padding in arg2 */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64(). */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        /* skip the alignment padding in arg2 */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
/*
 * Read a guest struct timespec into a host struct timespec.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/*
 * Write a host struct timespec out to a guest struct timespec.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6442 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6443 abi_ulong target_addr)
6445 struct target_itimerspec *target_itspec;
6447 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6448 return -TARGET_EFAULT;
6451 host_itspec->it_interval.tv_sec =
6452 tswapal(target_itspec->it_interval.tv_sec);
6453 host_itspec->it_interval.tv_nsec =
6454 tswapal(target_itspec->it_interval.tv_nsec);
6455 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6456 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6458 unlock_user_struct(target_itspec, target_addr, 1);
6459 return 0;
6462 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6463 struct itimerspec *host_its)
6465 struct target_itimerspec *target_itspec;
6467 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6468 return -TARGET_EFAULT;
6471 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6472 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6474 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6475 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6477 unlock_user_struct(target_itspec, target_addr, 0);
6478 return 0;
6481 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6482 abi_ulong target_addr)
6484 struct target_sigevent *target_sevp;
6486 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6487 return -TARGET_EFAULT;
6490 /* This union is awkward on 64 bit systems because it has a 32 bit
6491 * integer and a pointer in it; we follow the conversion approach
6492 * used for handling sigval types in signal.c so the guest should get
6493 * the correct value back even if we did a 64 bit byteswap and it's
6494 * using the 32 bit integer.
6496 host_sevp->sigev_value.sival_ptr =
6497 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6498 host_sevp->sigev_signo =
6499 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6500 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6501 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6503 unlock_user_struct(target_sevp, target_addr, 1);
6504 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall(2) flag bits into the host's
 * MCL_CURRENT/MCL_FUTURE values; unknown bits are dropped. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_arg = 0;

    host_arg |= (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;
    host_arg |= (arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0;

    return host_arg;
}
#endif
/*
 * Convert a host struct stat to the guest's stat64 layout and write it
 * to guest memory.  On 32-bit ARM the EABI variant uses a differently
 * padded struct; all other targets use target_stat64 (or target_stat if
 * the target has no 64-bit stat).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry the inode twice; fill both copies. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6585 /* ??? Using host futex calls even when target atomic operations
6586 are not really atomic probably breaks things. However implementing
6587 futexes locally would make futexes shared between multiple processes
6588 tricky. However they're probably useless because guest atomic
6589 operations won't work either. */
6590 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6591 target_ulong uaddr2, int val3)
6593 struct timespec ts, *pts;
6594 int base_op;
6596 /* ??? We assume FUTEX_* constants are the same on both host
6597 and target. */
6598 #ifdef FUTEX_CMD_MASK
6599 base_op = op & FUTEX_CMD_MASK;
6600 #else
6601 base_op = op;
6602 #endif
6603 switch (base_op) {
6604 case FUTEX_WAIT:
6605 case FUTEX_WAIT_BITSET:
6606 if (timeout) {
6607 pts = &ts;
6608 target_to_host_timespec(pts, timeout);
6609 } else {
6610 pts = NULL;
6612 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6613 pts, NULL, val3));
6614 case FUTEX_WAKE:
6615 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6616 case FUTEX_FD:
6617 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6618 case FUTEX_REQUEUE:
6619 case FUTEX_CMP_REQUEUE:
6620 case FUTEX_WAKE_OP:
6621 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6622 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6623 But the prototype takes a `struct timespec *'; insert casts
6624 to satisfy the compiler. We do not need to tswap TIMEOUT
6625 since it's not compared to guest memory. */
6626 pts = (struct timespec *)(uintptr_t) timeout;
6627 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6628 g2h(uaddr2),
6629 (base_op == FUTEX_CMP_REQUEUE
6630 ? tswap32(val3)
6631 : val3)));
6632 default:
6633 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
#ifndef MAX_HANDLE_SZ
#define MAX_HANDLE_SZ 128
#endif
/* Emulate name_to_handle_at(2): translate the guest's file_handle
 * buffer, call the host syscall, and copy the handle and mount id
 * back to guest memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /* The kernel rejects handle_bytes > MAX_HANDLE_SZ with EINVAL;
     * checking here also prevents the unsigned total_size sum below
     * from wrapping around for huge guest-supplied sizes. */
    if (size > MAX_HANDLE_SZ) {
        return -TARGET_EINVAL;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
#ifndef MAX_HANDLE_SZ
#define MAX_HANDLE_SZ 128
#endif
/* Emulate open_by_handle_at(2): copy the guest file_handle into host
 * byte order and open it on the host. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /* The kernel rejects handle_bytes > MAX_HANDLE_SZ with EINVAL;
     * checking here also prevents the unsigned total_size sum below
     * from wrapping around for huge guest-supplied sizes. */
    if (size > MAX_HANDLE_SZ) {
        return -TARGET_EINVAL;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6724 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6726 /* signalfd siginfo conversion */
/* Byte-swap one signalfd_siginfo record from host to guest order and
 * translate the signal number.  The only caller passes the same buffer
 * for tinfo and info (in-place conversion), so each field must be read
 * from info before the corresponding tinfo field is written. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Test the *source* record: the old code peeked at tinfo, which was
     * only correct because the caller aliases the two buffers. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* was tswap32(tinfo->ssi_errno): read the input struct, not the
     * partially-rewritten output. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6766 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6768 int i;
6770 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6771 host_to_target_signalfd_siginfo(buf + i, buf + i);
6774 return len;
/* fd_trans descriptor registered for signalfd file descriptors: data
   read from them is byte-swapped into guest order. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
6781 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6783 int host_flags;
6784 target_sigset_t *target_mask;
6785 sigset_t host_mask;
6786 abi_long ret;
6788 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6789 return -TARGET_EINVAL;
6791 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6792 return -TARGET_EFAULT;
6795 target_to_host_sigset(&host_mask, target_mask);
6797 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6799 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6800 if (ret >= 0) {
6801 fd_trans_register(ret, &target_signalfd_trans);
6804 unlock_user_struct(target_mask, mask, 0);
6806 return ret;
6808 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    int host_sig;

    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits. */
        host_sig = WTERMSIG(status);
        return host_to_target_signal(host_sig) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal occupies bits 8..15. */
        host_sig = WSTOPSIG(status);
        return (host_to_target_signal(host_sig) << 8) | (status & 0xff);
    }
    /* Plain exit status (and anything else) passes straight through. */
    return status;
}
/* Fill fd with the guest's view of /proc/self/cmdline: the host file
 * minus its first NUL-terminated word (the qemu binary path).
 * Returns 0 on success, -1 with errno set on read/write failure. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Search only the bytes actually read: the old
               memchr(buf, 0, sizeof(buf)) could inspect uninitialized
               bytes past nb_read and falsely find a terminator. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6874 static int open_self_maps(void *cpu_env, int fd)
6876 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6877 TaskState *ts = cpu->opaque;
6878 FILE *fp;
6879 char *line = NULL;
6880 size_t len = 0;
6881 ssize_t read;
6883 fp = fopen("/proc/self/maps", "r");
6884 if (fp == NULL) {
6885 return -1;
6888 while ((read = getline(&line, &len, fp)) != -1) {
6889 int fields, dev_maj, dev_min, inode;
6890 uint64_t min, max, offset;
6891 char flag_r, flag_w, flag_x, flag_p;
6892 char path[512] = "";
6893 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6894 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6895 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6897 if ((fields < 10) || (fields > 11)) {
6898 continue;
6900 if (h2g_valid(min)) {
6901 int flags = page_get_flags(h2g(min));
6902 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6903 if (page_check_range(h2g(min), max - min, flags) == -1) {
6904 continue;
6906 if (h2g(min) == ts->info->stack_limit) {
6907 pstrcpy(path, sizeof(path), " [stack]");
6909 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6910 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6911 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6912 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6913 path[0] ? " " : "", path);
6917 free(line);
6918 fclose(fp);
6920 return 0;
6923 static int open_self_stat(void *cpu_env, int fd)
6925 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6926 TaskState *ts = cpu->opaque;
6927 abi_ulong start_stack = ts->info->start_stack;
6928 int i;
6930 for (i = 0; i < 44; i++) {
6931 char buf[128];
6932 int len;
6933 uint64_t val = 0;
6935 if (i == 0) {
6936 /* pid */
6937 val = getpid();
6938 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6939 } else if (i == 1) {
6940 /* app name */
6941 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6942 } else if (i == 27) {
6943 /* stack bottom */
6944 val = start_stack;
6945 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6946 } else {
6947 /* for the rest, there is MasterCard */
6948 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6951 len = strlen(buf);
6952 if (write(fd, buf, len) != len) {
6953 return -1;
6957 return 0;
6960 static int open_self_auxv(void *cpu_env, int fd)
6962 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6963 TaskState *ts = cpu->opaque;
6964 abi_ulong auxv = ts->info->saved_auxv;
6965 abi_ulong len = ts->info->auxv_len;
6966 char *ptr;
6969 * Auxiliary vector is stored in target process stack.
6970 * read in whole auxv vector and copy it to file
6972 ptr = lock_user(VERIFY_READ, auxv, len, 0);
6973 if (ptr != NULL) {
6974 while (len > 0) {
6975 ssize_t r;
6976 r = write(fd, ptr, len);
6977 if (r <= 0) {
6978 break;
6980 len -= r;
6981 ptr += r;
6983 lseek(fd, 0, SEEK_SET);
6984 unlock_user(ptr, auxv, len);
6987 return 0;
/* Return 1 if filename names our own /proc entry for `entry`, i.e.
 * "/proc/self/<entry>" or "/proc/<ourpid>/<entry>"; 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric pid: only our own pid counts as "myself". */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7014 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparison for absolute /proc paths (used when host and
 * guest endianness differ and whole files must be byte-swapped). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7020 static int open_net_route(void *cpu_env, int fd)
7022 FILE *fp;
7023 char *line = NULL;
7024 size_t len = 0;
7025 ssize_t read;
7027 fp = fopen("/proc/net/route", "r");
7028 if (fp == NULL) {
7029 return -1;
7032 /* read header */
7034 read = getline(&line, &len, fp);
7035 dprintf(fd, "%s", line);
7037 /* read routes */
7039 while ((read = getline(&line, &len, fp)) != -1) {
7040 char iface[16];
7041 uint32_t dest, gw, mask;
7042 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7043 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7044 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7045 &mask, &mtu, &window, &irtt);
7046 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7047 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7048 metric, tswap32(mask), mtu, window, irtt);
7051 free(line);
7052 fclose(fp);
7054 return 0;
7056 #endif
7058 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7060 struct fake_open {
7061 const char *filename;
7062 int (*fill)(void *cpu_env, int fd);
7063 int (*cmp)(const char *s1, const char *s2);
7065 const struct fake_open *fake_open;
7066 static const struct fake_open fakes[] = {
7067 { "maps", open_self_maps, is_proc_myself },
7068 { "stat", open_self_stat, is_proc_myself },
7069 { "auxv", open_self_auxv, is_proc_myself },
7070 { "cmdline", open_self_cmdline, is_proc_myself },
7071 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7072 { "/proc/net/route", open_net_route, is_proc },
7073 #endif
7074 { NULL, NULL, NULL }
7077 if (is_proc_myself(pathname, "exe")) {
7078 int execfd = qemu_getauxval(AT_EXECFD);
7079 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7082 for (fake_open = fakes; fake_open->filename; fake_open++) {
7083 if (fake_open->cmp(pathname, fake_open->filename)) {
7084 break;
7088 if (fake_open->filename) {
7089 const char *tmpdir;
7090 char filename[PATH_MAX];
7091 int fd, r;
7093 /* create temporary file to map stat to */
7094 tmpdir = getenv("TMPDIR");
7095 if (!tmpdir)
7096 tmpdir = "/tmp";
7097 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7098 fd = mkstemp(filename);
7099 if (fd < 0) {
7100 return fd;
7102 unlink(filename);
7104 if ((r = fake_open->fill(cpu_env, fd))) {
7105 int e = errno;
7106 close(fd);
7107 errno = e;
7108 return r;
7110 lseek(fd, 0, SEEK_SET);
7112 return fd;
7115 return safe_openat(dirfd, path(pathname), flags, mode);
7118 #define TIMER_MAGIC 0x0caf0000
7119 #define TIMER_MAGIC_MASK 0xffff0000
7121 /* Convert QEMU provided timer ID back to internal 16bit index format */
7122 static target_timer_t get_timer_id(abi_long arg)
7124 target_timer_t timerid = arg;
7126 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7127 return -TARGET_EINVAL;
7130 timerid &= 0xffff;
7132 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7133 return -TARGET_EINVAL;
7136 return timerid;
7139 /* do_syscall() should always have a single exit point at the end so
7140 that actions, such as logging of syscall results, can be performed.
7141 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7142 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7143 abi_long arg2, abi_long arg3, abi_long arg4,
7144 abi_long arg5, abi_long arg6, abi_long arg7,
7145 abi_long arg8)
7147 CPUState *cpu = ENV_GET_CPU(cpu_env);
7148 abi_long ret;
7149 struct stat st;
7150 struct statfs stfs;
7151 void *p;
7153 #if defined(DEBUG_ERESTARTSYS)
7154 /* Debug-only code for exercising the syscall-restart code paths
7155 * in the per-architecture cpu main loops: restart every syscall
7156 * the guest makes once before letting it through.
7159 static int flag;
7161 flag = !flag;
7162 if (flag) {
7163 return -TARGET_ERESTARTSYS;
7166 #endif
7168 #ifdef DEBUG
7169 gemu_log("syscall %d", num);
7170 #endif
7171 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7172 if(do_strace)
7173 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7175 switch(num) {
7176 case TARGET_NR_exit:
7177 /* In old applications this may be used to implement _exit(2).
7178 However in threaded applictions it is used for thread termination,
7179 and _exit_group is used for application termination.
7180 Do thread termination if we have more then one thread. */
7182 if (block_signals()) {
7183 ret = -TARGET_ERESTARTSYS;
7184 break;
7187 if (CPU_NEXT(first_cpu)) {
7188 TaskState *ts;
7190 cpu_list_lock();
7191 /* Remove the CPU from the list. */
7192 QTAILQ_REMOVE(&cpus, cpu, node);
7193 cpu_list_unlock();
7194 ts = cpu->opaque;
7195 if (ts->child_tidptr) {
7196 put_user_u32(0, ts->child_tidptr);
7197 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7198 NULL, NULL, 0);
7200 thread_cpu = NULL;
7201 object_unref(OBJECT(cpu));
7202 g_free(ts);
7203 rcu_unregister_thread();
7204 pthread_exit(NULL);
7206 #ifdef TARGET_GPROF
7207 _mcleanup();
7208 #endif
7209 gdb_exit(cpu_env, arg1);
7210 _exit(arg1);
7211 ret = 0; /* avoid warning */
7212 break;
7213 case TARGET_NR_read:
7214 if (arg3 == 0)
7215 ret = 0;
7216 else {
7217 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7218 goto efault;
7219 ret = get_errno(safe_read(arg1, p, arg3));
7220 if (ret >= 0 &&
7221 fd_trans_host_to_target_data(arg1)) {
7222 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7224 unlock_user(p, arg2, ret);
7226 break;
7227 case TARGET_NR_write:
7228 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7229 goto efault;
7230 ret = get_errno(safe_write(arg1, p, arg3));
7231 unlock_user(p, arg2, 0);
7232 break;
7233 #ifdef TARGET_NR_open
7234 case TARGET_NR_open:
7235 if (!(p = lock_user_string(arg1)))
7236 goto efault;
7237 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7238 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7239 arg3));
7240 fd_trans_unregister(ret);
7241 unlock_user(p, arg1, 0);
7242 break;
7243 #endif
7244 case TARGET_NR_openat:
7245 if (!(p = lock_user_string(arg2)))
7246 goto efault;
7247 ret = get_errno(do_openat(cpu_env, arg1, p,
7248 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7249 arg4));
7250 fd_trans_unregister(ret);
7251 unlock_user(p, arg2, 0);
7252 break;
7253 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7254 case TARGET_NR_name_to_handle_at:
7255 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7256 break;
7257 #endif
7258 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7259 case TARGET_NR_open_by_handle_at:
7260 ret = do_open_by_handle_at(arg1, arg2, arg3);
7261 fd_trans_unregister(ret);
7262 break;
7263 #endif
7264 case TARGET_NR_close:
7265 fd_trans_unregister(arg1);
7266 ret = get_errno(close(arg1));
7267 break;
7268 case TARGET_NR_brk:
7269 ret = do_brk(arg1);
7270 break;
7271 #ifdef TARGET_NR_fork
7272 case TARGET_NR_fork:
7273 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7274 break;
7275 #endif
7276 #ifdef TARGET_NR_waitpid
7277 case TARGET_NR_waitpid:
7279 int status;
7280 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7281 if (!is_error(ret) && arg2 && ret
7282 && put_user_s32(host_to_target_waitstatus(status), arg2))
7283 goto efault;
7285 break;
7286 #endif
7287 #ifdef TARGET_NR_waitid
7288 case TARGET_NR_waitid:
7290 siginfo_t info;
7291 info.si_pid = 0;
7292 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7293 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7294 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7295 goto efault;
7296 host_to_target_siginfo(p, &info);
7297 unlock_user(p, arg3, sizeof(target_siginfo_t));
7300 break;
7301 #endif
7302 #ifdef TARGET_NR_creat /* not on alpha */
7303 case TARGET_NR_creat:
7304 if (!(p = lock_user_string(arg1)))
7305 goto efault;
7306 ret = get_errno(creat(p, arg2));
7307 fd_trans_unregister(ret);
7308 unlock_user(p, arg1, 0);
7309 break;
7310 #endif
7311 #ifdef TARGET_NR_link
7312 case TARGET_NR_link:
7314 void * p2;
7315 p = lock_user_string(arg1);
7316 p2 = lock_user_string(arg2);
7317 if (!p || !p2)
7318 ret = -TARGET_EFAULT;
7319 else
7320 ret = get_errno(link(p, p2));
7321 unlock_user(p2, arg2, 0);
7322 unlock_user(p, arg1, 0);
7324 break;
7325 #endif
7326 #if defined(TARGET_NR_linkat)
7327 case TARGET_NR_linkat:
7329 void * p2 = NULL;
7330 if (!arg2 || !arg4)
7331 goto efault;
7332 p = lock_user_string(arg2);
7333 p2 = lock_user_string(arg4);
7334 if (!p || !p2)
7335 ret = -TARGET_EFAULT;
7336 else
7337 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7338 unlock_user(p, arg2, 0);
7339 unlock_user(p2, arg4, 0);
7341 break;
7342 #endif
7343 #ifdef TARGET_NR_unlink
7344 case TARGET_NR_unlink:
7345 if (!(p = lock_user_string(arg1)))
7346 goto efault;
7347 ret = get_errno(unlink(p));
7348 unlock_user(p, arg1, 0);
7349 break;
7350 #endif
7351 #if defined(TARGET_NR_unlinkat)
7352 case TARGET_NR_unlinkat:
7353 if (!(p = lock_user_string(arg2)))
7354 goto efault;
7355 ret = get_errno(unlinkat(arg1, p, arg3));
7356 unlock_user(p, arg2, 0);
7357 break;
7358 #endif
7359 case TARGET_NR_execve:
7361 char **argp, **envp;
7362 int argc, envc;
7363 abi_ulong gp;
7364 abi_ulong guest_argp;
7365 abi_ulong guest_envp;
7366 abi_ulong addr;
7367 char **q;
7368 int total_size = 0;
7370 argc = 0;
7371 guest_argp = arg2;
7372 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7373 if (get_user_ual(addr, gp))
7374 goto efault;
7375 if (!addr)
7376 break;
7377 argc++;
7379 envc = 0;
7380 guest_envp = arg3;
7381 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7382 if (get_user_ual(addr, gp))
7383 goto efault;
7384 if (!addr)
7385 break;
7386 envc++;
7389 argp = alloca((argc + 1) * sizeof(void *));
7390 envp = alloca((envc + 1) * sizeof(void *));
7392 for (gp = guest_argp, q = argp; gp;
7393 gp += sizeof(abi_ulong), q++) {
7394 if (get_user_ual(addr, gp))
7395 goto execve_efault;
7396 if (!addr)
7397 break;
7398 if (!(*q = lock_user_string(addr)))
7399 goto execve_efault;
7400 total_size += strlen(*q) + 1;
7402 *q = NULL;
7404 for (gp = guest_envp, q = envp; gp;
7405 gp += sizeof(abi_ulong), q++) {
7406 if (get_user_ual(addr, gp))
7407 goto execve_efault;
7408 if (!addr)
7409 break;
7410 if (!(*q = lock_user_string(addr)))
7411 goto execve_efault;
7412 total_size += strlen(*q) + 1;
7414 *q = NULL;
7416 if (!(p = lock_user_string(arg1)))
7417 goto execve_efault;
7418 /* Although execve() is not an interruptible syscall it is
7419 * a special case where we must use the safe_syscall wrapper:
7420 * if we allow a signal to happen before we make the host
7421 * syscall then we will 'lose' it, because at the point of
7422 * execve the process leaves QEMU's control. So we use the
7423 * safe syscall wrapper to ensure that we either take the
7424 * signal as a guest signal, or else it does not happen
7425 * before the execve completes and makes it the other
7426 * program's problem.
7428 ret = get_errno(safe_execve(p, argp, envp));
7429 unlock_user(p, arg1, 0);
7431 goto execve_end;
7433 execve_efault:
7434 ret = -TARGET_EFAULT;
7436 execve_end:
7437 for (gp = guest_argp, q = argp; *q;
7438 gp += sizeof(abi_ulong), q++) {
7439 if (get_user_ual(addr, gp)
7440 || !addr)
7441 break;
7442 unlock_user(*q, addr, 0);
7444 for (gp = guest_envp, q = envp; *q;
7445 gp += sizeof(abi_ulong), q++) {
7446 if (get_user_ual(addr, gp)
7447 || !addr)
7448 break;
7449 unlock_user(*q, addr, 0);
7452 break;
7453 case TARGET_NR_chdir:
7454 if (!(p = lock_user_string(arg1)))
7455 goto efault;
7456 ret = get_errno(chdir(p));
7457 unlock_user(p, arg1, 0);
7458 break;
7459 #ifdef TARGET_NR_time
7460 case TARGET_NR_time:
7462 time_t host_time;
7463 ret = get_errno(time(&host_time));
7464 if (!is_error(ret)
7465 && arg1
7466 && put_user_sal(host_time, arg1))
7467 goto efault;
7469 break;
7470 #endif
7471 #ifdef TARGET_NR_mknod
7472 case TARGET_NR_mknod:
7473 if (!(p = lock_user_string(arg1)))
7474 goto efault;
7475 ret = get_errno(mknod(p, arg2, arg3));
7476 unlock_user(p, arg1, 0);
7477 break;
7478 #endif
7479 #if defined(TARGET_NR_mknodat)
7480 case TARGET_NR_mknodat:
7481 if (!(p = lock_user_string(arg2)))
7482 goto efault;
7483 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7484 unlock_user(p, arg2, 0);
7485 break;
7486 #endif
7487 #ifdef TARGET_NR_chmod
7488 case TARGET_NR_chmod:
7489 if (!(p = lock_user_string(arg1)))
7490 goto efault;
7491 ret = get_errno(chmod(p, arg2));
7492 unlock_user(p, arg1, 0);
7493 break;
7494 #endif
7495 #ifdef TARGET_NR_break
7496 case TARGET_NR_break:
7497 goto unimplemented;
7498 #endif
7499 #ifdef TARGET_NR_oldstat
7500 case TARGET_NR_oldstat:
7501 goto unimplemented;
7502 #endif
7503 case TARGET_NR_lseek:
7504 ret = get_errno(lseek(arg1, arg2, arg3));
7505 break;
7506 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7507 /* Alpha specific */
7508 case TARGET_NR_getxpid:
7509 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7510 ret = get_errno(getpid());
7511 break;
7512 #endif
7513 #ifdef TARGET_NR_getpid
7514 case TARGET_NR_getpid:
7515 ret = get_errno(getpid());
7516 break;
7517 #endif
7518 case TARGET_NR_mount:
7520 /* need to look at the data field */
7521 void *p2, *p3;
7523 if (arg1) {
7524 p = lock_user_string(arg1);
7525 if (!p) {
7526 goto efault;
7528 } else {
7529 p = NULL;
7532 p2 = lock_user_string(arg2);
7533 if (!p2) {
7534 if (arg1) {
7535 unlock_user(p, arg1, 0);
7537 goto efault;
7540 if (arg3) {
7541 p3 = lock_user_string(arg3);
7542 if (!p3) {
7543 if (arg1) {
7544 unlock_user(p, arg1, 0);
7546 unlock_user(p2, arg2, 0);
7547 goto efault;
7549 } else {
7550 p3 = NULL;
7553 /* FIXME - arg5 should be locked, but it isn't clear how to
7554 * do that since it's not guaranteed to be a NULL-terminated
7555 * string.
7557 if (!arg5) {
7558 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7559 } else {
7560 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7562 ret = get_errno(ret);
7564 if (arg1) {
7565 unlock_user(p, arg1, 0);
7567 unlock_user(p2, arg2, 0);
7568 if (arg3) {
7569 unlock_user(p3, arg3, 0);
7572 break;
7573 #ifdef TARGET_NR_umount
7574 case TARGET_NR_umount:
7575 if (!(p = lock_user_string(arg1)))
7576 goto efault;
7577 ret = get_errno(umount(p));
7578 unlock_user(p, arg1, 0);
7579 break;
7580 #endif
7581 #ifdef TARGET_NR_stime /* not on alpha */
7582 case TARGET_NR_stime:
7584 time_t host_time;
7585 if (get_user_sal(host_time, arg1))
7586 goto efault;
7587 ret = get_errno(stime(&host_time));
7589 break;
7590 #endif
7591 case TARGET_NR_ptrace:
7592 goto unimplemented;
7593 #ifdef TARGET_NR_alarm /* not on alpha */
7594 case TARGET_NR_alarm:
7595 ret = alarm(arg1);
7596 break;
7597 #endif
7598 #ifdef TARGET_NR_oldfstat
7599 case TARGET_NR_oldfstat:
7600 goto unimplemented;
7601 #endif
7602 #ifdef TARGET_NR_pause /* not on alpha */
7603 case TARGET_NR_pause:
7604 if (!block_signals()) {
7605 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7607 ret = -TARGET_EINTR;
7608 break;
7609 #endif
7610 #ifdef TARGET_NR_utime
7611 case TARGET_NR_utime:
7613 struct utimbuf tbuf, *host_tbuf;
7614 struct target_utimbuf *target_tbuf;
7615 if (arg2) {
7616 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7617 goto efault;
7618 tbuf.actime = tswapal(target_tbuf->actime);
7619 tbuf.modtime = tswapal(target_tbuf->modtime);
7620 unlock_user_struct(target_tbuf, arg2, 0);
7621 host_tbuf = &tbuf;
7622 } else {
7623 host_tbuf = NULL;
7625 if (!(p = lock_user_string(arg1)))
7626 goto efault;
7627 ret = get_errno(utime(p, host_tbuf));
7628 unlock_user(p, arg1, 0);
7630 break;
7631 #endif
7632 #ifdef TARGET_NR_utimes
7633 case TARGET_NR_utimes:
7635 struct timeval *tvp, tv[2];
7636 if (arg2) {
7637 if (copy_from_user_timeval(&tv[0], arg2)
7638 || copy_from_user_timeval(&tv[1],
7639 arg2 + sizeof(struct target_timeval)))
7640 goto efault;
7641 tvp = tv;
7642 } else {
7643 tvp = NULL;
7645 if (!(p = lock_user_string(arg1)))
7646 goto efault;
7647 ret = get_errno(utimes(p, tvp));
7648 unlock_user(p, arg1, 0);
7650 break;
7651 #endif
7652 #if defined(TARGET_NR_futimesat)
7653 case TARGET_NR_futimesat:
7655 struct timeval *tvp, tv[2];
7656 if (arg3) {
7657 if (copy_from_user_timeval(&tv[0], arg3)
7658 || copy_from_user_timeval(&tv[1],
7659 arg3 + sizeof(struct target_timeval)))
7660 goto efault;
7661 tvp = tv;
7662 } else {
7663 tvp = NULL;
7665 if (!(p = lock_user_string(arg2)))
7666 goto efault;
7667 ret = get_errno(futimesat(arg1, path(p), tvp));
7668 unlock_user(p, arg2, 0);
7670 break;
7671 #endif
7672 #ifdef TARGET_NR_stty
7673 case TARGET_NR_stty:
7674 goto unimplemented;
7675 #endif
7676 #ifdef TARGET_NR_gtty
7677 case TARGET_NR_gtty:
7678 goto unimplemented;
7679 #endif
7680 #ifdef TARGET_NR_access
7681 case TARGET_NR_access:
7682 if (!(p = lock_user_string(arg1)))
7683 goto efault;
7684 ret = get_errno(access(path(p), arg2));
7685 unlock_user(p, arg1, 0);
7686 break;
7687 #endif
7688 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7689 case TARGET_NR_faccessat:
7690 if (!(p = lock_user_string(arg2)))
7691 goto efault;
7692 ret = get_errno(faccessat(arg1, p, arg3, 0));
7693 unlock_user(p, arg2, 0);
7694 break;
7695 #endif
7696 #ifdef TARGET_NR_nice /* not on alpha */
7697 case TARGET_NR_nice:
7698 ret = get_errno(nice(arg1));
7699 break;
7700 #endif
7701 #ifdef TARGET_NR_ftime
7702 case TARGET_NR_ftime:
7703 goto unimplemented;
7704 #endif
7705 case TARGET_NR_sync:
7706 sync();
7707 ret = 0;
7708 break;
7709 case TARGET_NR_kill:
7710 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7711 break;
7712 #ifdef TARGET_NR_rename
7713 case TARGET_NR_rename:
7715 void *p2;
7716 p = lock_user_string(arg1);
7717 p2 = lock_user_string(arg2);
7718 if (!p || !p2)
7719 ret = -TARGET_EFAULT;
7720 else
7721 ret = get_errno(rename(p, p2));
7722 unlock_user(p2, arg2, 0);
7723 unlock_user(p, arg1, 0);
7725 break;
7726 #endif
7727 #if defined(TARGET_NR_renameat)
7728 case TARGET_NR_renameat:
7730 void *p2;
7731 p = lock_user_string(arg2);
7732 p2 = lock_user_string(arg4);
7733 if (!p || !p2)
7734 ret = -TARGET_EFAULT;
7735 else
7736 ret = get_errno(renameat(arg1, p, arg3, p2));
7737 unlock_user(p2, arg4, 0);
7738 unlock_user(p, arg2, 0);
7740 break;
7741 #endif
7742 #ifdef TARGET_NR_mkdir
7743 case TARGET_NR_mkdir:
7744 if (!(p = lock_user_string(arg1)))
7745 goto efault;
7746 ret = get_errno(mkdir(p, arg2));
7747 unlock_user(p, arg1, 0);
7748 break;
7749 #endif
7750 #if defined(TARGET_NR_mkdirat)
7751 case TARGET_NR_mkdirat:
7752 if (!(p = lock_user_string(arg2)))
7753 goto efault;
7754 ret = get_errno(mkdirat(arg1, p, arg3));
7755 unlock_user(p, arg2, 0);
7756 break;
7757 #endif
7758 #ifdef TARGET_NR_rmdir
7759 case TARGET_NR_rmdir:
7760 if (!(p = lock_user_string(arg1)))
7761 goto efault;
7762 ret = get_errno(rmdir(p));
7763 unlock_user(p, arg1, 0);
7764 break;
7765 #endif
7766 case TARGET_NR_dup:
7767 ret = get_errno(dup(arg1));
7768 if (ret >= 0) {
7769 fd_trans_dup(arg1, ret);
7771 break;
7772 #ifdef TARGET_NR_pipe
7773 case TARGET_NR_pipe:
7774 ret = do_pipe(cpu_env, arg1, 0, 0);
7775 break;
7776 #endif
7777 #ifdef TARGET_NR_pipe2
7778 case TARGET_NR_pipe2:
7779 ret = do_pipe(cpu_env, arg1,
7780 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7781 break;
7782 #endif
7783 case TARGET_NR_times:
7785 struct target_tms *tmsp;
7786 struct tms tms;
7787 ret = get_errno(times(&tms));
7788 if (arg1) {
7789 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7790 if (!tmsp)
7791 goto efault;
7792 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7793 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7794 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7795 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7797 if (!is_error(ret))
7798 ret = host_to_target_clock_t(ret);
7800 break;
7801 #ifdef TARGET_NR_prof
7802 case TARGET_NR_prof:
7803 goto unimplemented;
7804 #endif
7805 #ifdef TARGET_NR_signal
7806 case TARGET_NR_signal:
7807 goto unimplemented;
7808 #endif
7809 case TARGET_NR_acct:
7810 if (arg1 == 0) {
7811 ret = get_errno(acct(NULL));
7812 } else {
7813 if (!(p = lock_user_string(arg1)))
7814 goto efault;
7815 ret = get_errno(acct(path(p)));
7816 unlock_user(p, arg1, 0);
7818 break;
7819 #ifdef TARGET_NR_umount2
7820 case TARGET_NR_umount2:
7821 if (!(p = lock_user_string(arg1)))
7822 goto efault;
7823 ret = get_errno(umount2(p, arg2));
7824 unlock_user(p, arg1, 0);
7825 break;
7826 #endif
7827 #ifdef TARGET_NR_lock
7828 case TARGET_NR_lock:
7829 goto unimplemented;
7830 #endif
7831 case TARGET_NR_ioctl:
7832 ret = do_ioctl(arg1, arg2, arg3);
7833 break;
7834 case TARGET_NR_fcntl:
7835 ret = do_fcntl(arg1, arg2, arg3);
7836 break;
7837 #ifdef TARGET_NR_mpx
7838 case TARGET_NR_mpx:
7839 goto unimplemented;
7840 #endif
7841 case TARGET_NR_setpgid:
7842 ret = get_errno(setpgid(arg1, arg2));
7843 break;
7844 #ifdef TARGET_NR_ulimit
7845 case TARGET_NR_ulimit:
7846 goto unimplemented;
7847 #endif
7848 #ifdef TARGET_NR_oldolduname
7849 case TARGET_NR_oldolduname:
7850 goto unimplemented;
7851 #endif
7852 case TARGET_NR_umask:
7853 ret = get_errno(umask(arg1));
7854 break;
7855 case TARGET_NR_chroot:
7856 if (!(p = lock_user_string(arg1)))
7857 goto efault;
7858 ret = get_errno(chroot(p));
7859 unlock_user(p, arg1, 0);
7860 break;
7861 #ifdef TARGET_NR_ustat
7862 case TARGET_NR_ustat:
7863 goto unimplemented;
7864 #endif
7865 #ifdef TARGET_NR_dup2
7866 case TARGET_NR_dup2:
7867 ret = get_errno(dup2(arg1, arg2));
7868 if (ret >= 0) {
7869 fd_trans_dup(arg1, arg2);
7871 break;
7872 #endif
7873 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7874 case TARGET_NR_dup3:
7875 ret = get_errno(dup3(arg1, arg2, arg3));
7876 if (ret >= 0) {
7877 fd_trans_dup(arg1, arg2);
7879 break;
7880 #endif
7881 #ifdef TARGET_NR_getppid /* not on alpha */
7882 case TARGET_NR_getppid:
7883 ret = get_errno(getppid());
7884 break;
7885 #endif
7886 #ifdef TARGET_NR_getpgrp
7887 case TARGET_NR_getpgrp:
7888 ret = get_errno(getpgrp());
7889 break;
7890 #endif
7891 case TARGET_NR_setsid:
7892 ret = get_errno(setsid());
7893 break;
7894 #ifdef TARGET_NR_sigaction
7895 case TARGET_NR_sigaction:
7897 #if defined(TARGET_ALPHA)
7898 struct target_sigaction act, oact, *pact = 0;
7899 struct target_old_sigaction *old_act;
7900 if (arg2) {
7901 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7902 goto efault;
7903 act._sa_handler = old_act->_sa_handler;
7904 target_siginitset(&act.sa_mask, old_act->sa_mask);
7905 act.sa_flags = old_act->sa_flags;
7906 act.sa_restorer = 0;
7907 unlock_user_struct(old_act, arg2, 0);
7908 pact = &act;
7910 ret = get_errno(do_sigaction(arg1, pact, &oact));
7911 if (!is_error(ret) && arg3) {
7912 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7913 goto efault;
7914 old_act->_sa_handler = oact._sa_handler;
7915 old_act->sa_mask = oact.sa_mask.sig[0];
7916 old_act->sa_flags = oact.sa_flags;
7917 unlock_user_struct(old_act, arg3, 1);
7919 #elif defined(TARGET_MIPS)
7920 struct target_sigaction act, oact, *pact, *old_act;
7922 if (arg2) {
7923 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7924 goto efault;
7925 act._sa_handler = old_act->_sa_handler;
7926 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7927 act.sa_flags = old_act->sa_flags;
7928 unlock_user_struct(old_act, arg2, 0);
7929 pact = &act;
7930 } else {
7931 pact = NULL;
7934 ret = get_errno(do_sigaction(arg1, pact, &oact));
7936 if (!is_error(ret) && arg3) {
7937 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7938 goto efault;
7939 old_act->_sa_handler = oact._sa_handler;
7940 old_act->sa_flags = oact.sa_flags;
7941 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7942 old_act->sa_mask.sig[1] = 0;
7943 old_act->sa_mask.sig[2] = 0;
7944 old_act->sa_mask.sig[3] = 0;
7945 unlock_user_struct(old_act, arg3, 1);
7947 #else
7948 struct target_old_sigaction *old_act;
7949 struct target_sigaction act, oact, *pact;
7950 if (arg2) {
7951 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7952 goto efault;
7953 act._sa_handler = old_act->_sa_handler;
7954 target_siginitset(&act.sa_mask, old_act->sa_mask);
7955 act.sa_flags = old_act->sa_flags;
7956 act.sa_restorer = old_act->sa_restorer;
7957 unlock_user_struct(old_act, arg2, 0);
7958 pact = &act;
7959 } else {
7960 pact = NULL;
7962 ret = get_errno(do_sigaction(arg1, pact, &oact));
7963 if (!is_error(ret) && arg3) {
7964 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7965 goto efault;
7966 old_act->_sa_handler = oact._sa_handler;
7967 old_act->sa_mask = oact.sa_mask.sig[0];
7968 old_act->sa_flags = oact.sa_flags;
7969 old_act->sa_restorer = oact.sa_restorer;
7970 unlock_user_struct(old_act, arg3, 1);
7972 #endif
7974 break;
7975 #endif
7976 case TARGET_NR_rt_sigaction:
7978 #if defined(TARGET_ALPHA)
7979 struct target_sigaction act, oact, *pact = 0;
7980 struct target_rt_sigaction *rt_act;
7982 if (arg4 != sizeof(target_sigset_t)) {
7983 ret = -TARGET_EINVAL;
7984 break;
7986 if (arg2) {
7987 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7988 goto efault;
7989 act._sa_handler = rt_act->_sa_handler;
7990 act.sa_mask = rt_act->sa_mask;
7991 act.sa_flags = rt_act->sa_flags;
7992 act.sa_restorer = arg5;
7993 unlock_user_struct(rt_act, arg2, 0);
7994 pact = &act;
7996 ret = get_errno(do_sigaction(arg1, pact, &oact));
7997 if (!is_error(ret) && arg3) {
7998 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7999 goto efault;
8000 rt_act->_sa_handler = oact._sa_handler;
8001 rt_act->sa_mask = oact.sa_mask;
8002 rt_act->sa_flags = oact.sa_flags;
8003 unlock_user_struct(rt_act, arg3, 1);
8005 #else
8006 struct target_sigaction *act;
8007 struct target_sigaction *oact;
8009 if (arg4 != sizeof(target_sigset_t)) {
8010 ret = -TARGET_EINVAL;
8011 break;
8013 if (arg2) {
8014 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8015 goto efault;
8016 } else
8017 act = NULL;
8018 if (arg3) {
8019 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8020 ret = -TARGET_EFAULT;
8021 goto rt_sigaction_fail;
8023 } else
8024 oact = NULL;
8025 ret = get_errno(do_sigaction(arg1, act, oact));
8026 rt_sigaction_fail:
8027 if (act)
8028 unlock_user_struct(act, arg2, 0);
8029 if (oact)
8030 unlock_user_struct(oact, arg3, 1);
8031 #endif
8033 break;
8034 #ifdef TARGET_NR_sgetmask /* not on alpha */
8035 case TARGET_NR_sgetmask:
8037 sigset_t cur_set;
8038 abi_ulong target_set;
8039 ret = do_sigprocmask(0, NULL, &cur_set);
8040 if (!ret) {
8041 host_to_target_old_sigset(&target_set, &cur_set);
8042 ret = target_set;
8045 break;
8046 #endif
8047 #ifdef TARGET_NR_ssetmask /* not on alpha */
8048 case TARGET_NR_ssetmask:
8050 sigset_t set, oset, cur_set;
8051 abi_ulong target_set = arg1;
8052 /* We only have one word of the new mask so we must read
8053 * the rest of it with do_sigprocmask() and OR in this word.
8054 * We are guaranteed that a do_sigprocmask() that only queries
8055 * the signal mask will not fail.
8057 ret = do_sigprocmask(0, NULL, &cur_set);
8058 assert(!ret);
8059 target_to_host_old_sigset(&set, &target_set);
8060 sigorset(&set, &set, &cur_set);
8061 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8062 if (!ret) {
8063 host_to_target_old_sigset(&target_set, &oset);
8064 ret = target_set;
8067 break;
8068 #endif
8069 #ifdef TARGET_NR_sigprocmask
8070 case TARGET_NR_sigprocmask:
8072 #if defined(TARGET_ALPHA)
8073 sigset_t set, oldset;
8074 abi_ulong mask;
8075 int how;
8077 switch (arg1) {
8078 case TARGET_SIG_BLOCK:
8079 how = SIG_BLOCK;
8080 break;
8081 case TARGET_SIG_UNBLOCK:
8082 how = SIG_UNBLOCK;
8083 break;
8084 case TARGET_SIG_SETMASK:
8085 how = SIG_SETMASK;
8086 break;
8087 default:
8088 ret = -TARGET_EINVAL;
8089 goto fail;
8091 mask = arg2;
8092 target_to_host_old_sigset(&set, &mask);
8094 ret = do_sigprocmask(how, &set, &oldset);
8095 if (!is_error(ret)) {
8096 host_to_target_old_sigset(&mask, &oldset);
8097 ret = mask;
8098 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8100 #else
8101 sigset_t set, oldset, *set_ptr;
8102 int how;
8104 if (arg2) {
8105 switch (arg1) {
8106 case TARGET_SIG_BLOCK:
8107 how = SIG_BLOCK;
8108 break;
8109 case TARGET_SIG_UNBLOCK:
8110 how = SIG_UNBLOCK;
8111 break;
8112 case TARGET_SIG_SETMASK:
8113 how = SIG_SETMASK;
8114 break;
8115 default:
8116 ret = -TARGET_EINVAL;
8117 goto fail;
8119 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8120 goto efault;
8121 target_to_host_old_sigset(&set, p);
8122 unlock_user(p, arg2, 0);
8123 set_ptr = &set;
8124 } else {
8125 how = 0;
8126 set_ptr = NULL;
8128 ret = do_sigprocmask(how, set_ptr, &oldset);
8129 if (!is_error(ret) && arg3) {
8130 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8131 goto efault;
8132 host_to_target_old_sigset(p, &oldset);
8133 unlock_user(p, arg3, sizeof(target_sigset_t));
8135 #endif
8137 break;
8138 #endif
8139 case TARGET_NR_rt_sigprocmask:
8141 int how = arg1;
8142 sigset_t set, oldset, *set_ptr;
8144 if (arg4 != sizeof(target_sigset_t)) {
8145 ret = -TARGET_EINVAL;
8146 break;
8149 if (arg2) {
8150 switch(how) {
8151 case TARGET_SIG_BLOCK:
8152 how = SIG_BLOCK;
8153 break;
8154 case TARGET_SIG_UNBLOCK:
8155 how = SIG_UNBLOCK;
8156 break;
8157 case TARGET_SIG_SETMASK:
8158 how = SIG_SETMASK;
8159 break;
8160 default:
8161 ret = -TARGET_EINVAL;
8162 goto fail;
8164 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8165 goto efault;
8166 target_to_host_sigset(&set, p);
8167 unlock_user(p, arg2, 0);
8168 set_ptr = &set;
8169 } else {
8170 how = 0;
8171 set_ptr = NULL;
8173 ret = do_sigprocmask(how, set_ptr, &oldset);
8174 if (!is_error(ret) && arg3) {
8175 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8176 goto efault;
8177 host_to_target_sigset(p, &oldset);
8178 unlock_user(p, arg3, sizeof(target_sigset_t));
8181 break;
8182 #ifdef TARGET_NR_sigpending
8183 case TARGET_NR_sigpending:
8185 sigset_t set;
8186 ret = get_errno(sigpending(&set));
8187 if (!is_error(ret)) {
8188 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8189 goto efault;
8190 host_to_target_old_sigset(p, &set);
8191 unlock_user(p, arg1, sizeof(target_sigset_t));
8194 break;
8195 #endif
8196 case TARGET_NR_rt_sigpending:
8198 sigset_t set;
8200 /* Yes, this check is >, not != like most. We follow the kernel's
8201 * logic and it does it like this because it implements
8202 * NR_sigpending through the same code path, and in that case
8203 * the old_sigset_t is smaller in size.
8205 if (arg2 > sizeof(target_sigset_t)) {
8206 ret = -TARGET_EINVAL;
8207 break;
8210 ret = get_errno(sigpending(&set));
8211 if (!is_error(ret)) {
8212 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8213 goto efault;
8214 host_to_target_sigset(p, &set);
8215 unlock_user(p, arg1, sizeof(target_sigset_t));
8218 break;
8219 #ifdef TARGET_NR_sigsuspend
8220 case TARGET_NR_sigsuspend:
8222 TaskState *ts = cpu->opaque;
8223 #if defined(TARGET_ALPHA)
8224 abi_ulong mask = arg1;
8225 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8226 #else
8227 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8228 goto efault;
8229 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8230 unlock_user(p, arg1, 0);
8231 #endif
8232 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8233 SIGSET_T_SIZE));
8234 if (ret != -TARGET_ERESTARTSYS) {
8235 ts->in_sigsuspend = 1;
8238 break;
8239 #endif
8240 case TARGET_NR_rt_sigsuspend:
8242 TaskState *ts = cpu->opaque;
8244 if (arg2 != sizeof(target_sigset_t)) {
8245 ret = -TARGET_EINVAL;
8246 break;
8248 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8249 goto efault;
8250 target_to_host_sigset(&ts->sigsuspend_mask, p);
8251 unlock_user(p, arg1, 0);
8252 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8253 SIGSET_T_SIZE));
8254 if (ret != -TARGET_ERESTARTSYS) {
8255 ts->in_sigsuspend = 1;
8258 break;
8259 case TARGET_NR_rt_sigtimedwait:
8261 sigset_t set;
8262 struct timespec uts, *puts;
8263 siginfo_t uinfo;
8265 if (arg4 != sizeof(target_sigset_t)) {
8266 ret = -TARGET_EINVAL;
8267 break;
8270 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8271 goto efault;
8272 target_to_host_sigset(&set, p);
8273 unlock_user(p, arg1, 0);
8274 if (arg3) {
8275 puts = &uts;
8276 target_to_host_timespec(puts, arg3);
8277 } else {
8278 puts = NULL;
8280 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8281 SIGSET_T_SIZE));
8282 if (!is_error(ret)) {
8283 if (arg2) {
8284 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8286 if (!p) {
8287 goto efault;
8289 host_to_target_siginfo(p, &uinfo);
8290 unlock_user(p, arg2, sizeof(target_siginfo_t));
8292 ret = host_to_target_signal(ret);
8295 break;
8296 case TARGET_NR_rt_sigqueueinfo:
8298 siginfo_t uinfo;
8300 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8301 if (!p) {
8302 goto efault;
8304 target_to_host_siginfo(&uinfo, p);
8305 unlock_user(p, arg1, 0);
8306 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8308 break;
8309 #ifdef TARGET_NR_sigreturn
8310 case TARGET_NR_sigreturn:
8311 if (block_signals()) {
8312 ret = -TARGET_ERESTARTSYS;
8313 } else {
8314 ret = do_sigreturn(cpu_env);
8316 break;
8317 #endif
8318 case TARGET_NR_rt_sigreturn:
8319 if (block_signals()) {
8320 ret = -TARGET_ERESTARTSYS;
8321 } else {
8322 ret = do_rt_sigreturn(cpu_env);
8324 break;
8325 case TARGET_NR_sethostname:
8326 if (!(p = lock_user_string(arg1)))
8327 goto efault;
8328 ret = get_errno(sethostname(p, arg2));
8329 unlock_user(p, arg1, 0);
8330 break;
8331 case TARGET_NR_setrlimit:
8333 int resource = target_to_host_resource(arg1);
8334 struct target_rlimit *target_rlim;
8335 struct rlimit rlim;
8336 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8337 goto efault;
8338 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8339 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8340 unlock_user_struct(target_rlim, arg2, 0);
8341 ret = get_errno(setrlimit(resource, &rlim));
8343 break;
8344 case TARGET_NR_getrlimit:
8346 int resource = target_to_host_resource(arg1);
8347 struct target_rlimit *target_rlim;
8348 struct rlimit rlim;
8350 ret = get_errno(getrlimit(resource, &rlim));
8351 if (!is_error(ret)) {
8352 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8353 goto efault;
8354 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8355 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8356 unlock_user_struct(target_rlim, arg2, 1);
8359 break;
8360 case TARGET_NR_getrusage:
8362 struct rusage rusage;
8363 ret = get_errno(getrusage(arg1, &rusage));
8364 if (!is_error(ret)) {
8365 ret = host_to_target_rusage(arg2, &rusage);
8368 break;
8369 case TARGET_NR_gettimeofday:
8371 struct timeval tv;
8372 ret = get_errno(gettimeofday(&tv, NULL));
8373 if (!is_error(ret)) {
8374 if (copy_to_user_timeval(arg1, &tv))
8375 goto efault;
8378 break;
8379 case TARGET_NR_settimeofday:
8381 struct timeval tv, *ptv = NULL;
8382 struct timezone tz, *ptz = NULL;
8384 if (arg1) {
8385 if (copy_from_user_timeval(&tv, arg1)) {
8386 goto efault;
8388 ptv = &tv;
8391 if (arg2) {
8392 if (copy_from_user_timezone(&tz, arg2)) {
8393 goto efault;
8395 ptz = &tz;
8398 ret = get_errno(settimeofday(ptv, ptz));
8400 break;
8401 #if defined(TARGET_NR_select)
8402 case TARGET_NR_select:
8403 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8404 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8405 #else
8407 struct target_sel_arg_struct *sel;
8408 abi_ulong inp, outp, exp, tvp;
8409 long nsel;
8411 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8412 goto efault;
8413 nsel = tswapal(sel->n);
8414 inp = tswapal(sel->inp);
8415 outp = tswapal(sel->outp);
8416 exp = tswapal(sel->exp);
8417 tvp = tswapal(sel->tvp);
8418 unlock_user_struct(sel, arg1, 0);
8419 ret = do_select(nsel, inp, outp, exp, tvp);
8421 #endif
8422 break;
8423 #endif
8424 #ifdef TARGET_NR_pselect6
8425 case TARGET_NR_pselect6:
8427 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8428 fd_set rfds, wfds, efds;
8429 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8430 struct timespec ts, *ts_ptr;
8433 * The 6th arg is actually two args smashed together,
8434 * so we cannot use the C library.
8436 sigset_t set;
8437 struct {
8438 sigset_t *set;
8439 size_t size;
8440 } sig, *sig_ptr;
8442 abi_ulong arg_sigset, arg_sigsize, *arg7;
8443 target_sigset_t *target_sigset;
8445 n = arg1;
8446 rfd_addr = arg2;
8447 wfd_addr = arg3;
8448 efd_addr = arg4;
8449 ts_addr = arg5;
8451 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8452 if (ret) {
8453 goto fail;
8455 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8456 if (ret) {
8457 goto fail;
8459 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8460 if (ret) {
8461 goto fail;
8465 * This takes a timespec, and not a timeval, so we cannot
8466 * use the do_select() helper ...
8468 if (ts_addr) {
8469 if (target_to_host_timespec(&ts, ts_addr)) {
8470 goto efault;
8472 ts_ptr = &ts;
8473 } else {
8474 ts_ptr = NULL;
8477 /* Extract the two packed args for the sigset */
8478 if (arg6) {
8479 sig_ptr = &sig;
8480 sig.size = SIGSET_T_SIZE;
8482 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8483 if (!arg7) {
8484 goto efault;
8486 arg_sigset = tswapal(arg7[0]);
8487 arg_sigsize = tswapal(arg7[1]);
8488 unlock_user(arg7, arg6, 0);
8490 if (arg_sigset) {
8491 sig.set = &set;
8492 if (arg_sigsize != sizeof(*target_sigset)) {
8493 /* Like the kernel, we enforce correct size sigsets */
8494 ret = -TARGET_EINVAL;
8495 goto fail;
8497 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8498 sizeof(*target_sigset), 1);
8499 if (!target_sigset) {
8500 goto efault;
8502 target_to_host_sigset(&set, target_sigset);
8503 unlock_user(target_sigset, arg_sigset, 0);
8504 } else {
8505 sig.set = NULL;
8507 } else {
8508 sig_ptr = NULL;
8511 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8512 ts_ptr, sig_ptr));
8514 if (!is_error(ret)) {
8515 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8516 goto efault;
8517 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8518 goto efault;
8519 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8520 goto efault;
8522 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8523 goto efault;
8526 break;
8527 #endif
8528 #ifdef TARGET_NR_symlink
8529 case TARGET_NR_symlink:
8531 void *p2;
8532 p = lock_user_string(arg1);
8533 p2 = lock_user_string(arg2);
8534 if (!p || !p2)
8535 ret = -TARGET_EFAULT;
8536 else
8537 ret = get_errno(symlink(p, p2));
8538 unlock_user(p2, arg2, 0);
8539 unlock_user(p, arg1, 0);
8541 break;
8542 #endif
8543 #if defined(TARGET_NR_symlinkat)
8544 case TARGET_NR_symlinkat:
8546 void *p2;
8547 p = lock_user_string(arg1);
8548 p2 = lock_user_string(arg3);
8549 if (!p || !p2)
8550 ret = -TARGET_EFAULT;
8551 else
8552 ret = get_errno(symlinkat(p, arg2, p2));
8553 unlock_user(p2, arg3, 0);
8554 unlock_user(p, arg1, 0);
8556 break;
8557 #endif
8558 #ifdef TARGET_NR_oldlstat
8559 case TARGET_NR_oldlstat:
8560 goto unimplemented;
8561 #endif
8562 #ifdef TARGET_NR_readlink
8563 case TARGET_NR_readlink:
8565 void *p2;
8566 p = lock_user_string(arg1);
8567 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8568 if (!p || !p2) {
8569 ret = -TARGET_EFAULT;
8570 } else if (!arg3) {
8571 /* Short circuit this for the magic exe check. */
8572 ret = -TARGET_EINVAL;
8573 } else if (is_proc_myself((const char *)p, "exe")) {
8574 char real[PATH_MAX], *temp;
8575 temp = realpath(exec_path, real);
8576 /* Return value is # of bytes that we wrote to the buffer. */
8577 if (temp == NULL) {
8578 ret = get_errno(-1);
8579 } else {
8580 /* Don't worry about sign mismatch as earlier mapping
8581 * logic would have thrown a bad address error. */
8582 ret = MIN(strlen(real), arg3);
8583 /* We cannot NUL terminate the string. */
8584 memcpy(p2, real, ret);
8586 } else {
8587 ret = get_errno(readlink(path(p), p2, arg3));
8589 unlock_user(p2, arg2, ret);
8590 unlock_user(p, arg1, 0);
8592 break;
8593 #endif
8594 #if defined(TARGET_NR_readlinkat)
8595 case TARGET_NR_readlinkat:
8597 void *p2;
8598 p = lock_user_string(arg2);
8599 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8600 if (!p || !p2) {
8601 ret = -TARGET_EFAULT;
8602 } else if (is_proc_myself((const char *)p, "exe")) {
8603 char real[PATH_MAX], *temp;
8604 temp = realpath(exec_path, real);
8605 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8606 snprintf((char *)p2, arg4, "%s", real);
8607 } else {
8608 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8610 unlock_user(p2, arg3, ret);
8611 unlock_user(p, arg2, 0);
8613 break;
8614 #endif
8615 #ifdef TARGET_NR_uselib
8616 case TARGET_NR_uselib:
8617 goto unimplemented;
8618 #endif
8619 #ifdef TARGET_NR_swapon
8620 case TARGET_NR_swapon:
8621 if (!(p = lock_user_string(arg1)))
8622 goto efault;
8623 ret = get_errno(swapon(p, arg2));
8624 unlock_user(p, arg1, 0);
8625 break;
8626 #endif
8627 case TARGET_NR_reboot:
8628 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8629 /* arg4 must be ignored in all other cases */
8630 p = lock_user_string(arg4);
8631 if (!p) {
8632 goto efault;
8634 ret = get_errno(reboot(arg1, arg2, arg3, p));
8635 unlock_user(p, arg4, 0);
8636 } else {
8637 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8639 break;
8640 #ifdef TARGET_NR_readdir
8641 case TARGET_NR_readdir:
8642 goto unimplemented;
8643 #endif
8644 #ifdef TARGET_NR_mmap
8645 case TARGET_NR_mmap:
8646 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8647 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8648 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8649 || defined(TARGET_S390X)
8651 abi_ulong *v;
8652 abi_ulong v1, v2, v3, v4, v5, v6;
8653 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8654 goto efault;
8655 v1 = tswapal(v[0]);
8656 v2 = tswapal(v[1]);
8657 v3 = tswapal(v[2]);
8658 v4 = tswapal(v[3]);
8659 v5 = tswapal(v[4]);
8660 v6 = tswapal(v[5]);
8661 unlock_user(v, arg1, 0);
8662 ret = get_errno(target_mmap(v1, v2, v3,
8663 target_to_host_bitmask(v4, mmap_flags_tbl),
8664 v5, v6));
8666 #else
8667 ret = get_errno(target_mmap(arg1, arg2, arg3,
8668 target_to_host_bitmask(arg4, mmap_flags_tbl),
8669 arg5,
8670 arg6));
8671 #endif
8672 break;
8673 #endif
8674 #ifdef TARGET_NR_mmap2
8675 case TARGET_NR_mmap2:
8676 #ifndef MMAP_SHIFT
8677 #define MMAP_SHIFT 12
8678 #endif
8679 ret = get_errno(target_mmap(arg1, arg2, arg3,
8680 target_to_host_bitmask(arg4, mmap_flags_tbl),
8681 arg5,
8682 arg6 << MMAP_SHIFT));
8683 break;
8684 #endif
8685 case TARGET_NR_munmap:
8686 ret = get_errno(target_munmap(arg1, arg2));
8687 break;
8688 case TARGET_NR_mprotect:
8690 TaskState *ts = cpu->opaque;
8691 /* Special hack to detect libc making the stack executable. */
8692 if ((arg3 & PROT_GROWSDOWN)
8693 && arg1 >= ts->info->stack_limit
8694 && arg1 <= ts->info->start_stack) {
8695 arg3 &= ~PROT_GROWSDOWN;
8696 arg2 = arg2 + arg1 - ts->info->stack_limit;
8697 arg1 = ts->info->stack_limit;
8700 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8701 break;
8702 #ifdef TARGET_NR_mremap
8703 case TARGET_NR_mremap:
8704 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8705 break;
8706 #endif
8707 /* ??? msync/mlock/munlock are broken for softmmu. */
8708 #ifdef TARGET_NR_msync
8709 case TARGET_NR_msync:
8710 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8711 break;
8712 #endif
8713 #ifdef TARGET_NR_mlock
8714 case TARGET_NR_mlock:
8715 ret = get_errno(mlock(g2h(arg1), arg2));
8716 break;
8717 #endif
8718 #ifdef TARGET_NR_munlock
8719 case TARGET_NR_munlock:
8720 ret = get_errno(munlock(g2h(arg1), arg2));
8721 break;
8722 #endif
8723 #ifdef TARGET_NR_mlockall
8724 case TARGET_NR_mlockall:
8725 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8726 break;
8727 #endif
8728 #ifdef TARGET_NR_munlockall
8729 case TARGET_NR_munlockall:
8730 ret = get_errno(munlockall());
8731 break;
8732 #endif
8733 case TARGET_NR_truncate:
8734 if (!(p = lock_user_string(arg1)))
8735 goto efault;
8736 ret = get_errno(truncate(p, arg2));
8737 unlock_user(p, arg1, 0);
8738 break;
8739 case TARGET_NR_ftruncate:
8740 ret = get_errno(ftruncate(arg1, arg2));
8741 break;
8742 case TARGET_NR_fchmod:
8743 ret = get_errno(fchmod(arg1, arg2));
8744 break;
8745 #if defined(TARGET_NR_fchmodat)
8746 case TARGET_NR_fchmodat:
8747 if (!(p = lock_user_string(arg2)))
8748 goto efault;
8749 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8750 unlock_user(p, arg2, 0);
8751 break;
8752 #endif
8753 case TARGET_NR_getpriority:
8754 /* Note that negative values are valid for getpriority, so we must
8755 differentiate based on errno settings. */
8756 errno = 0;
8757 ret = getpriority(arg1, arg2);
8758 if (ret == -1 && errno != 0) {
8759 ret = -host_to_target_errno(errno);
8760 break;
8762 #ifdef TARGET_ALPHA
8763 /* Return value is the unbiased priority. Signal no error. */
8764 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8765 #else
8766 /* Return value is a biased priority to avoid negative numbers. */
8767 ret = 20 - ret;
8768 #endif
8769 break;
8770 case TARGET_NR_setpriority:
8771 ret = get_errno(setpriority(arg1, arg2, arg3));
8772 break;
8773 #ifdef TARGET_NR_profil
8774 case TARGET_NR_profil:
8775 goto unimplemented;
8776 #endif
8777 case TARGET_NR_statfs:
8778 if (!(p = lock_user_string(arg1)))
8779 goto efault;
8780 ret = get_errno(statfs(path(p), &stfs));
8781 unlock_user(p, arg1, 0);
8782 convert_statfs:
8783 if (!is_error(ret)) {
8784 struct target_statfs *target_stfs;
8786 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8787 goto efault;
8788 __put_user(stfs.f_type, &target_stfs->f_type);
8789 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8790 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8791 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8792 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8793 __put_user(stfs.f_files, &target_stfs->f_files);
8794 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8795 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8796 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8797 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8798 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8799 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8800 unlock_user_struct(target_stfs, arg2, 1);
8802 break;
8803 case TARGET_NR_fstatfs:
8804 ret = get_errno(fstatfs(arg1, &stfs));
8805 goto convert_statfs;
8806 #ifdef TARGET_NR_statfs64
8807 case TARGET_NR_statfs64:
8808 if (!(p = lock_user_string(arg1)))
8809 goto efault;
8810 ret = get_errno(statfs(path(p), &stfs));
8811 unlock_user(p, arg1, 0);
8812 convert_statfs64:
8813 if (!is_error(ret)) {
8814 struct target_statfs64 *target_stfs;
8816 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8817 goto efault;
8818 __put_user(stfs.f_type, &target_stfs->f_type);
8819 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8820 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8821 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8822 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8823 __put_user(stfs.f_files, &target_stfs->f_files);
8824 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8825 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8826 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8827 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8828 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8829 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8830 unlock_user_struct(target_stfs, arg3, 1);
8832 break;
8833 case TARGET_NR_fstatfs64:
8834 ret = get_errno(fstatfs(arg1, &stfs));
8835 goto convert_statfs64;
8836 #endif
8837 #ifdef TARGET_NR_ioperm
8838 case TARGET_NR_ioperm:
8839 goto unimplemented;
8840 #endif
8841 #ifdef TARGET_NR_socketcall
8842 case TARGET_NR_socketcall:
8843 ret = do_socketcall(arg1, arg2);
8844 break;
8845 #endif
8846 #ifdef TARGET_NR_accept
8847 case TARGET_NR_accept:
8848 ret = do_accept4(arg1, arg2, arg3, 0);
8849 break;
8850 #endif
8851 #ifdef TARGET_NR_accept4
8852 case TARGET_NR_accept4:
8853 ret = do_accept4(arg1, arg2, arg3, arg4);
8854 break;
8855 #endif
8856 #ifdef TARGET_NR_bind
8857 case TARGET_NR_bind:
8858 ret = do_bind(arg1, arg2, arg3);
8859 break;
8860 #endif
8861 #ifdef TARGET_NR_connect
8862 case TARGET_NR_connect:
8863 ret = do_connect(arg1, arg2, arg3);
8864 break;
8865 #endif
8866 #ifdef TARGET_NR_getpeername
8867 case TARGET_NR_getpeername:
8868 ret = do_getpeername(arg1, arg2, arg3);
8869 break;
8870 #endif
8871 #ifdef TARGET_NR_getsockname
8872 case TARGET_NR_getsockname:
8873 ret = do_getsockname(arg1, arg2, arg3);
8874 break;
8875 #endif
8876 #ifdef TARGET_NR_getsockopt
8877 case TARGET_NR_getsockopt:
8878 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8879 break;
8880 #endif
8881 #ifdef TARGET_NR_listen
8882 case TARGET_NR_listen:
8883 ret = get_errno(listen(arg1, arg2));
8884 break;
8885 #endif
8886 #ifdef TARGET_NR_recv
8887 case TARGET_NR_recv:
8888 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8889 break;
8890 #endif
8891 #ifdef TARGET_NR_recvfrom
8892 case TARGET_NR_recvfrom:
8893 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8894 break;
8895 #endif
8896 #ifdef TARGET_NR_recvmsg
8897 case TARGET_NR_recvmsg:
8898 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8899 break;
8900 #endif
8901 #ifdef TARGET_NR_send
8902 case TARGET_NR_send:
8903 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8904 break;
8905 #endif
8906 #ifdef TARGET_NR_sendmsg
8907 case TARGET_NR_sendmsg:
8908 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8909 break;
8910 #endif
8911 #ifdef TARGET_NR_sendmmsg
8912 case TARGET_NR_sendmmsg:
8913 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8914 break;
8915 case TARGET_NR_recvmmsg:
8916 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8917 break;
8918 #endif
8919 #ifdef TARGET_NR_sendto
8920 case TARGET_NR_sendto:
8921 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8922 break;
8923 #endif
8924 #ifdef TARGET_NR_shutdown
8925 case TARGET_NR_shutdown:
8926 ret = get_errno(shutdown(arg1, arg2));
8927 break;
8928 #endif
8929 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8930 case TARGET_NR_getrandom:
8931 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8932 if (!p) {
8933 goto efault;
8935 ret = get_errno(getrandom(p, arg2, arg3));
8936 unlock_user(p, arg1, ret);
8937 break;
8938 #endif
8939 #ifdef TARGET_NR_socket
8940 case TARGET_NR_socket:
8941 ret = do_socket(arg1, arg2, arg3);
8942 fd_trans_unregister(ret);
8943 break;
8944 #endif
8945 #ifdef TARGET_NR_socketpair
8946 case TARGET_NR_socketpair:
8947 ret = do_socketpair(arg1, arg2, arg3, arg4);
8948 break;
8949 #endif
8950 #ifdef TARGET_NR_setsockopt
8951 case TARGET_NR_setsockopt:
8952 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8953 break;
8954 #endif
8956 case TARGET_NR_syslog:
8957 if (!(p = lock_user_string(arg2)))
8958 goto efault;
8959 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8960 unlock_user(p, arg2, 0);
8961 break;
8963 case TARGET_NR_setitimer:
8965 struct itimerval value, ovalue, *pvalue;
8967 if (arg2) {
8968 pvalue = &value;
8969 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8970 || copy_from_user_timeval(&pvalue->it_value,
8971 arg2 + sizeof(struct target_timeval)))
8972 goto efault;
8973 } else {
8974 pvalue = NULL;
8976 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8977 if (!is_error(ret) && arg3) {
8978 if (copy_to_user_timeval(arg3,
8979 &ovalue.it_interval)
8980 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8981 &ovalue.it_value))
8982 goto efault;
8985 break;
8986 case TARGET_NR_getitimer:
8988 struct itimerval value;
8990 ret = get_errno(getitimer(arg1, &value));
8991 if (!is_error(ret) && arg2) {
8992 if (copy_to_user_timeval(arg2,
8993 &value.it_interval)
8994 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8995 &value.it_value))
8996 goto efault;
8999 break;
9000 #ifdef TARGET_NR_stat
/* stat(2): look up the host struct stat, then jump to the shared
 * do_stat label (in the fstat arm) to convert it to target layout. */
9001 case TARGET_NR_stat:
9002 if (!(p = lock_user_string(arg1)))
9003 goto efault;
/* path() remaps the guest pathname into the sysroot where needed. */
9004 ret = get_errno(stat(path(p), &st));
9005 unlock_user(p, arg1, 0);
9006 goto do_stat;
9007 #endif
9008 #ifdef TARGET_NR_lstat
/* lstat(2): same as stat above but does not follow a final symlink. */
9009 case TARGET_NR_lstat:
9010 if (!(p = lock_user_string(arg1)))
9011 goto efault;
9012 ret = get_errno(lstat(path(p), &st));
9013 unlock_user(p, arg1, 0);
9014 goto do_stat;
9015 #endif
9016 case TARGET_NR_fstat:
9018 ret = get_errno(fstat(arg1, &st));
9019 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9020 do_stat:
9021 #endif
9022 if (!is_error(ret)) {
9023 struct target_stat *target_st;
9025 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9026 goto efault;
9027 memset(target_st, 0, sizeof(*target_st));
9028 __put_user(st.st_dev, &target_st->st_dev);
9029 __put_user(st.st_ino, &target_st->st_ino);
9030 __put_user(st.st_mode, &target_st->st_mode);
9031 __put_user(st.st_uid, &target_st->st_uid);
9032 __put_user(st.st_gid, &target_st->st_gid);
9033 __put_user(st.st_nlink, &target_st->st_nlink);
9034 __put_user(st.st_rdev, &target_st->st_rdev);
9035 __put_user(st.st_size, &target_st->st_size);
9036 __put_user(st.st_blksize, &target_st->st_blksize);
9037 __put_user(st.st_blocks, &target_st->st_blocks);
9038 __put_user(st.st_atime, &target_st->target_st_atime);
9039 __put_user(st.st_mtime, &target_st->target_st_mtime);
9040 __put_user(st.st_ctime, &target_st->target_st_ctime);
9041 unlock_user_struct(target_st, arg2, 1);
9044 break;
9045 #ifdef TARGET_NR_olduname
/* Legacy uname variant: not emulated, reported as unimplemented. */
9046 case TARGET_NR_olduname:
9047 goto unimplemented;
9048 #endif
9049 #ifdef TARGET_NR_iopl
/* I/O privilege level changes make no sense under emulation. */
9050 case TARGET_NR_iopl:
9051 goto unimplemented;
9052 #endif
9053 case TARGET_NR_vhangup:
9054 ret = get_errno(vhangup());
9055 break;
9056 #ifdef TARGET_NR_idle
9057 case TARGET_NR_idle:
9058 goto unimplemented;
9059 #endif
9060 #ifdef TARGET_NR_syscall
/* Indirect syscall: re-dispatch with the syscall number taken from
 * arg1 (masked to 16 bits) and the remaining args shifted down. */
9061 case TARGET_NR_syscall:
9062 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9063 arg6, arg7, arg8, 0);
9064 break;
9065 #endif
9066 case TARGET_NR_wait4:
9068 int status;
9069 abi_long status_ptr = arg2;
9070 struct rusage rusage, *rusage_ptr;
9071 abi_ulong target_rusage = arg4;
9072 abi_long rusage_err;
9073 if (target_rusage)
9074 rusage_ptr = &rusage;
9075 else
9076 rusage_ptr = NULL;
9077 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9078 if (!is_error(ret)) {
9079 if (status_ptr && ret) {
9080 status = host_to_target_waitstatus(status);
9081 if (put_user_s32(status, status_ptr))
9082 goto efault;
9084 if (target_rusage) {
9085 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9086 if (rusage_err) {
9087 ret = rusage_err;
9092 break;
9093 #ifdef TARGET_NR_swapoff
/* swapoff(2): pathname passed through from guest memory. */
9094 case TARGET_NR_swapoff:
9095 if (!(p = lock_user_string(arg1)))
9096 goto efault;
9097 ret = get_errno(swapoff(p));
9098 unlock_user(p, arg1, 0);
9099 break;
9100 #endif
9101 case TARGET_NR_sysinfo:
9103 struct target_sysinfo *target_value;
9104 struct sysinfo value;
9105 ret = get_errno(sysinfo(&value));
9106 if (!is_error(ret) && arg1)
9108 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9109 goto efault;
9110 __put_user(value.uptime, &target_value->uptime);
9111 __put_user(value.loads[0], &target_value->loads[0]);
9112 __put_user(value.loads[1], &target_value->loads[1]);
9113 __put_user(value.loads[2], &target_value->loads[2]);
9114 __put_user(value.totalram, &target_value->totalram);
9115 __put_user(value.freeram, &target_value->freeram);
9116 __put_user(value.sharedram, &target_value->sharedram);
9117 __put_user(value.bufferram, &target_value->bufferram);
9118 __put_user(value.totalswap, &target_value->totalswap);
9119 __put_user(value.freeswap, &target_value->freeswap);
9120 __put_user(value.procs, &target_value->procs);
9121 __put_user(value.totalhigh, &target_value->totalhigh);
9122 __put_user(value.freehigh, &target_value->freehigh);
9123 __put_user(value.mem_unit, &target_value->mem_unit);
9124 unlock_user_struct(target_value, arg1, 1);
9127 break;
9128 #ifdef TARGET_NR_ipc
/* SysV IPC: some targets multiplex all IPC operations through the
 * single ipc(2) entry point; do_ipc() demultiplexes on arg1. */
9129 case TARGET_NR_ipc:
9130 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
9131 break;
9132 #endif
/* Targets with individual SysV IPC syscalls dispatch directly below.
 * Calls that pass only integers go straight to the host syscall;
 * those with guest pointers go via do_* conversion helpers. */
9133 #ifdef TARGET_NR_semget
9134 case TARGET_NR_semget:
9135 ret = get_errno(semget(arg1, arg2, arg3));
9136 break;
9137 #endif
9138 #ifdef TARGET_NR_semop
9139 case TARGET_NR_semop:
9140 ret = do_semop(arg1, arg2, arg3);
9141 break;
9142 #endif
9143 #ifdef TARGET_NR_semctl
9144 case TARGET_NR_semctl:
9145 ret = do_semctl(arg1, arg2, arg3, arg4);
9146 break;
9147 #endif
9148 #ifdef TARGET_NR_msgctl
9149 case TARGET_NR_msgctl:
9150 ret = do_msgctl(arg1, arg2, arg3);
9151 break;
9152 #endif
9153 #ifdef TARGET_NR_msgget
9154 case TARGET_NR_msgget:
9155 ret = get_errno(msgget(arg1, arg2));
9156 break;
9157 #endif
9158 #ifdef TARGET_NR_msgrcv
9159 case TARGET_NR_msgrcv:
9160 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9161 break;
9162 #endif
9163 #ifdef TARGET_NR_msgsnd
9164 case TARGET_NR_msgsnd:
9165 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9166 break;
9167 #endif
9168 #ifdef TARGET_NR_shmget
9169 case TARGET_NR_shmget:
9170 ret = get_errno(shmget(arg1, arg2, arg3));
9171 break;
9172 #endif
9173 #ifdef TARGET_NR_shmctl
9174 case TARGET_NR_shmctl:
9175 ret = do_shmctl(arg1, arg2, arg3);
9176 break;
9177 #endif
9178 #ifdef TARGET_NR_shmat
9179 case TARGET_NR_shmat:
9180 ret = do_shmat(arg1, arg2, arg3);
9181 break;
9182 #endif
9183 #ifdef TARGET_NR_shmdt
9184 case TARGET_NR_shmdt:
9185 ret = do_shmdt(arg1);
9186 break;
9187 #endif
/* fsync(2): fd-only call, passed straight through to the host. */
9188 case TARGET_NR_fsync:
9189 ret = get_errno(fsync(arg1));
9190 break;
9191 case TARGET_NR_clone:
9192 /* Linux manages to have three different orderings for its
9193 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9194 * match the kernel's CONFIG_CLONE_* settings.
9195 * Microblaze is further special in that it uses a sixth
9196 * implicit argument to clone for the TLS pointer.
9198 #if defined(TARGET_MICROBLAZE)
9199 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9200 #elif defined(TARGET_CLONE_BACKWARDS)
9201 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9202 #elif defined(TARGET_CLONE_BACKWARDS2)
9203 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9204 #else
9205 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9206 #endif
9207 break;
9208 #ifdef __NR_exit_group
9209 /* new thread calls */
9210 case TARGET_NR_exit_group:
9211 #ifdef TARGET_GPROF
9212 _mcleanup();
9213 #endif
9214 gdb_exit(cpu_env, arg1);
9215 ret = get_errno(exit_group(arg1));
9216 break;
9217 #endif
9218 case TARGET_NR_setdomainname:
9219 if (!(p = lock_user_string(arg1)))
9220 goto efault;
9221 ret = get_errno(setdomainname(p, arg2));
9222 unlock_user(p, arg1, 0);
9223 break;
9224 case TARGET_NR_uname:
9225 /* no need to transcode because we use the linux syscall */
9227 struct new_utsname * buf;
9229 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9230 goto efault;
9231 ret = get_errno(sys_uname(buf));
9232 if (!is_error(ret)) {
9233 /* Overrite the native machine name with whatever is being
9234 emulated. */
9235 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9236 /* Allow the user to override the reported release. */
9237 if (qemu_uname_release && *qemu_uname_release)
9238 strcpy (buf->release, qemu_uname_release);
9240 unlock_user_struct(buf, arg1, 1);
9242 break;
9243 #ifdef TARGET_I386
9244 case TARGET_NR_modify_ldt:
9245 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9246 break;
9247 #if !defined(TARGET_X86_64)
9248 case TARGET_NR_vm86old:
9249 goto unimplemented;
9250 case TARGET_NR_vm86:
9251 ret = do_vm86(cpu_env, arg1, arg2);
9252 break;
9253 #endif
9254 #endif
/* adjtimex(2): struct timex translation not implemented. */
9255 case TARGET_NR_adjtimex:
9256 goto unimplemented;
/* Kernel module syscalls: fall through to a single "unimplemented"
 * report since none of them can sensibly be emulated. */
9257 #ifdef TARGET_NR_create_module
9258 case TARGET_NR_create_module:
9259 #endif
9260 case TARGET_NR_init_module:
9261 case TARGET_NR_delete_module:
9262 #ifdef TARGET_NR_get_kernel_syms
9263 case TARGET_NR_get_kernel_syms:
9264 #endif
9265 goto unimplemented;
9266 case TARGET_NR_quotactl:
9267 goto unimplemented;
/* Integer-only process calls: direct host passthrough. */
9268 case TARGET_NR_getpgid:
9269 ret = get_errno(getpgid(arg1));
9270 break;
9271 case TARGET_NR_fchdir:
9272 ret = get_errno(fchdir(arg1));
9273 break;
9274 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9275 case TARGET_NR_bdflush:
9276 goto unimplemented;
9277 #endif
9278 #ifdef TARGET_NR_sysfs
9279 case TARGET_NR_sysfs:
9280 goto unimplemented;
9281 #endif
/* personality(2): flag word passed through unchanged.
 * NOTE(review): no PER_* translation is performed here — assumes the
 * target and host personality constants agree; verify if new targets
 * are added. */
9282 case TARGET_NR_personality:
9283 ret = get_errno(personality(arg1));
9284 break;
9285 #ifdef TARGET_NR_afs_syscall
9286 case TARGET_NR_afs_syscall:
9287 goto unimplemented;
9288 #endif
9289 #ifdef TARGET_NR__llseek /* Not on alpha */
9290 case TARGET_NR__llseek:
9292 int64_t res;
9293 #if !defined(__NR_llseek)
9294 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
9295 if (res == -1) {
9296 ret = get_errno(res);
9297 } else {
9298 ret = 0;
9300 #else
9301 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9302 #endif
9303 if ((ret == 0) && put_user_s64(res, arg4)) {
9304 goto efault;
9307 break;
9308 #endif
9309 #ifdef TARGET_NR_getdents
9310 case TARGET_NR_getdents:
9311 #ifdef __NR_getdents
9312 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9314 struct target_dirent *target_dirp;
9315 struct linux_dirent *dirp;
9316 abi_long count = arg3;
9318 dirp = g_try_malloc(count);
9319 if (!dirp) {
9320 ret = -TARGET_ENOMEM;
9321 goto fail;
9324 ret = get_errno(sys_getdents(arg1, dirp, count));
9325 if (!is_error(ret)) {
9326 struct linux_dirent *de;
9327 struct target_dirent *tde;
9328 int len = ret;
9329 int reclen, treclen;
9330 int count1, tnamelen;
9332 count1 = 0;
9333 de = dirp;
9334 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9335 goto efault;
9336 tde = target_dirp;
9337 while (len > 0) {
9338 reclen = de->d_reclen;
9339 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9340 assert(tnamelen >= 0);
9341 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9342 assert(count1 + treclen <= count);
9343 tde->d_reclen = tswap16(treclen);
9344 tde->d_ino = tswapal(de->d_ino);
9345 tde->d_off = tswapal(de->d_off);
9346 memcpy(tde->d_name, de->d_name, tnamelen);
9347 de = (struct linux_dirent *)((char *)de + reclen);
9348 len -= reclen;
9349 tde = (struct target_dirent *)((char *)tde + treclen);
9350 count1 += treclen;
9352 ret = count1;
9353 unlock_user(target_dirp, arg2, ret);
9355 g_free(dirp);
9357 #else
9359 struct linux_dirent *dirp;
9360 abi_long count = arg3;
9362 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9363 goto efault;
9364 ret = get_errno(sys_getdents(arg1, dirp, count));
9365 if (!is_error(ret)) {
9366 struct linux_dirent *de;
9367 int len = ret;
9368 int reclen;
9369 de = dirp;
9370 while (len > 0) {
9371 reclen = de->d_reclen;
9372 if (reclen > len)
9373 break;
9374 de->d_reclen = tswap16(reclen);
9375 tswapls(&de->d_ino);
9376 tswapls(&de->d_off);
9377 de = (struct linux_dirent *)((char *)de + reclen);
9378 len -= reclen;
9381 unlock_user(dirp, arg2, ret);
9383 #endif
9384 #else
9385 /* Implement getdents in terms of getdents64 */
9387 struct linux_dirent64 *dirp;
9388 abi_long count = arg3;
9390 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9391 if (!dirp) {
9392 goto efault;
9394 ret = get_errno(sys_getdents64(arg1, dirp, count));
9395 if (!is_error(ret)) {
9396 /* Convert the dirent64 structs to target dirent. We do this
9397 * in-place, since we can guarantee that a target_dirent is no
9398 * larger than a dirent64; however this means we have to be
9399 * careful to read everything before writing in the new format.
9401 struct linux_dirent64 *de;
9402 struct target_dirent *tde;
9403 int len = ret;
9404 int tlen = 0;
9406 de = dirp;
9407 tde = (struct target_dirent *)dirp;
9408 while (len > 0) {
9409 int namelen, treclen;
9410 int reclen = de->d_reclen;
9411 uint64_t ino = de->d_ino;
9412 int64_t off = de->d_off;
9413 uint8_t type = de->d_type;
9415 namelen = strlen(de->d_name);
9416 treclen = offsetof(struct target_dirent, d_name)
9417 + namelen + 2;
9418 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9420 memmove(tde->d_name, de->d_name, namelen + 1);
9421 tde->d_ino = tswapal(ino);
9422 tde->d_off = tswapal(off);
9423 tde->d_reclen = tswap16(treclen);
9424 /* The target_dirent type is in what was formerly a padding
9425 * byte at the end of the structure:
9427 *(((char *)tde) + treclen - 1) = type;
9429 de = (struct linux_dirent64 *)((char *)de + reclen);
9430 tde = (struct target_dirent *)((char *)tde + treclen);
9431 len -= reclen;
9432 tlen += treclen;
9434 ret = tlen;
9436 unlock_user(dirp, arg2, ret);
9438 #endif
9439 break;
9440 #endif /* TARGET_NR_getdents */
9441 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9442 case TARGET_NR_getdents64:
9444 struct linux_dirent64 *dirp;
9445 abi_long count = arg3;
9446 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9447 goto efault;
9448 ret = get_errno(sys_getdents64(arg1, dirp, count));
9449 if (!is_error(ret)) {
9450 struct linux_dirent64 *de;
9451 int len = ret;
9452 int reclen;
9453 de = dirp;
9454 while (len > 0) {
9455 reclen = de->d_reclen;
9456 if (reclen > len)
9457 break;
9458 de->d_reclen = tswap16(reclen);
9459 tswap64s((uint64_t *)&de->d_ino);
9460 tswap64s((uint64_t *)&de->d_off);
9461 de = (struct linux_dirent64 *)((char *)de + reclen);
9462 len -= reclen;
9465 unlock_user(dirp, arg2, ret);
9467 break;
9468 #endif /* TARGET_NR_getdents64 */
9469 #if defined(TARGET_NR__newselect)
9470 case TARGET_NR__newselect:
9471 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9472 break;
9473 #endif
9474 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9475 # ifdef TARGET_NR_poll
9476 case TARGET_NR_poll:
9477 # endif
9478 # ifdef TARGET_NR_ppoll
9479 case TARGET_NR_ppoll:
9480 # endif
9482 struct target_pollfd *target_pfd;
9483 unsigned int nfds = arg2;
9484 struct pollfd *pfd;
9485 unsigned int i;
9487 pfd = NULL;
9488 target_pfd = NULL;
9489 if (nfds) {
9490 target_pfd = lock_user(VERIFY_WRITE, arg1,
9491 sizeof(struct target_pollfd) * nfds, 1);
9492 if (!target_pfd) {
9493 goto efault;
9496 pfd = alloca(sizeof(struct pollfd) * nfds);
9497 for (i = 0; i < nfds; i++) {
9498 pfd[i].fd = tswap32(target_pfd[i].fd);
9499 pfd[i].events = tswap16(target_pfd[i].events);
9503 switch (num) {
9504 # ifdef TARGET_NR_ppoll
9505 case TARGET_NR_ppoll:
9507 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9508 target_sigset_t *target_set;
9509 sigset_t _set, *set = &_set;
9511 if (arg3) {
9512 if (target_to_host_timespec(timeout_ts, arg3)) {
9513 unlock_user(target_pfd, arg1, 0);
9514 goto efault;
9516 } else {
9517 timeout_ts = NULL;
9520 if (arg4) {
9521 if (arg5 != sizeof(target_sigset_t)) {
9522 unlock_user(target_pfd, arg1, 0);
9523 ret = -TARGET_EINVAL;
9524 break;
9527 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9528 if (!target_set) {
9529 unlock_user(target_pfd, arg1, 0);
9530 goto efault;
9532 target_to_host_sigset(set, target_set);
9533 } else {
9534 set = NULL;
9537 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9538 set, SIGSET_T_SIZE));
9540 if (!is_error(ret) && arg3) {
9541 host_to_target_timespec(arg3, timeout_ts);
9543 if (arg4) {
9544 unlock_user(target_set, arg4, 0);
9546 break;
9548 # endif
9549 # ifdef TARGET_NR_poll
9550 case TARGET_NR_poll:
9552 struct timespec ts, *pts;
9554 if (arg3 >= 0) {
9555 /* Convert ms to secs, ns */
9556 ts.tv_sec = arg3 / 1000;
9557 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9558 pts = &ts;
9559 } else {
9560 /* -ve poll() timeout means "infinite" */
9561 pts = NULL;
9563 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9564 break;
9566 # endif
9567 default:
9568 g_assert_not_reached();
9571 if (!is_error(ret)) {
9572 for(i = 0; i < nfds; i++) {
9573 target_pfd[i].revents = tswap16(pfd[i].revents);
9576 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9578 break;
9579 #endif
9580 case TARGET_NR_flock:
9581 /* NOTE: the flock constant seems to be the same for every
9582 Linux platform */
9583 ret = get_errno(safe_flock(arg1, arg2));
9584 break;
9585 case TARGET_NR_readv:
9587 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9588 if (vec != NULL) {
9589 ret = get_errno(safe_readv(arg1, vec, arg3));
9590 unlock_iovec(vec, arg2, arg3, 1);
9591 } else {
9592 ret = -host_to_target_errno(errno);
9595 break;
9596 case TARGET_NR_writev:
9598 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9599 if (vec != NULL) {
9600 ret = get_errno(safe_writev(arg1, vec, arg3));
9601 unlock_iovec(vec, arg2, arg3, 0);
9602 } else {
9603 ret = -host_to_target_errno(errno);
9606 break;
/* getsid(2): integer in, integer out — direct passthrough. */
9607 case TARGET_NR_getsid:
9608 ret = get_errno(getsid(arg1));
9609 break;
9610 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9611 case TARGET_NR_fdatasync:
9612 ret = get_errno(fdatasync(arg1));
9613 break;
9614 #endif
9615 #ifdef TARGET_NR__sysctl
9616 case TARGET_NR__sysctl:
9617 /* We don't implement this, but ENOTDIR is always a safe
9618 return value. */
9619 ret = -TARGET_ENOTDIR;
9620 break;
9621 #endif
9622 case TARGET_NR_sched_getaffinity:
9624 unsigned int mask_size;
9625 unsigned long *mask;
9628 * sched_getaffinity needs multiples of ulong, so need to take
9629 * care of mismatches between target ulong and host ulong sizes.
9631 if (arg2 & (sizeof(abi_ulong) - 1)) {
9632 ret = -TARGET_EINVAL;
9633 break;
9635 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9637 mask = alloca(mask_size);
9638 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9640 if (!is_error(ret)) {
9641 if (ret > arg2) {
9642 /* More data returned than the caller's buffer will fit.
9643 * This only happens if sizeof(abi_long) < sizeof(long)
9644 * and the caller passed us a buffer holding an odd number
9645 * of abi_longs. If the host kernel is actually using the
9646 * extra 4 bytes then fail EINVAL; otherwise we can just
9647 * ignore them and only copy the interesting part.
9649 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9650 if (numcpus > arg2 * 8) {
9651 ret = -TARGET_EINVAL;
9652 break;
9654 ret = arg2;
9657 if (copy_to_user(arg3, mask, ret)) {
9658 goto efault;
9662 break;
9663 case TARGET_NR_sched_setaffinity:
9665 unsigned int mask_size;
9666 unsigned long *mask;
9669 * sched_setaffinity needs multiples of ulong, so need to take
9670 * care of mismatches between target ulong and host ulong sizes.
9672 if (arg2 & (sizeof(abi_ulong) - 1)) {
9673 ret = -TARGET_EINVAL;
9674 break;
9676 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9678 mask = alloca(mask_size);
9679 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9680 goto efault;
9682 memcpy(mask, p, arg2);
9683 unlock_user_struct(p, arg2, 0);
9685 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9687 break;
9688 case TARGET_NR_sched_setparam:
9690 struct sched_param *target_schp;
9691 struct sched_param schp;
9693 if (arg2 == 0) {
9694 return -TARGET_EINVAL;
9696 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9697 goto efault;
9698 schp.sched_priority = tswap32(target_schp->sched_priority);
9699 unlock_user_struct(target_schp, arg2, 0);
9700 ret = get_errno(sched_setparam(arg1, &schp));
9702 break;
9703 case TARGET_NR_sched_getparam:
9705 struct sched_param *target_schp;
9706 struct sched_param schp;
9708 if (arg2 == 0) {
9709 return -TARGET_EINVAL;
9711 ret = get_errno(sched_getparam(arg1, &schp));
9712 if (!is_error(ret)) {
9713 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9714 goto efault;
9715 target_schp->sched_priority = tswap32(schp.sched_priority);
9716 unlock_user_struct(target_schp, arg2, 1);
9719 break;
9720 case TARGET_NR_sched_setscheduler:
9722 struct sched_param *target_schp;
9723 struct sched_param schp;
9724 if (arg3 == 0) {
9725 return -TARGET_EINVAL;
9727 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9728 goto efault;
9729 schp.sched_priority = tswap32(target_schp->sched_priority);
9730 unlock_user_struct(target_schp, arg3, 0);
9731 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9733 break;
/* Scheduler queries with integer-only arguments: these need no
 * guest/host structure conversion, so they pass straight through. */
9734 case TARGET_NR_sched_getscheduler:
9735 ret = get_errno(sched_getscheduler(arg1));
9736 break;
9737 case TARGET_NR_sched_yield:
9738 ret = get_errno(sched_yield());
9739 break;
9740 case TARGET_NR_sched_get_priority_max:
9741 ret = get_errno(sched_get_priority_max(arg1));
9742 break;
9743 case TARGET_NR_sched_get_priority_min:
9744 ret = get_errno(sched_get_priority_min(arg1));
9745 break;
9746 case TARGET_NR_sched_rr_get_interval:
9748 struct timespec ts;
9749 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9750 if (!is_error(ret)) {
9751 ret = host_to_target_timespec(arg2, &ts);
9754 break;
9755 case TARGET_NR_nanosleep:
9757 struct timespec req, rem;
9758 target_to_host_timespec(&req, arg1);
9759 ret = get_errno(safe_nanosleep(&req, &rem));
9760 if (is_error(ret) && arg2) {
9761 host_to_target_timespec(arg2, &rem);
9764 break;
9765 #ifdef TARGET_NR_query_module
9766 case TARGET_NR_query_module:
9767 goto unimplemented;
9768 #endif
9769 #ifdef TARGET_NR_nfsservctl
9770 case TARGET_NR_nfsservctl:
9771 goto unimplemented;
9772 #endif
9773 case TARGET_NR_prctl:
9774 switch (arg1) {
9775 case PR_GET_PDEATHSIG:
9777 int deathsig;
9778 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9779 if (!is_error(ret) && arg2
9780 && put_user_ual(deathsig, arg2)) {
9781 goto efault;
9783 break;
9785 #ifdef PR_GET_NAME
9786 case PR_GET_NAME:
9788 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9789 if (!name) {
9790 goto efault;
9792 ret = get_errno(prctl(arg1, (unsigned long)name,
9793 arg3, arg4, arg5));
9794 unlock_user(name, arg2, 16);
9795 break;
9797 case PR_SET_NAME:
9799 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9800 if (!name) {
9801 goto efault;
9803 ret = get_errno(prctl(arg1, (unsigned long)name,
9804 arg3, arg4, arg5));
9805 unlock_user(name, arg2, 0);
9806 break;
9808 #endif
9809 default:
9810 /* Most prctl options have no pointer arguments */
9811 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9812 break;
9814 break;
9815 #ifdef TARGET_NR_arch_prctl
9816 case TARGET_NR_arch_prctl:
9817 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9818 ret = do_arch_prctl(cpu_env, arg1, arg2);
9819 break;
9820 #else
9821 goto unimplemented;
9822 #endif
9823 #endif
9824 #ifdef TARGET_NR_pread64
9825 case TARGET_NR_pread64:
9826 if (regpairs_aligned(cpu_env)) {
9827 arg4 = arg5;
9828 arg5 = arg6;
9830 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9831 goto efault;
9832 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9833 unlock_user(p, arg2, ret);
9834 break;
9835 case TARGET_NR_pwrite64:
9836 if (regpairs_aligned(cpu_env)) {
9837 arg4 = arg5;
9838 arg5 = arg6;
9840 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9841 goto efault;
9842 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9843 unlock_user(p, arg2, 0);
9844 break;
9845 #endif
9846 case TARGET_NR_getcwd:
9847 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9848 goto efault;
9849 ret = get_errno(sys_getcwd1(p, arg2));
9850 unlock_user(p, arg1, ret);
9851 break;
9852 case TARGET_NR_capget:
9853 case TARGET_NR_capset:
9855 struct target_user_cap_header *target_header;
9856 struct target_user_cap_data *target_data = NULL;
9857 struct __user_cap_header_struct header;
9858 struct __user_cap_data_struct data[2];
9859 struct __user_cap_data_struct *dataptr = NULL;
9860 int i, target_datalen;
9861 int data_items = 1;
9863 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9864 goto efault;
9866 header.version = tswap32(target_header->version);
9867 header.pid = tswap32(target_header->pid);
9869 if (header.version != _LINUX_CAPABILITY_VERSION) {
9870 /* Version 2 and up takes pointer to two user_data structs */
9871 data_items = 2;
9874 target_datalen = sizeof(*target_data) * data_items;
9876 if (arg2) {
9877 if (num == TARGET_NR_capget) {
9878 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9879 } else {
9880 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9882 if (!target_data) {
9883 unlock_user_struct(target_header, arg1, 0);
9884 goto efault;
9887 if (num == TARGET_NR_capset) {
9888 for (i = 0; i < data_items; i++) {
9889 data[i].effective = tswap32(target_data[i].effective);
9890 data[i].permitted = tswap32(target_data[i].permitted);
9891 data[i].inheritable = tswap32(target_data[i].inheritable);
9895 dataptr = data;
9898 if (num == TARGET_NR_capget) {
9899 ret = get_errno(capget(&header, dataptr));
9900 } else {
9901 ret = get_errno(capset(&header, dataptr));
9904 /* The kernel always updates version for both capget and capset */
9905 target_header->version = tswap32(header.version);
9906 unlock_user_struct(target_header, arg1, 1);
9908 if (arg2) {
9909 if (num == TARGET_NR_capget) {
9910 for (i = 0; i < data_items; i++) {
9911 target_data[i].effective = tswap32(data[i].effective);
9912 target_data[i].permitted = tswap32(data[i].permitted);
9913 target_data[i].inheritable = tswap32(data[i].inheritable);
9915 unlock_user(target_data, arg2, target_datalen);
9916 } else {
9917 unlock_user(target_data, arg2, 0);
9920 break;
9922 case TARGET_NR_sigaltstack:
9923 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9924 break;
9926 #ifdef CONFIG_SENDFILE
9927 case TARGET_NR_sendfile:
9929 off_t *offp = NULL;
9930 off_t off;
9931 if (arg3) {
9932 ret = get_user_sal(off, arg3);
9933 if (is_error(ret)) {
9934 break;
9936 offp = &off;
9938 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9939 if (!is_error(ret) && arg3) {
9940 abi_long ret2 = put_user_sal(off, arg3);
9941 if (is_error(ret2)) {
9942 ret = ret2;
9945 break;
9947 #ifdef TARGET_NR_sendfile64
9948 case TARGET_NR_sendfile64:
9950 off_t *offp = NULL;
9951 off_t off;
9952 if (arg3) {
9953 ret = get_user_s64(off, arg3);
9954 if (is_error(ret)) {
9955 break;
9957 offp = &off;
9959 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9960 if (!is_error(ret) && arg3) {
9961 abi_long ret2 = put_user_s64(off, arg3);
9962 if (is_error(ret2)) {
9963 ret = ret2;
9966 break;
9968 #endif
9969 #else
9970 case TARGET_NR_sendfile:
9971 #ifdef TARGET_NR_sendfile64
9972 case TARGET_NR_sendfile64:
9973 #endif
9974 goto unimplemented;
9975 #endif
9977 #ifdef TARGET_NR_getpmsg
9978 case TARGET_NR_getpmsg:
9979 goto unimplemented;
9980 #endif
9981 #ifdef TARGET_NR_putpmsg
9982 case TARGET_NR_putpmsg:
9983 goto unimplemented;
9984 #endif
9985 #ifdef TARGET_NR_vfork
9986 case TARGET_NR_vfork:
9987 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9988 0, 0, 0, 0));
9989 break;
9990 #endif
9991 #ifdef TARGET_NR_ugetrlimit
9992 case TARGET_NR_ugetrlimit:
9994 struct rlimit rlim;
9995 int resource = target_to_host_resource(arg1);
9996 ret = get_errno(getrlimit(resource, &rlim));
9997 if (!is_error(ret)) {
9998 struct target_rlimit *target_rlim;
9999 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10000 goto efault;
10001 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10002 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10003 unlock_user_struct(target_rlim, arg2, 1);
10005 break;
10007 #endif
10008 #ifdef TARGET_NR_truncate64
/* truncate64(2): the 64-bit offset may arrive split across two or
 * three registers depending on target ABI alignment rules; the
 * target_truncate64() helper reassembles it from arg2..arg4. */
10009 case TARGET_NR_truncate64:
10010 if (!(p = lock_user_string(arg1)))
10011 goto efault;
10012 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10013 unlock_user(p, arg1, 0);
10014 break;
10015 #endif
10016 #ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same register-pair offset handling as above. */
10017 case TARGET_NR_ftruncate64:
10018 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10019 break;
10020 #endif
10021 #ifdef TARGET_NR_stat64
/* stat64/lstat64/fstat64: host stat into st, then convert to the
 * target's 64-bit stat layout at guest address arg2. */
10022 case TARGET_NR_stat64:
10023 if (!(p = lock_user_string(arg1)))
10024 goto efault;
10025 ret = get_errno(stat(path(p), &st))
;
10026 unlock_user(p, arg1, 0);
10027 if (!is_error(ret))
10028 ret = host_to_target_stat64(cpu_env, arg2, &st);
10029 break;
10030 #endif
10031 #ifdef TARGET_NR_lstat64
10032 case TARGET_NR_lstat64:
10033 if (!(p = lock_user_string(arg1)))
10034 goto efault;
10035 ret = get_errno(lstat(path(p), &st));
10036 unlock_user(p, arg1, 0);
10037 if (!is_error(ret))
10038 ret = host_to_target_stat64(cpu_env, arg2, &st);
10039 break;
10040 #endif
10041 #ifdef TARGET_NR_fstat64
10042 case TARGET_NR_fstat64:
10043 ret = get_errno(fstat(arg1, &st));
10044 if (!is_error(ret))
10045 ret = host_to_target_stat64(cpu_env, arg2, &st);
10046 break;
10047 #endif
10048 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
/* fstatat64/newfstatat share one implementation: dirfd-relative
 * stat with the result converted at guest address arg3. arg4 holds
 * the AT_* flags, passed through to the host. */
10049 #ifdef TARGET_NR_fstatat64
10050 case TARGET_NR_fstatat64:
10051 #endif
10052 #ifdef TARGET_NR_newfstatat
10053 case TARGET_NR_newfstatat:
10054 #endif
10055 if (!(p = lock_user_string(arg2)))
10056 goto efault;
10057 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10058 if (!is_error(ret))
10059 ret = host_to_target_stat64(cpu_env, arg3, &st);
10060 break;
10061 #endif
10062 #ifdef TARGET_NR_lchown
10063 case TARGET_NR_lchown:
10064 if (!(p = lock_user_string(arg1)))
10065 goto efault;
10066 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10067 unlock_user(p, arg1, 0);
10068 break;
10069 #endif
10070 #ifdef TARGET_NR_getuid
10071 case TARGET_NR_getuid:
10072 ret = get_errno(high2lowuid(getuid()));
10073 break;
10074 #endif
10075 #ifdef TARGET_NR_getgid
10076 case TARGET_NR_getgid:
10077 ret = get_errno(high2lowgid(getgid()));
10078 break;
10079 #endif
10080 #ifdef TARGET_NR_geteuid
10081 case TARGET_NR_geteuid:
10082 ret = get_errno(high2lowuid(geteuid()));
10083 break;
10084 #endif
10085 #ifdef TARGET_NR_getegid
10086 case TARGET_NR_getegid:
10087 ret = get_errno(high2lowgid(getegid()));
10088 break;
10089 #endif
10090 case TARGET_NR_setreuid:
10091 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10092 break;
10093 case TARGET_NR_setregid:
10094 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10095 break;
10096 case TARGET_NR_getgroups:
10098 int gidsetsize = arg1;
10099 target_id *target_grouplist;
10100 gid_t *grouplist;
10101 int i;
10103 grouplist = alloca(gidsetsize * sizeof(gid_t));
10104 ret = get_errno(getgroups(gidsetsize, grouplist));
10105 if (gidsetsize == 0)
10106 break;
10107 if (!is_error(ret)) {
10108 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10109 if (!target_grouplist)
10110 goto efault;
10111 for(i = 0;i < ret; i++)
10112 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10113 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10116 break;
10117 case TARGET_NR_setgroups:
10119 int gidsetsize = arg1;
10120 target_id *target_grouplist;
10121 gid_t *grouplist = NULL;
10122 int i;
10123 if (gidsetsize) {
10124 grouplist = alloca(gidsetsize * sizeof(gid_t));
10125 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10126 if (!target_grouplist) {
10127 ret = -TARGET_EFAULT;
10128 goto fail;
10130 for (i = 0; i < gidsetsize; i++) {
10131 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10133 unlock_user(target_grouplist, arg2, 0);
10135 ret = get_errno(setgroups(gidsetsize, grouplist));
10137 break;
/* fchown(2): 16-bit target uid/gid values are widened to host ids
 * via low2highuid/low2highgid (which also map the -1 sentinel). */
10138 case TARGET_NR_fchown:
10139 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10140 break;
10141 #if defined(TARGET_NR_fchownat)
/* fchownat(2): dirfd-relative chown; arg5 carries the AT_* flags. */
10142 case TARGET_NR_fchownat:
10143 if (!(p = lock_user_string(arg2)))
10144 goto efault;
10145 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10146 low2highgid(arg4), arg5));
10147 unlock_user(p, arg2, 0);
10148 break;
10149 #endif
10150 #ifdef TARGET_NR_setresuid
10151 case TARGET_NR_setresuid:
10152 ret = get_errno(sys_setresuid(low2highuid(arg1),
10153 low2highuid(arg2),
10154 low2highuid(arg3)));
10155 break;
10156 #endif
10157 #ifdef TARGET_NR_getresuid
10158 case TARGET_NR_getresuid:
10160 uid_t ruid, euid, suid;
10161 ret = get_errno(getresuid(&ruid, &euid, &suid));
10162 if (!is_error(ret)) {
10163 if (put_user_id(high2lowuid(ruid), arg1)
10164 || put_user_id(high2lowuid(euid), arg2)
10165 || put_user_id(high2lowuid(suid), arg3))
10166 goto efault;
10169 break;
10170 #endif
10171 #ifdef TARGET_NR_getresgid
10172 case TARGET_NR_setresgid:
10173 ret = get_errno(sys_setresgid(low2highgid(arg1),
10174 low2highgid(arg2),
10175 low2highgid(arg3)));
10176 break;
10177 #endif
10178 #ifdef TARGET_NR_getresgid
10179 case TARGET_NR_getresgid:
10181 gid_t rgid, egid, sgid;
10182 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10183 if (!is_error(ret)) {
10184 if (put_user_id(high2lowgid(rgid), arg1)
10185 || put_user_id(high2lowgid(egid), arg2)
10186 || put_user_id(high2lowgid(sgid), arg3))
10187 goto efault;
10190 break;
10191 #endif
10192 #ifdef TARGET_NR_chown
/* chown(2): arg1 = path (guest pointer), arg2/arg3 = 16-bit uid/gid
 * widened to host ids. */
10193 case TARGET_NR_chown:
10194 if (!(p = lock_user_string(arg1)))
10195 goto efault;
10196 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10197 unlock_user(p, arg1, 0);
10198 break;
10199 #endif
/* setuid(2)/setgid(2): 16-bit target id widened to host id. */
10200 case TARGET_NR_setuid:
10201 ret = get_errno(sys_setuid(low2highuid(arg1)));
10202 break;
10203 case TARGET_NR_setgid:
10204 ret = get_errno(sys_setgid(low2highgid(arg1)));
10205 break;
/* setfsuid(2)/setfsgid(2): ids passed through unmodified -- these
 * calls never fail with an error return, only report the previous id. */
10206 case TARGET_NR_setfsuid:
10207 ret = get_errno(setfsuid(arg1));
10208 break;
10209 case TARGET_NR_setfsgid:
10210 ret = get_errno(setfsgid(arg1));
10211 break;
10213 #ifdef TARGET_NR_lchown32
/* lchown32: 32-bit-id variant of lchown(2); ids pass straight through
 * (no low2high widening needed). */
10214 case TARGET_NR_lchown32:
10215 if (!(p = lock_user_string(arg1)))
10216 goto efault;
10217 ret = get_errno(lchown(p, arg2, arg3));
10218 unlock_user(p, arg1, 0);
10219 break;
10220 #endif
10221 #ifdef TARGET_NR_getuid32
/* getuid32: 32-bit-id variant of getuid(2). */
10222 case TARGET_NR_getuid32:
10223 ret = get_errno(getuid());
10224 break;
10225 #endif
10227 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10228 /* Alpha specific */
10229 case TARGET_NR_getxuid:
10231 uid_t euid;
10232 euid=geteuid();
10233 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10235 ret = get_errno(getuid());
10236 break;
10237 #endif
10238 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10239 /* Alpha specific */
10240 case TARGET_NR_getxgid:
10242 uid_t egid;
10243 egid=getegid();
10244 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10246 ret = get_errno(getgid());
10247 break;
10248 #endif
10249 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10250 /* Alpha specific */
10251 case TARGET_NR_osf_getsysinfo:
10252 ret = -TARGET_EOPNOTSUPP;
10253 switch (arg1) {
10254 case TARGET_GSI_IEEE_FP_CONTROL:
10256 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10258 /* Copied from linux ieee_fpcr_to_swcr. */
10259 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10260 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10261 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10262 | SWCR_TRAP_ENABLE_DZE
10263 | SWCR_TRAP_ENABLE_OVF);
10264 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10265 | SWCR_TRAP_ENABLE_INE);
10266 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10267 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10269 if (put_user_u64 (swcr, arg2))
10270 goto efault;
10271 ret = 0;
10273 break;
10275 /* case GSI_IEEE_STATE_AT_SIGNAL:
10276 -- Not implemented in linux kernel.
10277 case GSI_UACPROC:
10278 -- Retrieves current unaligned access state; not much used.
10279 case GSI_PROC_TYPE:
10280 -- Retrieves implver information; surely not used.
10281 case GSI_GET_HWRPB:
10282 -- Grabs a copy of the HWRPB; surely not used.
10285 break;
10286 #endif
10287 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10288 /* Alpha specific */
10289 case TARGET_NR_osf_setsysinfo:
10290 ret = -TARGET_EOPNOTSUPP;
10291 switch (arg1) {
10292 case TARGET_SSI_IEEE_FP_CONTROL:
10294 uint64_t swcr, fpcr, orig_fpcr;
10296 if (get_user_u64 (swcr, arg2)) {
10297 goto efault;
10299 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10300 fpcr = orig_fpcr & FPCR_DYN_MASK;
10302 /* Copied from linux ieee_swcr_to_fpcr. */
10303 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10304 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10305 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10306 | SWCR_TRAP_ENABLE_DZE
10307 | SWCR_TRAP_ENABLE_OVF)) << 48;
10308 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10309 | SWCR_TRAP_ENABLE_INE)) << 57;
10310 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10311 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10313 cpu_alpha_store_fpcr(cpu_env, fpcr);
10314 ret = 0;
10316 break;
10318 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10320 uint64_t exc, fpcr, orig_fpcr;
10321 int si_code;
10323 if (get_user_u64(exc, arg2)) {
10324 goto efault;
10327 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10329 /* We only add to the exception status here. */
10330 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10332 cpu_alpha_store_fpcr(cpu_env, fpcr);
10333 ret = 0;
10335 /* Old exceptions are not signaled. */
10336 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10338 /* If any exceptions set by this call,
10339 and are unmasked, send a signal. */
10340 si_code = 0;
10341 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10342 si_code = TARGET_FPE_FLTRES;
10344 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10345 si_code = TARGET_FPE_FLTUND;
10347 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10348 si_code = TARGET_FPE_FLTOVF;
10350 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10351 si_code = TARGET_FPE_FLTDIV;
10353 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10354 si_code = TARGET_FPE_FLTINV;
10356 if (si_code != 0) {
10357 target_siginfo_t info;
10358 info.si_signo = SIGFPE;
10359 info.si_errno = 0;
10360 info.si_code = si_code;
10361 info._sifields._sigfault._addr
10362 = ((CPUArchState *)cpu_env)->pc;
10363 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10366 break;
10368 /* case SSI_NVPAIRS:
10369 -- Used with SSIN_UACPROC to enable unaligned accesses.
10370 case SSI_IEEE_STATE_AT_SIGNAL:
10371 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10372 -- Not implemented in linux kernel
10375 break;
10376 #endif
10377 #ifdef TARGET_NR_osf_sigprocmask
10378 /* Alpha specific. */
10379 case TARGET_NR_osf_sigprocmask:
10381 abi_ulong mask;
10382 int how;
10383 sigset_t set, oldset;
10385 switch(arg1) {
10386 case TARGET_SIG_BLOCK:
10387 how = SIG_BLOCK;
10388 break;
10389 case TARGET_SIG_UNBLOCK:
10390 how = SIG_UNBLOCK;
10391 break;
10392 case TARGET_SIG_SETMASK:
10393 how = SIG_SETMASK;
10394 break;
10395 default:
10396 ret = -TARGET_EINVAL;
10397 goto fail;
10399 mask = arg2;
10400 target_to_host_old_sigset(&set, &mask);
10401 ret = do_sigprocmask(how, &set, &oldset);
10402 if (!ret) {
10403 host_to_target_old_sigset(&mask, &oldset);
10404 ret = mask;
10407 break;
10408 #endif
/* 32-bit-id variants of the get*/set* id syscalls: unlike the 16-bit
 * forms above, ids pass through without low2high/high2low conversion. */
10410 #ifdef TARGET_NR_getgid32
10411 case TARGET_NR_getgid32:
10412 ret = get_errno(getgid())
10413 break;
10414 #endif
10415 #ifdef TARGET_NR_geteuid32
10416 case TARGET_NR_geteuid32:
10417 ret = get_errno(geteuid());
10418 break;
10419 #endif
10420 #ifdef TARGET_NR_getegid32
10421 case TARGET_NR_getegid32:
10422 ret = get_errno(getegid());
10423 break;
10424 #endif
10425 #ifdef TARGET_NR_setreuid32
10426 case TARGET_NR_setreuid32:
10427 ret = get_errno(setreuid(arg1, arg2));
10428 break;
10429 #endif
10430 #ifdef TARGET_NR_setregid32
10431 case TARGET_NR_setregid32:
10432 ret = get_errno(setregid(arg1, arg2));
10433 break;
10434 #endif
10435 #ifdef TARGET_NR_getgroups32
10436 case TARGET_NR_getgroups32:
10438 int gidsetsize = arg1;
10439 uint32_t *target_grouplist;
10440 gid_t *grouplist;
10441 int i;
10443 grouplist = alloca(gidsetsize * sizeof(gid_t));
10444 ret = get_errno(getgroups(gidsetsize, grouplist));
10445 if (gidsetsize == 0)
10446 break;
10447 if (!is_error(ret)) {
10448 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10449 if (!target_grouplist) {
10450 ret = -TARGET_EFAULT;
10451 goto fail;
10453 for(i = 0;i < ret; i++)
10454 target_grouplist[i] = tswap32(grouplist[i]);
10455 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10458 break;
10459 #endif
10460 #ifdef TARGET_NR_setgroups32
10461 case TARGET_NR_setgroups32:
10463 int gidsetsize = arg1;
10464 uint32_t *target_grouplist;
10465 gid_t *grouplist;
10466 int i;
10468 grouplist = alloca(gidsetsize * sizeof(gid_t));
10469 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10470 if (!target_grouplist) {
10471 ret = -TARGET_EFAULT;
10472 goto fail;
10474 for(i = 0;i < gidsetsize; i++)
10475 grouplist[i] = tswap32(target_grouplist[i]);
10476 unlock_user(target_grouplist, arg2, 0);
10477 ret = get_errno(setgroups(gidsetsize, grouplist));
10479 break;
10480 #endif
10481 #ifdef TARGET_NR_fchown32
/* fchown32: 32-bit-id fchown(2); ids pass through unconverted. */
10482 case TARGET_NR_fchown32:
10483 ret = get_errno(fchown(arg1, arg2, arg3));
10484 break;
10485 #endif
10486 #ifdef TARGET_NR_setresuid32
/* setresuid32: 32-bit-id setresuid(2). */
10487 case TARGET_NR_setresuid32:
10488 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10489 break;
10490 #endif
10491 #ifdef TARGET_NR_getresuid32
10492 case TARGET_NR_getresuid32:
10494 uid_t ruid, euid, suid;
10495 ret = get_errno(getresuid(&ruid, &euid, &suid));
10496 if (!is_error(ret)) {
10497 if (put_user_u32(ruid, arg1)
10498 || put_user_u32(euid, arg2)
10499 || put_user_u32(suid, arg3))
10500 goto efault;
10503 break;
10504 #endif
10505 #ifdef TARGET_NR_setresgid32
/* setresgid32: 32-bit-id setresgid(2); ids pass through unconverted. */
10506 case TARGET_NR_setresgid32:
10507 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10508 break;
10509 #endif
10510 #ifdef TARGET_NR_getresgid32
10511 case TARGET_NR_getresgid32:
10513 gid_t rgid, egid, sgid;
10514 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10515 if (!is_error(ret)) {
10516 if (put_user_u32(rgid, arg1)
10517 || put_user_u32(egid, arg2)
10518 || put_user_u32(sgid, arg3))
10519 goto efault;
10522 break;
10523 #endif
10524 #ifdef TARGET_NR_chown32
/* chown32: 32-bit-id chown(2); path is a guest pointer, ids pass
 * through unconverted. */
10525 case TARGET_NR_chown32:
10526 if (!(p = lock_user_string(arg1)))
10527 goto efault;
10528 ret = get_errno(chown(p, arg2, arg3));
10529 unlock_user(p, arg1, 0);
10530 break;
10531 #endif
/* 32-bit-id variants of setuid/setgid/setfsuid/setfsgid. */
10532 #ifdef TARGET_NR_setuid32
10533 case TARGET_NR_setuid32:
10534 ret = get_errno(sys_setuid(arg1));
10535 break;
10536 #endif
10537 #ifdef TARGET_NR_setgid32
10538 case TARGET_NR_setgid32:
10539 ret = get_errno(sys_setgid(arg1));
10540 break;
10541 #endif
10542 #ifdef TARGET_NR_setfsuid32
10543 case TARGET_NR_setfsuid32:
10544 ret = get_errno(setfsuid(arg1));
10545 break;
10546 #endif
10547 #ifdef TARGET_NR_setfsgid32
10548 case TARGET_NR_setfsgid32:
10549 ret = get_errno(setfsgid(arg1));
10550 break;
10551 #endif
/* pivot_root(2): not emulated; falls through to the common
 * "unimplemented syscall" handling (returns -TARGET_ENOSYS). */
10553 case TARGET_NR_pivot_root:
10554 goto unimplemented;
10555 #ifdef TARGET_NR_mincore
10556 case TARGET_NR_mincore:
10558 void *a;
10559 ret = -TARGET_EFAULT;
10560 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10561 goto efault;
10562 if (!(p = lock_user_string(arg3)))
10563 goto mincore_fail;
10564 ret = get_errno(mincore(a, arg2, p));
10565 unlock_user(p, arg3, ret);
10566 mincore_fail:
10567 unlock_user(a, arg1, 0);
10569 break;
10570 #endif
10571 #ifdef TARGET_NR_arm_fadvise64_64
10572 case TARGET_NR_arm_fadvise64_64:
10573 /* arm_fadvise64_64 looks like fadvise64_64 but
10574 * with different argument order: fd, advice, offset, len
10575 * rather than the usual fd, offset, len, advice.
10576 * Note that offset and len are both 64-bit so appear as
10577 * pairs of 32-bit registers.
10579 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10580 target_offset64(arg5, arg6), arg2);
10581 ret = -host_to_target_errno(ret);
10582 break;
10583 #endif
10585 #if TARGET_ABI_BITS == 32
10587 #ifdef TARGET_NR_fadvise64_64
10588 case TARGET_NR_fadvise64_64:
10589 /* 6 args: fd, offset (high, low), len (high, low), advice */
10590 if (regpairs_aligned(cpu_env)) {
10591 /* offset is in (3,4), len in (5,6) and advice in 7 */
10592 arg2 = arg3;
10593 arg3 = arg4;
10594 arg4 = arg5;
10595 arg5 = arg6;
10596 arg6 = arg7;
10598 ret = -host_to_target_errno(posix_fadvise(arg1,
10599 target_offset64(arg2, arg3),
10600 target_offset64(arg4, arg5),
10601 arg6));
10602 break;
10603 #endif
10605 #ifdef TARGET_NR_fadvise64
10606 case TARGET_NR_fadvise64:
10607 /* 5 args: fd, offset (high, low), len, advice */
10608 if (regpairs_aligned(cpu_env)) {
10609 /* offset is in (3,4), len in 5 and advice in 6 */
10610 arg2 = arg3;
10611 arg3 = arg4;
10612 arg4 = arg5;
10613 arg5 = arg6;
10615 ret = -host_to_target_errno(posix_fadvise(arg1,
10616 target_offset64(arg2, arg3),
10617 arg4, arg5));
10618 break;
10619 #endif
10621 #else /* not a 32-bit ABI */
10622 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10623 #ifdef TARGET_NR_fadvise64_64
10624 case TARGET_NR_fadvise64_64:
10625 #endif
10626 #ifdef TARGET_NR_fadvise64
10627 case TARGET_NR_fadvise64:
10628 #endif
10629 #ifdef TARGET_S390X
10630 switch (arg4) {
10631 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10632 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10633 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10634 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10635 default: break;
10637 #endif
10638 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10639 break;
10640 #endif
10641 #endif /* end of 64-bit ABI fadvise handling */
10643 #ifdef TARGET_NR_madvise
10644 case TARGET_NR_madvise:
10645 /* A straight passthrough may not be safe because qemu sometimes
10646 turns private file-backed mappings into anonymous mappings.
10647 This will break MADV_DONTNEED.
10648 This is a hint, so ignoring and returning success is ok. */
10649 ret = get_errno(0);
10650 break;
10651 #endif
10652 #if TARGET_ABI_BITS == 32
10653 case TARGET_NR_fcntl64:
10655 int cmd;
10656 struct flock64 fl;
10657 from_flock64_fn *copyfrom = copy_from_user_flock64;
10658 to_flock64_fn *copyto = copy_to_user_flock64;
10660 #ifdef TARGET_ARM
10661 if (((CPUARMState *)cpu_env)->eabi) {
10662 copyfrom = copy_from_user_eabi_flock64;
10663 copyto = copy_to_user_eabi_flock64;
10665 #endif
10667 cmd = target_to_host_fcntl_cmd(arg2);
10668 if (cmd == -TARGET_EINVAL) {
10669 ret = cmd;
10670 break;
10673 switch(arg2) {
10674 case TARGET_F_GETLK64:
10675 ret = copyfrom(&fl, arg3);
10676 if (ret) {
10677 break;
10679 ret = get_errno(fcntl(arg1, cmd, &fl));
10680 if (ret == 0) {
10681 ret = copyto(arg3, &fl);
10683 break;
10685 case TARGET_F_SETLK64:
10686 case TARGET_F_SETLKW64:
10687 ret = copyfrom(&fl, arg3);
10688 if (ret) {
10689 break;
10691 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10692 break;
10693 default:
10694 ret = do_fcntl(arg1, arg2, arg3);
10695 break;
10697 break;
10699 #endif
10700 #ifdef TARGET_NR_cacheflush
10701 case TARGET_NR_cacheflush:
10702 /* self-modifying code is handled automatically, so nothing needed */
10703 ret = 0;
10704 break;
10705 #endif
10706 #ifdef TARGET_NR_security
/* security: obsolete placeholder syscall; not emulated. */
10707 case TARGET_NR_security:
10708 goto unimplemented;
10709 #endif
10710 #ifdef TARGET_NR_getpagesize
/* getpagesize: report the TARGET page size, not the host's --
 * guest code must see the page size it was compiled for. */
10711 case TARGET_NR_getpagesize:
10712 ret = TARGET_PAGE_SIZE;
10713 break;
10714 #endif
/* gettid(2): thread id of the host thread running this guest thread. */
10715 case TARGET_NR_gettid:
10716 ret = get_errno(gettid());
10717 break;
10718 #ifdef TARGET_NR_readahead
10719 case TARGET_NR_readahead:
10720 #if TARGET_ABI_BITS == 32
10721 if (regpairs_aligned(cpu_env)) {
10722 arg2 = arg3;
10723 arg3 = arg4;
10724 arg4 = arg5;
10726 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10727 #else
10728 ret = get_errno(readahead(arg1, arg2, arg3));
10729 #endif
10730 break;
10731 #endif
10732 #ifdef CONFIG_ATTR
10733 #ifdef TARGET_NR_setxattr
10734 case TARGET_NR_listxattr:
10735 case TARGET_NR_llistxattr:
10737 void *p, *b = 0;
10738 if (arg2) {
10739 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10740 if (!b) {
10741 ret = -TARGET_EFAULT;
10742 break;
10745 p = lock_user_string(arg1);
10746 if (p) {
10747 if (num == TARGET_NR_listxattr) {
10748 ret = get_errno(listxattr(p, b, arg3));
10749 } else {
10750 ret = get_errno(llistxattr(p, b, arg3));
10752 } else {
10753 ret = -TARGET_EFAULT;
10755 unlock_user(p, arg1, 0);
10756 unlock_user(b, arg2, arg3);
10757 break;
10759 case TARGET_NR_flistxattr:
10761 void *b = 0;
10762 if (arg2) {
10763 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10764 if (!b) {
10765 ret = -TARGET_EFAULT;
10766 break;
10769 ret = get_errno(flistxattr(arg1, b, arg3));
10770 unlock_user(b, arg2, arg3);
10771 break;
10773 case TARGET_NR_setxattr:
10774 case TARGET_NR_lsetxattr:
10776 void *p, *n, *v = 0;
10777 if (arg3) {
10778 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10779 if (!v) {
10780 ret = -TARGET_EFAULT;
10781 break;
10784 p = lock_user_string(arg1);
10785 n = lock_user_string(arg2);
10786 if (p && n) {
10787 if (num == TARGET_NR_setxattr) {
10788 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10789 } else {
10790 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10792 } else {
10793 ret = -TARGET_EFAULT;
10795 unlock_user(p, arg1, 0);
10796 unlock_user(n, arg2, 0);
10797 unlock_user(v, arg3, 0);
10799 break;
10800 case TARGET_NR_fsetxattr:
10802 void *n, *v = 0;
10803 if (arg3) {
10804 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10805 if (!v) {
10806 ret = -TARGET_EFAULT;
10807 break;
10810 n = lock_user_string(arg2);
10811 if (n) {
10812 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10813 } else {
10814 ret = -TARGET_EFAULT;
10816 unlock_user(n, arg2, 0);
10817 unlock_user(v, arg3, 0);
10819 break;
10820 case TARGET_NR_getxattr:
10821 case TARGET_NR_lgetxattr:
10823 void *p, *n, *v = 0;
10824 if (arg3) {
10825 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10826 if (!v) {
10827 ret = -TARGET_EFAULT;
10828 break;
10831 p = lock_user_string(arg1);
10832 n = lock_user_string(arg2);
10833 if (p && n) {
10834 if (num == TARGET_NR_getxattr) {
10835 ret = get_errno(getxattr(p, n, v, arg4));
10836 } else {
10837 ret = get_errno(lgetxattr(p, n, v, arg4));
10839 } else {
10840 ret = -TARGET_EFAULT;
10842 unlock_user(p, arg1, 0);
10843 unlock_user(n, arg2, 0);
10844 unlock_user(v, arg3, arg4);
10846 break;
10847 case TARGET_NR_fgetxattr:
10849 void *n, *v = 0;
10850 if (arg3) {
10851 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10852 if (!v) {
10853 ret = -TARGET_EFAULT;
10854 break;
10857 n = lock_user_string(arg2);
10858 if (n) {
10859 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10860 } else {
10861 ret = -TARGET_EFAULT;
10863 unlock_user(n, arg2, 0);
10864 unlock_user(v, arg3, arg4);
10866 break;
10867 case TARGET_NR_removexattr:
10868 case TARGET_NR_lremovexattr:
10870 void *p, *n;
10871 p = lock_user_string(arg1);
10872 n = lock_user_string(arg2);
10873 if (p && n) {
10874 if (num == TARGET_NR_removexattr) {
10875 ret = get_errno(removexattr(p, n));
10876 } else {
10877 ret = get_errno(lremovexattr(p, n));
10879 } else {
10880 ret = -TARGET_EFAULT;
10882 unlock_user(p, arg1, 0);
10883 unlock_user(n, arg2, 0);
10885 break;
10886 case TARGET_NR_fremovexattr:
10888 void *n;
10889 n = lock_user_string(arg2);
10890 if (n) {
10891 ret = get_errno(fremovexattr(arg1, n));
10892 } else {
10893 ret = -TARGET_EFAULT;
10895 unlock_user(n, arg2, 0);
10897 break;
10898 #endif
10899 #endif /* CONFIG_ATTR */
10900 #ifdef TARGET_NR_set_thread_area
10901 case TARGET_NR_set_thread_area:
10902 #if defined(TARGET_MIPS)
10903 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10904 ret = 0;
10905 break;
10906 #elif defined(TARGET_CRIS)
10907 if (arg1 & 0xff)
10908 ret = -TARGET_EINVAL;
10909 else {
10910 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10911 ret = 0;
10913 break;
10914 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10915 ret = do_set_thread_area(cpu_env, arg1);
10916 break;
10917 #elif defined(TARGET_M68K)
10919 TaskState *ts = cpu->opaque;
10920 ts->tp_value = arg1;
10921 ret = 0;
10922 break;
10924 #else
10925 goto unimplemented_nowarn;
10926 #endif
10927 #endif
10928 #ifdef TARGET_NR_get_thread_area
10929 case TARGET_NR_get_thread_area:
10930 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10931 ret = do_get_thread_area(cpu_env, arg1);
10932 break;
10933 #elif defined(TARGET_M68K)
10935 TaskState *ts = cpu->opaque;
10936 ret = ts->tp_value;
10937 break;
10939 #else
10940 goto unimplemented_nowarn;
10941 #endif
10942 #endif
10943 #ifdef TARGET_NR_getdomainname
10944 case TARGET_NR_getdomainname:
10945 goto unimplemented_nowarn;
10946 #endif
10948 #ifdef TARGET_NR_clock_gettime
10949 case TARGET_NR_clock_gettime:
10951 struct timespec ts;
10952 ret = get_errno(clock_gettime(arg1, &ts));
10953 if (!is_error(ret)) {
10954 host_to_target_timespec(arg2, &ts);
10956 break;
10958 #endif
10959 #ifdef TARGET_NR_clock_getres
10960 case TARGET_NR_clock_getres:
10962 struct timespec ts;
10963 ret = get_errno(clock_getres(arg1, &ts));
10964 if (!is_error(ret)) {
10965 host_to_target_timespec(arg2, &ts);
10967 break;
10969 #endif
10970 #ifdef TARGET_NR_clock_nanosleep
10971 case TARGET_NR_clock_nanosleep:
10973 struct timespec ts;
10974 target_to_host_timespec(&ts, arg3);
10975 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10976 &ts, arg4 ? &ts : NULL));
10977 if (arg4)
10978 host_to_target_timespec(arg4, &ts);
10980 #if defined(TARGET_PPC)
10981 /* clock_nanosleep is odd in that it returns positive errno values.
10982 * On PPC, CR0 bit 3 should be set in such a situation. */
10983 if (ret && ret != -TARGET_ERESTARTSYS) {
10984 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10986 #endif
10987 break;
10989 #endif
10991 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
/* set_tid_address(2): the kernel writes the host thread id through this
 * pointer on exit, so translate the guest address to a host pointer. */
10992 case TARGET_NR_set_tid_address:
10993 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10994 break;
10995 #endif
/* tkill(2)/tgkill(2): signal numbers are target values and must be
 * mapped to host signal numbers before delivery. */
10997 case TARGET_NR_tkill:
10998 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10999 break;
11001 case TARGET_NR_tgkill:
11002 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11003 target_to_host_signal(arg3)));
11004 break;
11006 #ifdef TARGET_NR_set_robust_list
11007 case TARGET_NR_set_robust_list:
11008 case TARGET_NR_get_robust_list:
11009 /* The ABI for supporting robust futexes has userspace pass
11010 * the kernel a pointer to a linked list which is updated by
11011 * userspace after the syscall; the list is walked by the kernel
11012 * when the thread exits. Since the linked list in QEMU guest
11013 * memory isn't a valid linked list for the host and we have
11014 * no way to reliably intercept the thread-death event, we can't
11015 * support these. Silently return ENOSYS so that guest userspace
11016 * falls back to a non-robust futex implementation (which should
11017 * be OK except in the corner case of the guest crashing while
11018 * holding a mutex that is shared with another process via
11019 * shared memory).
11021 goto unimplemented_nowarn;
11022 #endif
11024 #if defined(TARGET_NR_utimensat)
11025 case TARGET_NR_utimensat:
11027 struct timespec *tsp, ts[2];
11028 if (!arg3) {
11029 tsp = NULL;
11030 } else {
11031 target_to_host_timespec(ts, arg3);
11032 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11033 tsp = ts;
11035 if (!arg2)
11036 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11037 else {
11038 if (!(p = lock_user_string(arg2))) {
11039 ret = -TARGET_EFAULT;
11040 goto fail;
11042 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11043 unlock_user(p, arg2, 0);
11046 break;
11047 #endif
/* futex(2): all translation (op decoding, timespec conversion) is done
 * in the do_futex() helper. */
11048 case TARGET_NR_futex:
11049 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11050 break;
11051 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11052 case TARGET_NR_inotify_init:
11053 ret = get_errno(sys_inotify_init());
11054 break;
11055 #endif
11056 #ifdef CONFIG_INOTIFY1
11057 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11058 case TARGET_NR_inotify_init1:
11059 ret = get_errno(sys_inotify_init1(arg1));
11060 break;
11061 #endif
11062 #endif
11063 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* NOTE(review): lock_user_string() result is not checked for NULL here,
 * unlike the other path-taking arms which goto efault -- a bad guest
 * pointer would pass NULL through path(). */
11064 case TARGET_NR_inotify_add_watch:
11065 p = lock_user_string(arg2);
11066 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11067 unlock_user(p, arg2, 0);
11068 break;
11069 #endif
11070 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11071 case TARGET_NR_inotify_rm_watch:
11072 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11073 break;
11074 #endif
11076 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11077 case TARGET_NR_mq_open:
11079 struct mq_attr posix_mq_attr, *attrp;
11081 p = lock_user_string(arg1 - 1);
11082 if (arg4 != 0) {
11083 copy_from_user_mq_attr (&posix_mq_attr, arg4);
11084 attrp = &posix_mq_attr;
11085 } else {
11086 attrp = 0;
11088 ret = get_errno(mq_open(p, arg2, arg3, attrp));
11089 unlock_user (p, arg1, 0);
11091 break;
/* mq_unlink(3): remove a POSIX message queue by name.
 * NOTE(review): the name is locked at arg1 - 1 (same pattern as the
 * mq_open arm above) -- presumably to re-expose the leading '/' of the
 * queue name; confirm. lock_user_string() is also not NULL-checked. */
11093 case TARGET_NR_mq_unlink:
11094 p = lock_user_string(arg1 - 1);
11095 ret = get_errno(mq_unlink(p));
11096 unlock_user (p, arg1, 0);
11097 break;
11099 case TARGET_NR_mq_timedsend:
11101 struct timespec ts;
11103 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11104 if (arg5 != 0) {
11105 target_to_host_timespec(&ts, arg5);
11106 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11107 host_to_target_timespec(arg5, &ts);
11108 } else {
11109 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11111 unlock_user (p, arg2, arg3);
11113 break;
11115 case TARGET_NR_mq_timedreceive:
11117 struct timespec ts;
11118 unsigned int prio;
11120 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11121 if (arg5 != 0) {
11122 target_to_host_timespec(&ts, arg5);
11123 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11124 &prio, &ts));
11125 host_to_target_timespec(arg5, &ts);
11126 } else {
11127 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11128 &prio, NULL));
11130 unlock_user (p, arg2, arg3);
11131 if (arg4 != 0)
11132 put_user_u32(prio, arg4);
11134 break;
11136 /* Not implemented for now... */
11137 /* case TARGET_NR_mq_notify: */
11138 /* break; */
11140 case TARGET_NR_mq_getsetattr:
11142 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11143 ret = 0;
11144 if (arg3 != 0) {
11145 ret = mq_getattr(arg1, &posix_mq_attr_out);
11146 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11148 if (arg2 != 0) {
11149 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11150 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11154 break;
11155 #endif
11157 #ifdef CONFIG_SPLICE
11158 #ifdef TARGET_NR_tee
11159 case TARGET_NR_tee:
11161 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11163 break;
11164 #endif
11165 #ifdef TARGET_NR_splice
11166 case TARGET_NR_splice:
11168 loff_t loff_in, loff_out;
11169 loff_t *ploff_in = NULL, *ploff_out = NULL;
11170 if (arg2) {
11171 if (get_user_u64(loff_in, arg2)) {
11172 goto efault;
11174 ploff_in = &loff_in;
11176 if (arg4) {
11177 if (get_user_u64(loff_out, arg4)) {
11178 goto efault;
11180 ploff_out = &loff_out;
11182 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11183 if (arg2) {
11184 if (put_user_u64(loff_in, arg2)) {
11185 goto efault;
11188 if (arg4) {
11189 if (put_user_u64(loff_out, arg4)) {
11190 goto efault;
11194 break;
11195 #endif
11196 #ifdef TARGET_NR_vmsplice
11197 case TARGET_NR_vmsplice:
11199 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11200 if (vec != NULL) {
11201 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11202 unlock_iovec(vec, arg2, arg3, 0);
11203 } else {
11204 ret = -host_to_target_errno(errno);
11207 break;
11208 #endif
11209 #endif /* CONFIG_SPLICE */
11210 #ifdef CONFIG_EVENTFD
11211 #if defined(TARGET_NR_eventfd)
11212 case TARGET_NR_eventfd:
11213 ret = get_errno(eventfd(arg1, 0));
11214 fd_trans_unregister(ret);
11215 break;
11216 #endif
11217 #if defined(TARGET_NR_eventfd2)
11218 case TARGET_NR_eventfd2:
11220 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11221 if (arg2 & TARGET_O_NONBLOCK) {
11222 host_flags |= O_NONBLOCK;
11224 if (arg2 & TARGET_O_CLOEXEC) {
11225 host_flags |= O_CLOEXEC;
11227 ret = get_errno(eventfd(arg1, host_flags));
11228 fd_trans_unregister(ret);
11229 break;
11231 #endif
11232 #endif /* CONFIG_EVENTFD */
11233 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11234 case TARGET_NR_fallocate:
11235 #if TARGET_ABI_BITS == 32
11236 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11237 target_offset64(arg5, arg6)));
11238 #else
11239 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11240 #endif
11241 break;
11242 #endif
11243 #if defined(CONFIG_SYNC_FILE_RANGE)
11244 #if defined(TARGET_NR_sync_file_range)
11245 case TARGET_NR_sync_file_range:
11246 #if TARGET_ABI_BITS == 32
11247 #if defined(TARGET_MIPS)
11248 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11249 target_offset64(arg5, arg6), arg7));
11250 #else
11251 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11252 target_offset64(arg4, arg5), arg6));
11253 #endif /* !TARGET_MIPS */
11254 #else
11255 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11256 #endif
11257 break;
11258 #endif
11259 #if defined(TARGET_NR_sync_file_range2)
11260 case TARGET_NR_sync_file_range2:
11261 /* This is like sync_file_range but the arguments are reordered */
11262 #if TARGET_ABI_BITS == 32
11263 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11264 target_offset64(arg5, arg6), arg2));
11265 #else
11266 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11267 #endif
11268 break;
11269 #endif
11270 #endif
11271 #if defined(TARGET_NR_signalfd4)
/* signalfd4(2): fd = arg1, guest sigset = arg2, flags = arg4.
 * Mask conversion happens inside do_signalfd4(); arg3 (sigsetsize)
 * is presumably validated there -- confirm. */
11272 case TARGET_NR_signalfd4:
11273 ret = do_signalfd4(arg1, arg2, arg4);
11274 break;
11275 #endif
11276 #if defined(TARGET_NR_signalfd)
/* signalfd(2): legacy form, no flags argument -- pass flags = 0. */
11277 case TARGET_NR_signalfd:
11278 ret = do_signalfd4(arg1, arg2, 0);
11279 break;
11280 #endif
11281 #if defined(CONFIG_EPOLL)
11282 #if defined(TARGET_NR_epoll_create)
/* epoll_create(2): the size argument is a historical hint; passed
 * through unchanged. */
11283 case TARGET_NR_epoll_create:
11284 ret = get_errno(epoll_create(arg1));
11285 break;
11286 #endif
11287 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
/* epoll_create1(2): arg1 carries flags (e.g. EPOLL_CLOEXEC). */
11288 case TARGET_NR_epoll_create1:
11289 ret = get_errno(epoll_create1(arg1));
11290 break;
11291 #endif
11292 #if defined(TARGET_NR_epoll_ctl)
11293 case TARGET_NR_epoll_ctl:
11295 struct epoll_event ep;
11296 struct epoll_event *epp = 0;
11297 if (arg4) {
11298 struct target_epoll_event *target_ep;
11299 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11300 goto efault;
11302 ep.events = tswap32(target_ep->events);
11303 /* The epoll_data_t union is just opaque data to the kernel,
11304 * so we transfer all 64 bits across and need not worry what
11305 * actual data type it is.
11307 ep.data.u64 = tswap64(target_ep->data.u64);
11308 unlock_user_struct(target_ep, arg4, 0);
11309 epp = &ep;
11311 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11312 break;
11314 #endif
11316 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11317 #if defined(TARGET_NR_epoll_wait)
11318 case TARGET_NR_epoll_wait:
11319 #endif
11320 #if defined(TARGET_NR_epoll_pwait)
11321 case TARGET_NR_epoll_pwait:
11322 #endif
11324 struct target_epoll_event *target_ep;
11325 struct epoll_event *ep;
11326 int epfd = arg1;
11327 int maxevents = arg3;
11328 int timeout = arg4;
11330 target_ep = lock_user(VERIFY_WRITE, arg2,
11331 maxevents * sizeof(struct target_epoll_event), 1);
11332 if (!target_ep) {
11333 goto efault;
11336 ep = alloca(maxevents * sizeof(struct epoll_event));
11338 switch (num) {
11339 #if defined(TARGET_NR_epoll_pwait)
11340 case TARGET_NR_epoll_pwait:
11342 target_sigset_t *target_set;
11343 sigset_t _set, *set = &_set;
11345 if (arg5) {
11346 if (arg6 != sizeof(target_sigset_t)) {
11347 ret = -TARGET_EINVAL;
11348 break;
11351 target_set = lock_user(VERIFY_READ, arg5,
11352 sizeof(target_sigset_t), 1);
11353 if (!target_set) {
11354 unlock_user(target_ep, arg2, 0);
11355 goto efault;
11357 target_to_host_sigset(set, target_set);
11358 unlock_user(target_set, arg5, 0);
11359 } else {
11360 set = NULL;
11363 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11364 set, SIGSET_T_SIZE));
11365 break;
11367 #endif
11368 #if defined(TARGET_NR_epoll_wait)
11369 case TARGET_NR_epoll_wait:
11370 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11371 NULL, 0));
11372 break;
11373 #endif
11374 default:
11375 ret = -TARGET_ENOSYS;
11377 if (!is_error(ret)) {
11378 int i;
11379 for (i = 0; i < ret; i++) {
11380 target_ep[i].events = tswap32(ep[i].events);
11381 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11384 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11385 break;
11387 #endif
11388 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *tnew, *told;
        struct host_rlimit64 hnew, hold;
        struct host_rlimit64 *pnew = NULL;
        int resource = target_to_host_resource(arg2);

        /* Convert the guest's new-limit struct, if one was supplied. */
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, tnew, arg3, 1)) {
                goto efault;
            }
            hnew.rlim_cur = tswap64(tnew->rlim_cur);
            hnew.rlim_max = tswap64(tnew->rlim_max);
            unlock_user_struct(tnew, arg3, 0);
            pnew = &hnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, pnew,
                                      arg4 ? &hold : NULL));

        /* On success, copy the previous limits back out if requested. */
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, told, arg4, 1)) {
                goto efault;
            }
            told->rlim_cur = tswap64(hold.rlim_cur);
            told->rlim_max = tswap64(hold.rlim_max);
            unlock_user_struct(told, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* args: char *name, size_t len — copy the host's hostname into
         * the guest buffer at arg1. */
        char *buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!buf) {
            ret = -TARGET_EFAULT;
        } else {
            ret = get_errno(gethostname(buf, arg2));
            unlock_user(buf, arg1, arg2);
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Compare-and-exchange on a guest 32-bit word:
         * if *(u32 *)arg6 == arg2, store arg1 there; return the old value.
         * should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* The guest address faulted: raise SIGSEGV for it. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
            /* Bug fix: stop here instead of falling through — the original
             * code went on to compare mem_value, which is uninitialized
             * after a faulted read (undefined behavior), and clobbered the
             * 0xdeadbeef return value.  The queued signal is the real
             * outcome of this path. */
            break;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
        struct sigevent sev = { {0}, };
        struct sigevent *sevp = NULL;
        int clkid = arg1;
        int slot = next_free_host_timer();

        if (slot < 0) {
            /* Every entry in g_posix_timers is already in use. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + slot;

            /* Convert the guest sigevent, if one was supplied. */
            if (arg2) {
                sevp = &sev;
                ret = target_to_host_sigevent(sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else if (put_user(TIMER_MAGIC | slot, arg3, target_timer_t)) {
                /* Hand the guest an id that encodes the slot index. */
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory for timer_settime */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Bug fix: the previous timer value must be written back through
             * arg4 (the old_value pointer) — the original code wrote it
             * through arg2, which is the flags argument — and only when the
             * guest actually passed an old_value pointer.  Also propagate
             * conversion faults instead of ignoring them. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t tid = get_timer_id(arg1);

        if (tid < 0) {
            ret = tid;              /* invalid guest timer id */
        } else if (!arg2) {
            ret = -TARGET_EFAULT;   /* nowhere to copy the result */
        } else {
            timer_t ht = g_posix_timers[tid];
            struct itimerspec curr;

            ret = get_errno(timer_gettime(ht, &curr));
            if (host_to_target_itimerspec(arg2, &curr)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t tid = get_timer_id(arg1);

        if (tid < 0) {
            ret = tid;
        } else {
            ret = get_errno(timer_getoverrun(g_posix_timers[tid]));
        }
        /* NOTE(review): unregistering an fd translator keyed on the
         * overrun count (not a file descriptor) looks suspicious —
         * preserved as-is, confirm against fd_trans usage elsewhere. */
        fd_trans_unregister(ret);
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t tid = get_timer_id(arg1);

        if (tid < 0) {
            ret = tid;
        } else {
            /* Dispose of the host timer and free the slot for reuse. */
            ret = get_errno(timer_delete(g_posix_timers[tid]));
            g_posix_timers[tid] = 0;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* args: clockid, flags — translate the guest TFD_* flag bits
         * to host values before calling through. */
        ret = get_errno(timerfd_create(arg1,
                            target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        /* args: int fd, struct itimerspec *curr_value */
        struct itimerspec curr;

        ret = get_errno(timerfd_gettime(arg1, &curr));

        /* Copy the current value out to the guest, if a buffer was given. */
        if (arg2 && host_to_target_itimerspec(arg2, &curr)) {
            goto efault;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        /* args: int fd, int flags, const struct itimerspec *new_value,
         *       struct itimerspec *old_value */
        struct itimerspec new_value, old_value;
        struct itimerspec *pnew = NULL;

        if (arg3) {
            if (target_to_host_itimerspec(&new_value, arg3)) {
                goto efault;
            }
            pnew = &new_value;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, pnew, &old_value));

        /* Report the previous value back to the guest if requested. */
        if (arg4 && host_to_target_itimerspec(arg4, &old_value)) {
            goto efault;
        }
        break;
    }
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* args: which, who — passed straight through to the host. */
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* args: which, who, ioprio — passed straight through to the host. */
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* args: int fd, int nstype — no argument translation needed. */
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* args: int flags — no argument translation needed. */
        ret = get_errno(unshare(arg1));
        break;
#endif
11645 default:
11646 unimplemented:
11647 gemu_log("qemu: Unsupported syscall: %d\n", num);
11648 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11649 unimplemented_nowarn:
11650 #endif
11651 ret = -TARGET_ENOSYS;
11652 break;
11654 fail:
11655 #ifdef DEBUG
11656 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11657 #endif
11658 if(do_strace)
11659 print_syscall_ret(num, ret);
11660 trace_guest_user_syscall_ret(cpu, num, ret);
11661 return ret;
11662 efault:
11663 ret = -TARGET_EFAULT;
11664 goto fail;