xen: handle inbound migration of VMs without ioreq server pages
[qemu.git] / linux-user / syscall.c
blob833f853200e269f9edfdf43a8744254d2eaf8016
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
118 //#define DEBUG
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 type6 arg6) \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 defined(__s390x__)
197 #define __NR__llseek __NR_lseek
198 #endif
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
203 #endif
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
208 /* This is a replacement for the host gettid() and must return a host
209 errno. */
210 static int gettid(void) {
211 return -ENOSYS;
213 #endif
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
216 #endif
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #endif
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group,int,error_code)
229 #endif
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address,int *,tidptr)
232 #endif
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
235 const struct timespec *,timeout,int *,uaddr2,int,val3)
236 #endif
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
239 unsigned long *, user_mask_ptr);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
244 void *, arg);
245 _syscall2(int, capget, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 _syscall2(int, capset, struct __user_cap_header_struct *, header,
248 struct __user_cap_data_struct *, data);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get, int, which, int, who)
251 #endif
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
254 #endif
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
257 #endif
259 static bitmask_transtbl fcntl_flags_tbl[] = {
260 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
261 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
262 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
263 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
264 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
265 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
266 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
267 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
268 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
269 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
270 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
271 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
272 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
275 #endif
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
278 #endif
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
281 #endif
282 #if defined(O_PATH)
283 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
284 #endif
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
288 #endif
289 { 0, 0, 0, 0 }
292 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
293 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
294 typedef struct TargetFdTrans {
295 TargetFdDataFunc host_to_target_data;
296 TargetFdDataFunc target_to_host_data;
297 TargetFdAddrFunc target_to_host_addr;
298 } TargetFdTrans;
300 static TargetFdTrans **target_fd_trans;
302 static unsigned int target_fd_max;
304 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
306 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
307 return target_fd_trans[fd]->target_to_host_data;
309 return NULL;
312 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
314 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
315 return target_fd_trans[fd]->host_to_target_data;
317 return NULL;
320 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
322 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
323 return target_fd_trans[fd]->target_to_host_addr;
325 return NULL;
328 static void fd_trans_register(int fd, TargetFdTrans *trans)
330 unsigned int oldmax;
332 if (fd >= target_fd_max) {
333 oldmax = target_fd_max;
334 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
335 target_fd_trans = g_renew(TargetFdTrans *,
336 target_fd_trans, target_fd_max);
337 memset((void *)(target_fd_trans + oldmax), 0,
338 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
340 target_fd_trans[fd] = trans;
343 static void fd_trans_unregister(int fd)
345 if (fd >= 0 && fd < target_fd_max) {
346 target_fd_trans[fd] = NULL;
350 static void fd_trans_dup(int oldfd, int newfd)
352 fd_trans_unregister(newfd);
353 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
354 fd_trans_register(newfd, target_fd_trans[oldfd]);
/*
 * getcwd() wrapper following the kernel syscall convention: returns
 * the length of the path string including the trailing NUL on
 * success, or -1 with errno set on failure.
 */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        /* errno has already been set by getcwd() */
        return -1;
    }
    return strlen(cwd) + 1;
}
367 #ifdef TARGET_NR_utimensat
368 #ifdef CONFIG_UTIMENSAT
369 static int sys_utimensat(int dirfd, const char *pathname,
370 const struct timespec times[2], int flags)
372 if (pathname == NULL)
373 return futimens(dirfd, times);
374 else
375 return utimensat(dirfd, pathname, times, flags);
377 #elif defined(__NR_utimensat)
378 #define __NR_sys_utimensat __NR_utimensat
379 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
380 const struct timespec *,tsp,int,flags)
381 #else
382 static int sys_utimensat(int dirfd, const char *pathname,
383 const struct timespec times[2], int flags)
385 errno = ENOSYS;
386 return -1;
388 #endif
389 #endif /* TARGET_NR_utimensat */
391 #ifdef CONFIG_INOTIFY
392 #include <sys/inotify.h>
394 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
395 static int sys_inotify_init(void)
397 return (inotify_init());
399 #endif
400 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
401 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
403 return (inotify_add_watch(fd, pathname, mask));
405 #endif
406 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
407 static int sys_inotify_rm_watch(int fd, int32_t wd)
409 return (inotify_rm_watch(fd, wd));
411 #endif
412 #ifdef CONFIG_INOTIFY1
413 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
414 static int sys_inotify_init1(int flags)
416 return (inotify_init1(flags));
418 #endif
419 #endif
420 #else
421 /* Userspace can usually survive runtime without inotify */
422 #undef TARGET_NR_inotify_init
423 #undef TARGET_NR_inotify_init1
424 #undef TARGET_NR_inotify_add_watch
425 #undef TARGET_NR_inotify_rm_watch
426 #endif /* CONFIG_INOTIFY */
428 #if defined(TARGET_NR_prlimit64)
429 #ifndef __NR_prlimit64
430 # define __NR_prlimit64 -1
431 #endif
432 #define __NR_sys_prlimit64 __NR_prlimit64
433 /* The glibc rlimit structure may not be that used by the underlying syscall */
434 struct host_rlimit64 {
435 uint64_t rlim_cur;
436 uint64_t rlim_max;
438 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
439 const struct host_rlimit64 *, new_limit,
440 struct host_rlimit64 *, old_limit)
441 #endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.  A slot
 * holding 0 is free; anything else means the slot is claimed. */
static timer_t g_posix_timers[32] = { 0, };

/*
 * Find and claim the first free timer slot, marking it with a dummy
 * non-zero value.  Returns the slot index, or -1 if all are in use.
 */
static inline int next_free_host_timer(void)
{
    int slot;

    /* FIXME: claiming a slot this way may need a lock against
     * concurrent threads. */
    for (slot = 0; slot < ARRAY_SIZE(g_posix_timers); slot++) {
        if (g_posix_timers[slot] == 0) {
            g_posix_timers[slot] = (timer_t) 1;
            return slot;
        }
    }
    return -1;
}
#endif
462 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
463 #ifdef TARGET_ARM
464 static inline int regpairs_aligned(void *cpu_env) {
465 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
467 #elif defined(TARGET_MIPS)
468 static inline int regpairs_aligned(void *cpu_env) { return 1; }
469 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
470 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
471 * of registers which translates to the same as ARM/MIPS, because we start with
472 * r3 as arg1 */
473 static inline int regpairs_aligned(void *cpu_env) { return 1; }
474 #else
475 static inline int regpairs_aligned(void *cpu_env) { return 0; }
476 #endif
478 #define ERRNO_TABLE_SIZE 1200
480 /* target_to_host_errno_table[] is initialized from
481 * host_to_target_errno_table[] in syscall_init(). */
482 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
486 * This list is the union of errno values overridden in asm-<arch>/errno.h
487 * minus the errnos that are not actually generic to all archs.
489 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
490 [EAGAIN] = TARGET_EAGAIN,
491 [EIDRM] = TARGET_EIDRM,
492 [ECHRNG] = TARGET_ECHRNG,
493 [EL2NSYNC] = TARGET_EL2NSYNC,
494 [EL3HLT] = TARGET_EL3HLT,
495 [EL3RST] = TARGET_EL3RST,
496 [ELNRNG] = TARGET_ELNRNG,
497 [EUNATCH] = TARGET_EUNATCH,
498 [ENOCSI] = TARGET_ENOCSI,
499 [EL2HLT] = TARGET_EL2HLT,
500 [EDEADLK] = TARGET_EDEADLK,
501 [ENOLCK] = TARGET_ENOLCK,
502 [EBADE] = TARGET_EBADE,
503 [EBADR] = TARGET_EBADR,
504 [EXFULL] = TARGET_EXFULL,
505 [ENOANO] = TARGET_ENOANO,
506 [EBADRQC] = TARGET_EBADRQC,
507 [EBADSLT] = TARGET_EBADSLT,
508 [EBFONT] = TARGET_EBFONT,
509 [ENOSTR] = TARGET_ENOSTR,
510 [ENODATA] = TARGET_ENODATA,
511 [ETIME] = TARGET_ETIME,
512 [ENOSR] = TARGET_ENOSR,
513 [ENONET] = TARGET_ENONET,
514 [ENOPKG] = TARGET_ENOPKG,
515 [EREMOTE] = TARGET_EREMOTE,
516 [ENOLINK] = TARGET_ENOLINK,
517 [EADV] = TARGET_EADV,
518 [ESRMNT] = TARGET_ESRMNT,
519 [ECOMM] = TARGET_ECOMM,
520 [EPROTO] = TARGET_EPROTO,
521 [EDOTDOT] = TARGET_EDOTDOT,
522 [EMULTIHOP] = TARGET_EMULTIHOP,
523 [EBADMSG] = TARGET_EBADMSG,
524 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
525 [EOVERFLOW] = TARGET_EOVERFLOW,
526 [ENOTUNIQ] = TARGET_ENOTUNIQ,
527 [EBADFD] = TARGET_EBADFD,
528 [EREMCHG] = TARGET_EREMCHG,
529 [ELIBACC] = TARGET_ELIBACC,
530 [ELIBBAD] = TARGET_ELIBBAD,
531 [ELIBSCN] = TARGET_ELIBSCN,
532 [ELIBMAX] = TARGET_ELIBMAX,
533 [ELIBEXEC] = TARGET_ELIBEXEC,
534 [EILSEQ] = TARGET_EILSEQ,
535 [ENOSYS] = TARGET_ENOSYS,
536 [ELOOP] = TARGET_ELOOP,
537 [ERESTART] = TARGET_ERESTART,
538 [ESTRPIPE] = TARGET_ESTRPIPE,
539 [ENOTEMPTY] = TARGET_ENOTEMPTY,
540 [EUSERS] = TARGET_EUSERS,
541 [ENOTSOCK] = TARGET_ENOTSOCK,
542 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
543 [EMSGSIZE] = TARGET_EMSGSIZE,
544 [EPROTOTYPE] = TARGET_EPROTOTYPE,
545 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
546 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
547 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
548 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
549 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
550 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
551 [EADDRINUSE] = TARGET_EADDRINUSE,
552 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
553 [ENETDOWN] = TARGET_ENETDOWN,
554 [ENETUNREACH] = TARGET_ENETUNREACH,
555 [ENETRESET] = TARGET_ENETRESET,
556 [ECONNABORTED] = TARGET_ECONNABORTED,
557 [ECONNRESET] = TARGET_ECONNRESET,
558 [ENOBUFS] = TARGET_ENOBUFS,
559 [EISCONN] = TARGET_EISCONN,
560 [ENOTCONN] = TARGET_ENOTCONN,
561 [EUCLEAN] = TARGET_EUCLEAN,
562 [ENOTNAM] = TARGET_ENOTNAM,
563 [ENAVAIL] = TARGET_ENAVAIL,
564 [EISNAM] = TARGET_EISNAM,
565 [EREMOTEIO] = TARGET_EREMOTEIO,
566 [ESHUTDOWN] = TARGET_ESHUTDOWN,
567 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
568 [ETIMEDOUT] = TARGET_ETIMEDOUT,
569 [ECONNREFUSED] = TARGET_ECONNREFUSED,
570 [EHOSTDOWN] = TARGET_EHOSTDOWN,
571 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
572 [EALREADY] = TARGET_EALREADY,
573 [EINPROGRESS] = TARGET_EINPROGRESS,
574 [ESTALE] = TARGET_ESTALE,
575 [ECANCELED] = TARGET_ECANCELED,
576 [ENOMEDIUM] = TARGET_ENOMEDIUM,
577 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
578 #ifdef ENOKEY
579 [ENOKEY] = TARGET_ENOKEY,
580 #endif
581 #ifdef EKEYEXPIRED
582 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
583 #endif
584 #ifdef EKEYREVOKED
585 [EKEYREVOKED] = TARGET_EKEYREVOKED,
586 #endif
587 #ifdef EKEYREJECTED
588 [EKEYREJECTED] = TARGET_EKEYREJECTED,
589 #endif
590 #ifdef EOWNERDEAD
591 [EOWNERDEAD] = TARGET_EOWNERDEAD,
592 #endif
593 #ifdef ENOTRECOVERABLE
594 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
595 #endif
598 static inline int host_to_target_errno(int err)
600 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
601 host_to_target_errno_table[err]) {
602 return host_to_target_errno_table[err];
604 return err;
607 static inline int target_to_host_errno(int err)
609 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
610 target_to_host_errno_table[err]) {
611 return target_to_host_errno_table[err];
613 return err;
616 static inline abi_long get_errno(abi_long ret)
618 if (ret == -1)
619 return -host_to_target_errno(errno);
620 else
621 return ret;
624 static inline int is_error(abi_long ret)
626 return (abi_ulong)ret >= (abi_ulong)(-4096);
629 const char *target_strerror(int err)
631 if (err == TARGET_ERESTARTSYS) {
632 return "To be restarted";
634 if (err == TARGET_QEMU_ESIGRETURN) {
635 return "Successful exit from sigreturn";
638 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
639 return NULL;
641 return strerror(target_to_host_errno(err));
644 #define safe_syscall0(type, name) \
645 static type safe_##name(void) \
647 return safe_syscall(__NR_##name); \
650 #define safe_syscall1(type, name, type1, arg1) \
651 static type safe_##name(type1 arg1) \
653 return safe_syscall(__NR_##name, arg1); \
656 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
657 static type safe_##name(type1 arg1, type2 arg2) \
659 return safe_syscall(__NR_##name, arg1, arg2); \
662 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
663 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
665 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
668 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
669 type4, arg4) \
670 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
672 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
675 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
676 type4, arg4, type5, arg5) \
677 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
678 type5 arg5) \
680 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
683 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
684 type4, arg4, type5, arg5, type6, arg6) \
685 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
686 type5 arg5, type6 arg6) \
688 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
691 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
692 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
693 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
694 int, flags, mode_t, mode)
695 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
696 struct rusage *, rusage)
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698 int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
701 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
702 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
703 struct timespec *, tsp, const sigset_t *, sigmask,
704 size_t, sigsetsize)
705 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
706 int, maxevents, int, timeout, const sigset_t *, sigmask,
707 size_t, sigsetsize)
708 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
709 const struct timespec *,timeout,int *,uaddr2,int,val3)
710 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
711 safe_syscall2(int, kill, pid_t, pid, int, sig)
712 safe_syscall2(int, tkill, int, tid, int, sig)
713 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
714 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
715 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
716 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
717 socklen_t, addrlen)
718 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
719 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
720 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
721 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
722 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
723 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
724 safe_syscall2(int, flock, int, fd, int, operation)
725 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
726 const struct timespec *, uts, size_t, sigsetsize)
727 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
728 int, flags)
729 safe_syscall2(int, nanosleep, const struct timespec *, req,
730 struct timespec *, rem)
731 #ifdef TARGET_NR_clock_nanosleep
732 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
733 const struct timespec *, req, struct timespec *, rem)
734 #endif
735 #ifdef __NR_msgsnd
736 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
737 int, flags)
738 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
739 long, msgtype, int, flags)
740 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
741 unsigned, nsops, const struct timespec *, timeout)
742 #else
743 /* This host kernel architecture uses a single ipc syscall; fake up
744 * wrappers for the sub-operations to hide this implementation detail.
745 * Annoyingly we can't include linux/ipc.h to get the constant definitions
746 * for the call parameter because some structs in there conflict with the
747 * sys/ipc.h ones. So we just define them here, and rely on them being
748 * the same for all host architectures.
750 #define Q_SEMTIMEDOP 4
751 #define Q_MSGSND 11
752 #define Q_MSGRCV 12
753 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
755 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
756 void *, ptr, long, fifth)
757 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
759 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
761 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
763 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
765 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
766 const struct timespec *timeout)
768 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
769 (long)timeout);
771 #endif
772 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
773 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
774 size_t, len, unsigned, prio, const struct timespec *, timeout)
775 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
776 size_t, len, unsigned *, prio, const struct timespec *, timeout)
777 #endif
778 /* We do ioctl like this rather than via safe_syscall3 to preserve the
779 * "third argument might be integer or pointer or not present" behaviour of
780 * the libc function.
782 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
783 /* Similarly for fcntl. Note that callers must always:
784 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
785 * use the flock64 struct rather than unsuffixed flock
786 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
788 #ifdef __NR_fcntl64
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
790 #else
791 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
792 #endif
794 static inline int host_to_target_sock_type(int host_type)
796 int target_type;
798 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
799 case SOCK_DGRAM:
800 target_type = TARGET_SOCK_DGRAM;
801 break;
802 case SOCK_STREAM:
803 target_type = TARGET_SOCK_STREAM;
804 break;
805 default:
806 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
807 break;
810 #if defined(SOCK_CLOEXEC)
811 if (host_type & SOCK_CLOEXEC) {
812 target_type |= TARGET_SOCK_CLOEXEC;
814 #endif
816 #if defined(SOCK_NONBLOCK)
817 if (host_type & SOCK_NONBLOCK) {
818 target_type |= TARGET_SOCK_NONBLOCK;
820 #endif
822 return target_type;
825 static abi_ulong target_brk;
826 static abi_ulong target_original_brk;
827 static abi_ulong brk_page;
829 void target_set_brk(abi_ulong new_brk)
831 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
832 brk_page = HOST_PAGE_ALIGN(target_brk);
835 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
836 #define DEBUGF_BRK(message, args...)
838 /* do_brk() must return target values and target errnos. */
839 abi_long do_brk(abi_ulong new_brk)
841 abi_long mapped_addr;
842 abi_ulong new_alloc_size;
844 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
846 if (!new_brk) {
847 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
848 return target_brk;
850 if (new_brk < target_original_brk) {
851 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
852 target_brk);
853 return target_brk;
856 /* If the new brk is less than the highest page reserved to the
857 * target heap allocation, set it and we're almost done... */
858 if (new_brk <= brk_page) {
859 /* Heap contents are initialized to zero, as for anonymous
860 * mapped pages. */
861 if (new_brk > target_brk) {
862 memset(g2h(target_brk), 0, new_brk - target_brk);
864 target_brk = new_brk;
865 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
866 return target_brk;
869 /* We need to allocate more memory after the brk... Note that
870 * we don't use MAP_FIXED because that will map over the top of
871 * any existing mapping (like the one with the host libc or qemu
872 * itself); instead we treat "mapped but at wrong address" as
873 * a failure and unmap again.
875 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
876 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
877 PROT_READ|PROT_WRITE,
878 MAP_ANON|MAP_PRIVATE, 0, 0));
880 if (mapped_addr == brk_page) {
881 /* Heap contents are initialized to zero, as for anonymous
882 * mapped pages. Technically the new pages are already
883 * initialized to zero since they *are* anonymous mapped
884 * pages, however we have to take care with the contents that
885 * come from the remaining part of the previous page: it may
886 * contains garbage data due to a previous heap usage (grown
887 * then shrunken). */
888 memset(g2h(target_brk), 0, brk_page - target_brk);
890 target_brk = new_brk;
891 brk_page = HOST_PAGE_ALIGN(target_brk);
892 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
893 target_brk);
894 return target_brk;
895 } else if (mapped_addr != -1) {
896 /* Mapped but at wrong address, meaning there wasn't actually
897 * enough space for this brk.
899 target_munmap(mapped_addr, new_alloc_size);
900 mapped_addr = -1;
901 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
903 else {
904 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
907 #if defined(TARGET_ALPHA)
908 /* We (partially) emulate OSF/1 on Alpha, which requires we
909 return a proper errno, not an unchanged brk value. */
910 return -TARGET_ENOMEM;
911 #endif
912 /* For everything else, return the previous break. */
913 return target_brk;
916 static inline abi_long copy_from_user_fdset(fd_set *fds,
917 abi_ulong target_fds_addr,
918 int n)
920 int i, nw, j, k;
921 abi_ulong b, *target_fds;
923 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
924 if (!(target_fds = lock_user(VERIFY_READ,
925 target_fds_addr,
926 sizeof(abi_ulong) * nw,
927 1)))
928 return -TARGET_EFAULT;
930 FD_ZERO(fds);
931 k = 0;
932 for (i = 0; i < nw; i++) {
933 /* grab the abi_ulong */
934 __get_user(b, &target_fds[i]);
935 for (j = 0; j < TARGET_ABI_BITS; j++) {
936 /* check the bit inside the abi_ulong */
937 if ((b >> j) & 1)
938 FD_SET(k, fds);
939 k++;
943 unlock_user(target_fds, target_fds_addr, 0);
945 return 0;
948 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
949 abi_ulong target_fds_addr,
950 int n)
952 if (target_fds_addr) {
953 if (copy_from_user_fdset(fds, target_fds_addr, n))
954 return -TARGET_EFAULT;
955 *fds_ptr = fds;
956 } else {
957 *fds_ptr = NULL;
959 return 0;
962 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
963 const fd_set *fds,
964 int n)
966 int i, nw, j, k;
967 abi_long v;
968 abi_ulong *target_fds;
970 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
971 if (!(target_fds = lock_user(VERIFY_WRITE,
972 target_fds_addr,
973 sizeof(abi_ulong) * nw,
974 0)))
975 return -TARGET_EFAULT;
977 k = 0;
978 for (i = 0; i < nw; i++) {
979 v = 0;
980 for (j = 0; j < TARGET_ABI_BITS; j++) {
981 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
982 k++;
984 __put_user(v, &target_fds[i]);
987 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
989 return 0;
992 #if defined(__alpha__)
993 #define HOST_HZ 1024
994 #else
995 #define HOST_HZ 100
996 #endif
998 static inline abi_long host_to_target_clock_t(long ticks)
1000 #if HOST_HZ == TARGET_HZ
1001 return ticks;
1002 #else
1003 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1004 #endif
1007 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1008 const struct rusage *rusage)
1010 struct target_rusage *target_rusage;
1012 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1013 return -TARGET_EFAULT;
1014 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1015 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1016 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1017 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1018 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1019 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1020 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1021 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1022 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1023 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1024 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1025 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1026 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1027 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1028 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1029 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1030 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1031 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1032 unlock_user_struct(target_rusage, target_addr, 1);
1034 return 0;
1037 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1039 abi_ulong target_rlim_swap;
1040 rlim_t result;
1042 target_rlim_swap = tswapal(target_rlim);
1043 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1044 return RLIM_INFINITY;
1046 result = target_rlim_swap;
1047 if (target_rlim_swap != (rlim_t)result)
1048 return RLIM_INFINITY;
1050 return result;
1053 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1055 abi_ulong target_rlim_swap;
1056 abi_ulong result;
1058 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1059 target_rlim_swap = TARGET_RLIM_INFINITY;
1060 else
1061 target_rlim_swap = rlim;
1062 result = tswapal(target_rlim_swap);
1064 return result;
1067 static inline int target_to_host_resource(int code)
1069 switch (code) {
1070 case TARGET_RLIMIT_AS:
1071 return RLIMIT_AS;
1072 case TARGET_RLIMIT_CORE:
1073 return RLIMIT_CORE;
1074 case TARGET_RLIMIT_CPU:
1075 return RLIMIT_CPU;
1076 case TARGET_RLIMIT_DATA:
1077 return RLIMIT_DATA;
1078 case TARGET_RLIMIT_FSIZE:
1079 return RLIMIT_FSIZE;
1080 case TARGET_RLIMIT_LOCKS:
1081 return RLIMIT_LOCKS;
1082 case TARGET_RLIMIT_MEMLOCK:
1083 return RLIMIT_MEMLOCK;
1084 case TARGET_RLIMIT_MSGQUEUE:
1085 return RLIMIT_MSGQUEUE;
1086 case TARGET_RLIMIT_NICE:
1087 return RLIMIT_NICE;
1088 case TARGET_RLIMIT_NOFILE:
1089 return RLIMIT_NOFILE;
1090 case TARGET_RLIMIT_NPROC:
1091 return RLIMIT_NPROC;
1092 case TARGET_RLIMIT_RSS:
1093 return RLIMIT_RSS;
1094 case TARGET_RLIMIT_RTPRIO:
1095 return RLIMIT_RTPRIO;
1096 case TARGET_RLIMIT_SIGPENDING:
1097 return RLIMIT_SIGPENDING;
1098 case TARGET_RLIMIT_STACK:
1099 return RLIMIT_STACK;
1100 default:
1101 return code;
1105 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1106 abi_ulong target_tv_addr)
1108 struct target_timeval *target_tv;
1110 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1111 return -TARGET_EFAULT;
1113 __get_user(tv->tv_sec, &target_tv->tv_sec);
1114 __get_user(tv->tv_usec, &target_tv->tv_usec);
1116 unlock_user_struct(target_tv, target_tv_addr, 0);
1118 return 0;
1121 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1122 const struct timeval *tv)
1124 struct target_timeval *target_tv;
1126 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1127 return -TARGET_EFAULT;
1129 __put_user(tv->tv_sec, &target_tv->tv_sec);
1130 __put_user(tv->tv_usec, &target_tv->tv_usec);
1132 unlock_user_struct(target_tv, target_tv_addr, 1);
1134 return 0;
1137 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1138 abi_ulong target_tz_addr)
1140 struct target_timezone *target_tz;
1142 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1143 return -TARGET_EFAULT;
1146 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1147 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1149 unlock_user_struct(target_tz, target_tz_addr, 0);
1151 return 0;
1154 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1155 #include <mqueue.h>
1157 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1158 abi_ulong target_mq_attr_addr)
1160 struct target_mq_attr *target_mq_attr;
1162 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1163 target_mq_attr_addr, 1))
1164 return -TARGET_EFAULT;
1166 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1167 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1168 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1169 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1171 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1173 return 0;
1176 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1177 const struct mq_attr *attr)
1179 struct target_mq_attr *target_mq_attr;
1181 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1182 target_mq_attr_addr, 0))
1183 return -TARGET_EFAULT;
1185 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1186 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1187 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1188 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1190 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1192 return 0;
1194 #endif
1196 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1197 /* do_select() must return target values and target errnos. */
1198 static abi_long do_select(int n,
1199 abi_ulong rfd_addr, abi_ulong wfd_addr,
1200 abi_ulong efd_addr, abi_ulong target_tv_addr)
1202 fd_set rfds, wfds, efds;
1203 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1204 struct timeval tv;
1205 struct timespec ts, *ts_ptr;
1206 abi_long ret;
1208 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1209 if (ret) {
1210 return ret;
1212 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1213 if (ret) {
1214 return ret;
1216 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1217 if (ret) {
1218 return ret;
1221 if (target_tv_addr) {
1222 if (copy_from_user_timeval(&tv, target_tv_addr))
1223 return -TARGET_EFAULT;
1224 ts.tv_sec = tv.tv_sec;
1225 ts.tv_nsec = tv.tv_usec * 1000;
1226 ts_ptr = &ts;
1227 } else {
1228 ts_ptr = NULL;
1231 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1232 ts_ptr, NULL));
1234 if (!is_error(ret)) {
1235 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1236 return -TARGET_EFAULT;
1237 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1238 return -TARGET_EFAULT;
1239 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1240 return -TARGET_EFAULT;
1242 if (target_tv_addr) {
1243 tv.tv_sec = ts.tv_sec;
1244 tv.tv_usec = ts.tv_nsec / 1000;
1245 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1246 return -TARGET_EFAULT;
1251 return ret;
1253 #endif
1255 static abi_long do_pipe2(int host_pipe[], int flags)
1257 #ifdef CONFIG_PIPE2
1258 return pipe2(host_pipe, flags);
1259 #else
1260 return -ENOSYS;
1261 #endif
1264 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1265 int flags, int is_pipe2)
1267 int host_pipe[2];
1268 abi_long ret;
1269 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1271 if (is_error(ret))
1272 return get_errno(ret);
1274 /* Several targets have special calling conventions for the original
1275 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1276 if (!is_pipe2) {
1277 #if defined(TARGET_ALPHA)
1278 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1279 return host_pipe[0];
1280 #elif defined(TARGET_MIPS)
1281 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1282 return host_pipe[0];
1283 #elif defined(TARGET_SH4)
1284 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1285 return host_pipe[0];
1286 #elif defined(TARGET_SPARC)
1287 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1288 return host_pipe[0];
1289 #endif
1292 if (put_user_s32(host_pipe[0], pipedes)
1293 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1294 return -TARGET_EFAULT;
1295 return get_errno(ret);
1298 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1299 abi_ulong target_addr,
1300 socklen_t len)
1302 struct target_ip_mreqn *target_smreqn;
1304 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1305 if (!target_smreqn)
1306 return -TARGET_EFAULT;
1307 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1308 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1309 if (len == sizeof(struct target_ip_mreqn))
1310 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1311 unlock_user(target_smreqn, target_addr, 0);
1313 return 0;
1316 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1317 abi_ulong target_addr,
1318 socklen_t len)
1320 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1321 sa_family_t sa_family;
1322 struct target_sockaddr *target_saddr;
1324 if (fd_trans_target_to_host_addr(fd)) {
1325 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1328 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1329 if (!target_saddr)
1330 return -TARGET_EFAULT;
1332 sa_family = tswap16(target_saddr->sa_family);
1334 /* Oops. The caller might send a incomplete sun_path; sun_path
1335 * must be terminated by \0 (see the manual page), but
1336 * unfortunately it is quite common to specify sockaddr_un
1337 * length as "strlen(x->sun_path)" while it should be
1338 * "strlen(...) + 1". We'll fix that here if needed.
1339 * Linux kernel has a similar feature.
1342 if (sa_family == AF_UNIX) {
1343 if (len < unix_maxlen && len > 0) {
1344 char *cp = (char*)target_saddr;
1346 if ( cp[len-1] && !cp[len] )
1347 len++;
1349 if (len > unix_maxlen)
1350 len = unix_maxlen;
1353 memcpy(addr, target_saddr, len);
1354 addr->sa_family = sa_family;
1355 if (sa_family == AF_NETLINK) {
1356 struct sockaddr_nl *nladdr;
1358 nladdr = (struct sockaddr_nl *)addr;
1359 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1360 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1361 } else if (sa_family == AF_PACKET) {
1362 struct target_sockaddr_ll *lladdr;
1364 lladdr = (struct target_sockaddr_ll *)addr;
1365 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1366 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1368 unlock_user(target_saddr, target_addr, 0);
1370 return 0;
1373 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1374 struct sockaddr *addr,
1375 socklen_t len)
1377 struct target_sockaddr *target_saddr;
1379 if (len == 0) {
1380 return 0;
1383 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1384 if (!target_saddr)
1385 return -TARGET_EFAULT;
1386 memcpy(target_saddr, addr, len);
1387 if (len >= offsetof(struct target_sockaddr, sa_family) +
1388 sizeof(target_saddr->sa_family)) {
1389 target_saddr->sa_family = tswap16(addr->sa_family);
1391 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1392 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1393 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1394 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1395 } else if (addr->sa_family == AF_PACKET) {
1396 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1397 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1398 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1400 unlock_user(target_saddr, target_addr, len);
1402 return 0;
1405 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1406 struct target_msghdr *target_msgh)
1408 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1409 abi_long msg_controllen;
1410 abi_ulong target_cmsg_addr;
1411 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1412 socklen_t space = 0;
1414 msg_controllen = tswapal(target_msgh->msg_controllen);
1415 if (msg_controllen < sizeof (struct target_cmsghdr))
1416 goto the_end;
1417 target_cmsg_addr = tswapal(target_msgh->msg_control);
1418 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1419 target_cmsg_start = target_cmsg;
1420 if (!target_cmsg)
1421 return -TARGET_EFAULT;
1423 while (cmsg && target_cmsg) {
1424 void *data = CMSG_DATA(cmsg);
1425 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1427 int len = tswapal(target_cmsg->cmsg_len)
1428 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1430 space += CMSG_SPACE(len);
1431 if (space > msgh->msg_controllen) {
1432 space -= CMSG_SPACE(len);
1433 /* This is a QEMU bug, since we allocated the payload
1434 * area ourselves (unlike overflow in host-to-target
1435 * conversion, which is just the guest giving us a buffer
1436 * that's too small). It can't happen for the payload types
1437 * we currently support; if it becomes an issue in future
1438 * we would need to improve our allocation strategy to
1439 * something more intelligent than "twice the size of the
1440 * target buffer we're reading from".
1442 gemu_log("Host cmsg overflow\n");
1443 break;
1446 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1447 cmsg->cmsg_level = SOL_SOCKET;
1448 } else {
1449 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1451 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1452 cmsg->cmsg_len = CMSG_LEN(len);
1454 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1455 int *fd = (int *)data;
1456 int *target_fd = (int *)target_data;
1457 int i, numfds = len / sizeof(int);
1459 for (i = 0; i < numfds; i++) {
1460 __get_user(fd[i], target_fd + i);
1462 } else if (cmsg->cmsg_level == SOL_SOCKET
1463 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1464 struct ucred *cred = (struct ucred *)data;
1465 struct target_ucred *target_cred =
1466 (struct target_ucred *)target_data;
1468 __get_user(cred->pid, &target_cred->pid);
1469 __get_user(cred->uid, &target_cred->uid);
1470 __get_user(cred->gid, &target_cred->gid);
1471 } else {
1472 gemu_log("Unsupported ancillary data: %d/%d\n",
1473 cmsg->cmsg_level, cmsg->cmsg_type);
1474 memcpy(data, target_data, len);
1477 cmsg = CMSG_NXTHDR(msgh, cmsg);
1478 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1479 target_cmsg_start);
1481 unlock_user(target_cmsg, target_cmsg_addr, 0);
1482 the_end:
1483 msgh->msg_controllen = space;
1484 return 0;
1487 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1488 struct msghdr *msgh)
1490 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1491 abi_long msg_controllen;
1492 abi_ulong target_cmsg_addr;
1493 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1494 socklen_t space = 0;
1496 msg_controllen = tswapal(target_msgh->msg_controllen);
1497 if (msg_controllen < sizeof (struct target_cmsghdr))
1498 goto the_end;
1499 target_cmsg_addr = tswapal(target_msgh->msg_control);
1500 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1501 target_cmsg_start = target_cmsg;
1502 if (!target_cmsg)
1503 return -TARGET_EFAULT;
1505 while (cmsg && target_cmsg) {
1506 void *data = CMSG_DATA(cmsg);
1507 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1509 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1510 int tgt_len, tgt_space;
1512 /* We never copy a half-header but may copy half-data;
1513 * this is Linux's behaviour in put_cmsg(). Note that
1514 * truncation here is a guest problem (which we report
1515 * to the guest via the CTRUNC bit), unlike truncation
1516 * in target_to_host_cmsg, which is a QEMU bug.
1518 if (msg_controllen < sizeof(struct cmsghdr)) {
1519 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1520 break;
1523 if (cmsg->cmsg_level == SOL_SOCKET) {
1524 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1525 } else {
1526 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1528 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1530 tgt_len = TARGET_CMSG_LEN(len);
1532 /* Payload types which need a different size of payload on
1533 * the target must adjust tgt_len here.
1535 switch (cmsg->cmsg_level) {
1536 case SOL_SOCKET:
1537 switch (cmsg->cmsg_type) {
1538 case SO_TIMESTAMP:
1539 tgt_len = sizeof(struct target_timeval);
1540 break;
1541 default:
1542 break;
1544 default:
1545 break;
1548 if (msg_controllen < tgt_len) {
1549 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1550 tgt_len = msg_controllen;
1553 /* We must now copy-and-convert len bytes of payload
1554 * into tgt_len bytes of destination space. Bear in mind
1555 * that in both source and destination we may be dealing
1556 * with a truncated value!
1558 switch (cmsg->cmsg_level) {
1559 case SOL_SOCKET:
1560 switch (cmsg->cmsg_type) {
1561 case SCM_RIGHTS:
1563 int *fd = (int *)data;
1564 int *target_fd = (int *)target_data;
1565 int i, numfds = tgt_len / sizeof(int);
1567 for (i = 0; i < numfds; i++) {
1568 __put_user(fd[i], target_fd + i);
1570 break;
1572 case SO_TIMESTAMP:
1574 struct timeval *tv = (struct timeval *)data;
1575 struct target_timeval *target_tv =
1576 (struct target_timeval *)target_data;
1578 if (len != sizeof(struct timeval) ||
1579 tgt_len != sizeof(struct target_timeval)) {
1580 goto unimplemented;
1583 /* copy struct timeval to target */
1584 __put_user(tv->tv_sec, &target_tv->tv_sec);
1585 __put_user(tv->tv_usec, &target_tv->tv_usec);
1586 break;
1588 case SCM_CREDENTIALS:
1590 struct ucred *cred = (struct ucred *)data;
1591 struct target_ucred *target_cred =
1592 (struct target_ucred *)target_data;
1594 __put_user(cred->pid, &target_cred->pid);
1595 __put_user(cred->uid, &target_cred->uid);
1596 __put_user(cred->gid, &target_cred->gid);
1597 break;
1599 default:
1600 goto unimplemented;
1602 break;
1604 default:
1605 unimplemented:
1606 gemu_log("Unsupported ancillary data: %d/%d\n",
1607 cmsg->cmsg_level, cmsg->cmsg_type);
1608 memcpy(target_data, data, MIN(len, tgt_len));
1609 if (tgt_len > len) {
1610 memset(target_data + len, 0, tgt_len - len);
1614 target_cmsg->cmsg_len = tswapal(tgt_len);
1615 tgt_space = TARGET_CMSG_SPACE(len);
1616 if (msg_controllen < tgt_space) {
1617 tgt_space = msg_controllen;
1619 msg_controllen -= tgt_space;
1620 space += tgt_space;
1621 cmsg = CMSG_NXTHDR(msgh, cmsg);
1622 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1623 target_cmsg_start);
1625 unlock_user(target_cmsg, target_cmsg_addr, space);
1626 the_end:
1627 target_msgh->msg_controllen = tswapal(space);
1628 return 0;
1631 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1633 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1634 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1635 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1636 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1637 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
1640 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1641 size_t len,
1642 abi_long (*host_to_target_nlmsg)
1643 (struct nlmsghdr *))
1645 uint32_t nlmsg_len;
1646 abi_long ret;
1648 while (len > sizeof(struct nlmsghdr)) {
1650 nlmsg_len = nlh->nlmsg_len;
1651 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1652 nlmsg_len > len) {
1653 break;
1656 switch (nlh->nlmsg_type) {
1657 case NLMSG_DONE:
1658 tswap_nlmsghdr(nlh);
1659 return 0;
1660 case NLMSG_NOOP:
1661 break;
1662 case NLMSG_ERROR:
1664 struct nlmsgerr *e = NLMSG_DATA(nlh);
1665 e->error = tswap32(e->error);
1666 tswap_nlmsghdr(&e->msg);
1667 tswap_nlmsghdr(nlh);
1668 return 0;
1670 default:
1671 ret = host_to_target_nlmsg(nlh);
1672 if (ret < 0) {
1673 tswap_nlmsghdr(nlh);
1674 return ret;
1676 break;
1678 tswap_nlmsghdr(nlh);
1679 len -= NLMSG_ALIGN(nlmsg_len);
1680 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
1682 return 0;
1685 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1686 size_t len,
1687 abi_long (*target_to_host_nlmsg)
1688 (struct nlmsghdr *))
1690 int ret;
1692 while (len > sizeof(struct nlmsghdr)) {
1693 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1694 tswap32(nlh->nlmsg_len) > len) {
1695 break;
1697 tswap_nlmsghdr(nlh);
1698 switch (nlh->nlmsg_type) {
1699 case NLMSG_DONE:
1700 return 0;
1701 case NLMSG_NOOP:
1702 break;
1703 case NLMSG_ERROR:
1705 struct nlmsgerr *e = NLMSG_DATA(nlh);
1706 e->error = tswap32(e->error);
1707 tswap_nlmsghdr(&e->msg);
1708 return 0;
1710 default:
1711 ret = target_to_host_nlmsg(nlh);
1712 if (ret < 0) {
1713 return ret;
1716 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1717 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1719 return 0;
1722 #ifdef CONFIG_RTNETLINK
1723 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
1724 size_t len, void *context,
1725 abi_long (*host_to_target_nlattr)
1726 (struct nlattr *,
1727 void *context))
1729 unsigned short nla_len;
1730 abi_long ret;
1732 while (len > sizeof(struct nlattr)) {
1733 nla_len = nlattr->nla_len;
1734 if (nla_len < sizeof(struct nlattr) ||
1735 nla_len > len) {
1736 break;
1738 ret = host_to_target_nlattr(nlattr, context);
1739 nlattr->nla_len = tswap16(nlattr->nla_len);
1740 nlattr->nla_type = tswap16(nlattr->nla_type);
1741 if (ret < 0) {
1742 return ret;
1744 len -= NLA_ALIGN(nla_len);
1745 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
1747 return 0;
1750 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1751 size_t len,
1752 abi_long (*host_to_target_rtattr)
1753 (struct rtattr *))
1755 unsigned short rta_len;
1756 abi_long ret;
1758 while (len > sizeof(struct rtattr)) {
1759 rta_len = rtattr->rta_len;
1760 if (rta_len < sizeof(struct rtattr) ||
1761 rta_len > len) {
1762 break;
1764 ret = host_to_target_rtattr(rtattr);
1765 rtattr->rta_len = tswap16(rtattr->rta_len);
1766 rtattr->rta_type = tswap16(rtattr->rta_type);
1767 if (ret < 0) {
1768 return ret;
1770 len -= RTA_ALIGN(rta_len);
1771 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1773 return 0;
1776 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1778 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
1779 void *context)
1781 uint16_t *u16;
1782 uint32_t *u32;
1783 uint64_t *u64;
1785 switch (nlattr->nla_type) {
1786 /* no data */
1787 case IFLA_BR_FDB_FLUSH:
1788 break;
1789 /* binary */
1790 case IFLA_BR_GROUP_ADDR:
1791 break;
1792 /* uint8_t */
1793 case IFLA_BR_VLAN_FILTERING:
1794 case IFLA_BR_TOPOLOGY_CHANGE:
1795 case IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
1796 case IFLA_BR_MCAST_ROUTER:
1797 case IFLA_BR_MCAST_SNOOPING:
1798 case IFLA_BR_MCAST_QUERY_USE_IFADDR:
1799 case IFLA_BR_MCAST_QUERIER:
1800 case IFLA_BR_NF_CALL_IPTABLES:
1801 case IFLA_BR_NF_CALL_IP6TABLES:
1802 case IFLA_BR_NF_CALL_ARPTABLES:
1803 break;
1804 /* uint16_t */
1805 case IFLA_BR_PRIORITY:
1806 case IFLA_BR_VLAN_PROTOCOL:
1807 case IFLA_BR_GROUP_FWD_MASK:
1808 case IFLA_BR_ROOT_PORT:
1809 case IFLA_BR_VLAN_DEFAULT_PVID:
1810 u16 = NLA_DATA(nlattr);
1811 *u16 = tswap16(*u16);
1812 break;
1813 /* uint32_t */
1814 case IFLA_BR_FORWARD_DELAY:
1815 case IFLA_BR_HELLO_TIME:
1816 case IFLA_BR_MAX_AGE:
1817 case IFLA_BR_AGEING_TIME:
1818 case IFLA_BR_STP_STATE:
1819 case IFLA_BR_ROOT_PATH_COST:
1820 case IFLA_BR_MCAST_HASH_ELASTICITY:
1821 case IFLA_BR_MCAST_HASH_MAX:
1822 case IFLA_BR_MCAST_LAST_MEMBER_CNT:
1823 case IFLA_BR_MCAST_STARTUP_QUERY_CNT:
1824 u32 = NLA_DATA(nlattr);
1825 *u32 = tswap32(*u32);
1826 break;
1827 /* uint64_t */
1828 case IFLA_BR_HELLO_TIMER:
1829 case IFLA_BR_TCN_TIMER:
1830 case IFLA_BR_GC_TIMER:
1831 case IFLA_BR_TOPOLOGY_CHANGE_TIMER:
1832 case IFLA_BR_MCAST_LAST_MEMBER_INTVL:
1833 case IFLA_BR_MCAST_MEMBERSHIP_INTVL:
1834 case IFLA_BR_MCAST_QUERIER_INTVL:
1835 case IFLA_BR_MCAST_QUERY_INTVL:
1836 case IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
1837 case IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
1838 u64 = NLA_DATA(nlattr);
1839 *u64 = tswap64(*u64);
1840 break;
1841 /* ifla_bridge_id: uin8_t[] */
1842 case IFLA_BR_ROOT_ID:
1843 case IFLA_BR_BRIDGE_ID:
1844 break;
1845 default:
1846 gemu_log("Unknown IFLA_BR type %d\n", nlattr->nla_type);
1847 break;
1849 return 0;
1852 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
1853 void *context)
1855 uint16_t *u16;
1856 uint32_t *u32;
1857 uint64_t *u64;
1859 switch (nlattr->nla_type) {
1860 /* uint8_t */
1861 case IFLA_BRPORT_STATE:
1862 case IFLA_BRPORT_MODE:
1863 case IFLA_BRPORT_GUARD:
1864 case IFLA_BRPORT_PROTECT:
1865 case IFLA_BRPORT_FAST_LEAVE:
1866 case IFLA_BRPORT_LEARNING:
1867 case IFLA_BRPORT_UNICAST_FLOOD:
1868 case IFLA_BRPORT_PROXYARP:
1869 case IFLA_BRPORT_LEARNING_SYNC:
1870 case IFLA_BRPORT_PROXYARP_WIFI:
1871 case IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
1872 case IFLA_BRPORT_CONFIG_PENDING:
1873 case IFLA_BRPORT_MULTICAST_ROUTER:
1874 break;
1875 /* uint16_t */
1876 case IFLA_BRPORT_PRIORITY:
1877 case IFLA_BRPORT_DESIGNATED_PORT:
1878 case IFLA_BRPORT_DESIGNATED_COST:
1879 case IFLA_BRPORT_ID:
1880 case IFLA_BRPORT_NO:
1881 u16 = NLA_DATA(nlattr);
1882 *u16 = tswap16(*u16);
1883 break;
1884 /* uin32_t */
1885 case IFLA_BRPORT_COST:
1886 u32 = NLA_DATA(nlattr);
1887 *u32 = tswap32(*u32);
1888 break;
1889 /* uint64_t */
1890 case IFLA_BRPORT_MESSAGE_AGE_TIMER:
1891 case IFLA_BRPORT_FORWARD_DELAY_TIMER:
1892 case IFLA_BRPORT_HOLD_TIMER:
1893 u64 = NLA_DATA(nlattr);
1894 *u64 = tswap64(*u64);
1895 break;
1896 /* ifla_bridge_id: uint8_t[] */
1897 case IFLA_BRPORT_ROOT_ID:
1898 case IFLA_BRPORT_BRIDGE_ID:
1899 break;
1900 default:
1901 gemu_log("Unknown IFLA_BRPORT type %d\n", nlattr->nla_type);
1902 break;
1904 return 0;
/* State carried while walking IFLA_LINKINFO: records the
 * IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND names so the matching
 * IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA payloads can be dispatched
 * to the right converter.
 */
struct linkinfo_context {
    int len;            /* length of name (bytes) */
    char *name;         /* IFLA_INFO_KIND string, e.g. "bridge" */
    int slave_len;      /* length of slave_name (bytes) */
    char *slave_name;   /* IFLA_INFO_SLAVE_KIND string */
};
1914 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
1915 void *context)
1917 struct linkinfo_context *li_context = context;
1919 switch (nlattr->nla_type) {
1920 /* string */
1921 case IFLA_INFO_KIND:
1922 li_context->name = NLA_DATA(nlattr);
1923 li_context->len = nlattr->nla_len - NLA_HDRLEN;
1924 break;
1925 case IFLA_INFO_SLAVE_KIND:
1926 li_context->slave_name = NLA_DATA(nlattr);
1927 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
1928 break;
1929 /* stats */
1930 case IFLA_INFO_XSTATS:
1931 /* FIXME: only used by CAN */
1932 break;
1933 /* nested */
1934 case IFLA_INFO_DATA:
1935 if (strncmp(li_context->name, "bridge",
1936 li_context->len) == 0) {
1937 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
1938 nlattr->nla_len,
1939 NULL,
1940 host_to_target_data_bridge_nlattr);
1941 } else {
1942 gemu_log("Unknown IFLA_INFO_KIND %s\n", li_context->name);
1944 break;
1945 case IFLA_INFO_SLAVE_DATA:
1946 if (strncmp(li_context->slave_name, "bridge",
1947 li_context->slave_len) == 0) {
1948 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
1949 nlattr->nla_len,
1950 NULL,
1951 host_to_target_slave_data_bridge_nlattr);
1952 } else {
1953 gemu_log("Unknown IFLA_INFO_SLAVE_KIND %s\n",
1954 li_context->slave_name);
1956 break;
1957 default:
1958 gemu_log("Unknown host IFLA_INFO type: %d\n", nlattr->nla_type);
1959 break;
1962 return 0;
1965 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
1966 void *context)
1968 uint32_t *u32;
1969 int i;
1971 switch (nlattr->nla_type) {
1972 case IFLA_INET_CONF:
1973 u32 = NLA_DATA(nlattr);
1974 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
1975 i++) {
1976 u32[i] = tswap32(u32[i]);
1978 break;
1979 default:
1980 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
1982 return 0;
1985 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
1986 void *context)
1988 uint32_t *u32;
1989 uint64_t *u64;
1990 struct ifla_cacheinfo *ci;
1991 int i;
1993 switch (nlattr->nla_type) {
1994 /* binaries */
1995 case IFLA_INET6_TOKEN:
1996 break;
1997 /* uint8_t */
1998 case IFLA_INET6_ADDR_GEN_MODE:
1999 break;
2000 /* uint32_t */
2001 case IFLA_INET6_FLAGS:
2002 u32 = NLA_DATA(nlattr);
2003 *u32 = tswap32(*u32);
2004 break;
2005 /* uint32_t[] */
2006 case IFLA_INET6_CONF:
2007 u32 = NLA_DATA(nlattr);
2008 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2009 i++) {
2010 u32[i] = tswap32(u32[i]);
2012 break;
2013 /* ifla_cacheinfo */
2014 case IFLA_INET6_CACHEINFO:
2015 ci = NLA_DATA(nlattr);
2016 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2017 ci->tstamp = tswap32(ci->tstamp);
2018 ci->reachable_time = tswap32(ci->reachable_time);
2019 ci->retrans_time = tswap32(ci->retrans_time);
2020 break;
2021 /* uint64_t[] */
2022 case IFLA_INET6_STATS:
2023 case IFLA_INET6_ICMP6STATS:
2024 u64 = NLA_DATA(nlattr);
2025 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2026 i++) {
2027 u64[i] = tswap64(u64[i]);
2029 break;
2030 default:
2031 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2033 return 0;
2036 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2037 void *context)
2039 switch (nlattr->nla_type) {
2040 case AF_INET:
2041 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2042 NULL,
2043 host_to_target_data_inet_nlattr);
2044 case AF_INET6:
2045 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2046 NULL,
2047 host_to_target_data_inet6_nlattr);
2048 default:
2049 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2050 break;
2052 return 0;
2055 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2057 uint32_t *u32;
2058 struct rtnl_link_stats *st;
2059 struct rtnl_link_stats64 *st64;
2060 struct rtnl_link_ifmap *map;
2061 struct linkinfo_context li_context;
2063 switch (rtattr->rta_type) {
2064 /* binary stream */
2065 case IFLA_ADDRESS:
2066 case IFLA_BROADCAST:
2067 /* string */
2068 case IFLA_IFNAME:
2069 case IFLA_QDISC:
2070 break;
2071 /* uin8_t */
2072 case IFLA_OPERSTATE:
2073 case IFLA_LINKMODE:
2074 case IFLA_CARRIER:
2075 case IFLA_PROTO_DOWN:
2076 break;
2077 /* uint32_t */
2078 case IFLA_MTU:
2079 case IFLA_LINK:
2080 case IFLA_WEIGHT:
2081 case IFLA_TXQLEN:
2082 case IFLA_CARRIER_CHANGES:
2083 case IFLA_NUM_RX_QUEUES:
2084 case IFLA_NUM_TX_QUEUES:
2085 case IFLA_PROMISCUITY:
2086 case IFLA_EXT_MASK:
2087 case IFLA_LINK_NETNSID:
2088 case IFLA_GROUP:
2089 case IFLA_MASTER:
2090 case IFLA_NUM_VF:
2091 u32 = RTA_DATA(rtattr);
2092 *u32 = tswap32(*u32);
2093 break;
2094 /* struct rtnl_link_stats */
2095 case IFLA_STATS:
2096 st = RTA_DATA(rtattr);
2097 st->rx_packets = tswap32(st->rx_packets);
2098 st->tx_packets = tswap32(st->tx_packets);
2099 st->rx_bytes = tswap32(st->rx_bytes);
2100 st->tx_bytes = tswap32(st->tx_bytes);
2101 st->rx_errors = tswap32(st->rx_errors);
2102 st->tx_errors = tswap32(st->tx_errors);
2103 st->rx_dropped = tswap32(st->rx_dropped);
2104 st->tx_dropped = tswap32(st->tx_dropped);
2105 st->multicast = tswap32(st->multicast);
2106 st->collisions = tswap32(st->collisions);
2108 /* detailed rx_errors: */
2109 st->rx_length_errors = tswap32(st->rx_length_errors);
2110 st->rx_over_errors = tswap32(st->rx_over_errors);
2111 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2112 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2113 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2114 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2116 /* detailed tx_errors */
2117 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2118 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2119 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2120 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2121 st->tx_window_errors = tswap32(st->tx_window_errors);
2123 /* for cslip etc */
2124 st->rx_compressed = tswap32(st->rx_compressed);
2125 st->tx_compressed = tswap32(st->tx_compressed);
2126 break;
2127 /* struct rtnl_link_stats64 */
2128 case IFLA_STATS64:
2129 st64 = RTA_DATA(rtattr);
2130 st64->rx_packets = tswap64(st64->rx_packets);
2131 st64->tx_packets = tswap64(st64->tx_packets);
2132 st64->rx_bytes = tswap64(st64->rx_bytes);
2133 st64->tx_bytes = tswap64(st64->tx_bytes);
2134 st64->rx_errors = tswap64(st64->rx_errors);
2135 st64->tx_errors = tswap64(st64->tx_errors);
2136 st64->rx_dropped = tswap64(st64->rx_dropped);
2137 st64->tx_dropped = tswap64(st64->tx_dropped);
2138 st64->multicast = tswap64(st64->multicast);
2139 st64->collisions = tswap64(st64->collisions);
2141 /* detailed rx_errors: */
2142 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2143 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2144 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2145 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2146 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2147 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2149 /* detailed tx_errors */
2150 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2151 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2152 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2153 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2154 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2156 /* for cslip etc */
2157 st64->rx_compressed = tswap64(st64->rx_compressed);
2158 st64->tx_compressed = tswap64(st64->tx_compressed);
2159 break;
2160 /* struct rtnl_link_ifmap */
2161 case IFLA_MAP:
2162 map = RTA_DATA(rtattr);
2163 map->mem_start = tswap64(map->mem_start);
2164 map->mem_end = tswap64(map->mem_end);
2165 map->base_addr = tswap64(map->base_addr);
2166 map->irq = tswap16(map->irq);
2167 break;
2168 /* nested */
2169 case IFLA_LINKINFO:
2170 memset(&li_context, 0, sizeof(li_context));
2171 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2172 &li_context,
2173 host_to_target_data_linkinfo_nlattr);
2174 case IFLA_AF_SPEC:
2175 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2176 NULL,
2177 host_to_target_data_spec_nlattr);
2178 default:
2179 gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
2180 break;
2182 return 0;
2185 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2187 uint32_t *u32;
2188 struct ifa_cacheinfo *ci;
2190 switch (rtattr->rta_type) {
2191 /* binary: depends on family type */
2192 case IFA_ADDRESS:
2193 case IFA_LOCAL:
2194 break;
2195 /* string */
2196 case IFA_LABEL:
2197 break;
2198 /* u32 */
2199 case IFA_FLAGS:
2200 case IFA_BROADCAST:
2201 u32 = RTA_DATA(rtattr);
2202 *u32 = tswap32(*u32);
2203 break;
2204 /* struct ifa_cacheinfo */
2205 case IFA_CACHEINFO:
2206 ci = RTA_DATA(rtattr);
2207 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2208 ci->ifa_valid = tswap32(ci->ifa_valid);
2209 ci->cstamp = tswap32(ci->cstamp);
2210 ci->tstamp = tswap32(ci->tstamp);
2211 break;
2212 default:
2213 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2214 break;
2216 return 0;
2219 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2221 uint32_t *u32;
2222 switch (rtattr->rta_type) {
2223 /* binary: depends on family type */
2224 case RTA_GATEWAY:
2225 case RTA_DST:
2226 case RTA_PREFSRC:
2227 break;
2228 /* u32 */
2229 case RTA_PRIORITY:
2230 case RTA_TABLE:
2231 case RTA_OIF:
2232 u32 = RTA_DATA(rtattr);
2233 *u32 = tswap32(*u32);
2234 break;
2235 default:
2236 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2237 break;
2239 return 0;
2242 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2243 uint32_t rtattr_len)
2245 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2246 host_to_target_data_link_rtattr);
2249 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2250 uint32_t rtattr_len)
2252 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2253 host_to_target_data_addr_rtattr);
2256 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2257 uint32_t rtattr_len)
2259 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2260 host_to_target_data_route_rtattr);
2263 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2265 uint32_t nlmsg_len;
2266 struct ifinfomsg *ifi;
2267 struct ifaddrmsg *ifa;
2268 struct rtmsg *rtm;
2270 nlmsg_len = nlh->nlmsg_len;
2271 switch (nlh->nlmsg_type) {
2272 case RTM_NEWLINK:
2273 case RTM_DELLINK:
2274 case RTM_GETLINK:
2275 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2276 ifi = NLMSG_DATA(nlh);
2277 ifi->ifi_type = tswap16(ifi->ifi_type);
2278 ifi->ifi_index = tswap32(ifi->ifi_index);
2279 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2280 ifi->ifi_change = tswap32(ifi->ifi_change);
2281 host_to_target_link_rtattr(IFLA_RTA(ifi),
2282 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2284 break;
2285 case RTM_NEWADDR:
2286 case RTM_DELADDR:
2287 case RTM_GETADDR:
2288 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2289 ifa = NLMSG_DATA(nlh);
2290 ifa->ifa_index = tswap32(ifa->ifa_index);
2291 host_to_target_addr_rtattr(IFA_RTA(ifa),
2292 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2294 break;
2295 case RTM_NEWROUTE:
2296 case RTM_DELROUTE:
2297 case RTM_GETROUTE:
2298 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2299 rtm = NLMSG_DATA(nlh);
2300 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2301 host_to_target_route_rtattr(RTM_RTA(rtm),
2302 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2304 break;
2305 default:
2306 return -TARGET_EINVAL;
2308 return 0;
2311 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2312 size_t len)
2314 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Walk a target-supplied rtattr chain, converting each attribute header
 * to host byte order in place and then handing the attribute to the
 * supplied callback.  Stops quietly on a malformed header; returns the
 * callback's error (< 0) or 0.
 */
2317 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2318 size_t len,
2319 abi_long (*target_to_host_rtattr)
2320 (struct rtattr *))
2322 abi_long ret;
2324 while (len >= sizeof(struct rtattr)) {
/* rta_len is still in target order at this point, so it is swapped for
 * the sanity check only: it must cover at least the header and must not
 * overrun the remaining buffer. */
2325 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2326 tswap16(rtattr->rta_len) > len) {
2327 break;
/* Swap the header in place BEFORE the callback so it sees host order. */
2329 rtattr->rta_len = tswap16(rtattr->rta_len);
2330 rtattr->rta_type = tswap16(rtattr->rta_type);
2331 ret = target_to_host_rtattr(rtattr);
2332 if (ret < 0) {
2333 return ret;
/* Advance by the aligned attribute size; rta_len is host order now. */
2335 len -= RTA_ALIGN(rtattr->rta_len);
2336 rtattr = (struct rtattr *)(((char *)rtattr) +
2337 RTA_ALIGN(rtattr->rta_len));
2339 return 0;
2342 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2344 switch (rtattr->rta_type) {
2345 default:
2346 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
2347 break;
2349 return 0;
2352 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2354 switch (rtattr->rta_type) {
2355 /* binary: depends on family type */
2356 case IFA_LOCAL:
2357 case IFA_ADDRESS:
2358 break;
2359 default:
2360 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2361 break;
2363 return 0;
2366 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2368 uint32_t *u32;
2369 switch (rtattr->rta_type) {
2370 /* binary: depends on family type */
2371 case RTA_DST:
2372 case RTA_SRC:
2373 case RTA_GATEWAY:
2374 break;
2375 /* u32 */
2376 case RTA_OIF:
2377 u32 = RTA_DATA(rtattr);
2378 *u32 = tswap32(*u32);
2379 break;
2380 default:
2381 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2382 break;
2384 return 0;
2387 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2388 uint32_t rtattr_len)
2390 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2391 target_to_host_data_link_rtattr);
2394 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2395 uint32_t rtattr_len)
2397 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2398 target_to_host_data_addr_rtattr);
2401 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2402 uint32_t rtattr_len)
2404 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2405 target_to_host_data_route_rtattr);
2408 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2410 struct ifinfomsg *ifi;
2411 struct ifaddrmsg *ifa;
2412 struct rtmsg *rtm;
2414 switch (nlh->nlmsg_type) {
2415 case RTM_GETLINK:
2416 break;
2417 case RTM_NEWLINK:
2418 case RTM_DELLINK:
2419 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2420 ifi = NLMSG_DATA(nlh);
2421 ifi->ifi_type = tswap16(ifi->ifi_type);
2422 ifi->ifi_index = tswap32(ifi->ifi_index);
2423 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2424 ifi->ifi_change = tswap32(ifi->ifi_change);
2425 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2426 NLMSG_LENGTH(sizeof(*ifi)));
2428 break;
2429 case RTM_GETADDR:
2430 case RTM_NEWADDR:
2431 case RTM_DELADDR:
2432 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2433 ifa = NLMSG_DATA(nlh);
2434 ifa->ifa_index = tswap32(ifa->ifa_index);
2435 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2436 NLMSG_LENGTH(sizeof(*ifa)));
2438 break;
2439 case RTM_GETROUTE:
2440 break;
2441 case RTM_NEWROUTE:
2442 case RTM_DELROUTE:
2443 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2444 rtm = NLMSG_DATA(nlh);
2445 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2446 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2447 NLMSG_LENGTH(sizeof(*rtm)));
2449 break;
2450 default:
2451 return -TARGET_EOPNOTSUPP;
2453 return 0;
2456 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2458 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2460 #endif /* CONFIG_RTNETLINK */
2462 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2464 switch (nlh->nlmsg_type) {
2465 default:
2466 gemu_log("Unknown host audit message type %d\n",
2467 nlh->nlmsg_type);
2468 return -TARGET_EINVAL;
2470 return 0;
2473 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2474 size_t len)
2476 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2479 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2481 switch (nlh->nlmsg_type) {
2482 case AUDIT_USER:
2483 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2484 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2485 break;
2486 default:
2487 gemu_log("Unknown target audit message type %d\n",
2488 nlh->nlmsg_type);
2489 return -TARGET_EINVAL;
2492 return 0;
2495 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2497 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2500 /* do_setsockopt() Must return target values and target errnos. */
2501 static abi_long do_setsockopt(int sockfd, int level, int optname,
2502 abi_ulong optval_addr, socklen_t optlen)
2504 abi_long ret;
2505 int val;
2506 struct ip_mreqn *ip_mreq;
2507 struct ip_mreq_source *ip_mreq_source;
2509 switch(level) {
2510 case SOL_TCP:
2511 /* TCP options all take an 'int' value. */
2512 if (optlen < sizeof(uint32_t))
2513 return -TARGET_EINVAL;
2515 if (get_user_u32(val, optval_addr))
2516 return -TARGET_EFAULT;
2517 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2518 break;
2519 case SOL_IP:
2520 switch(optname) {
2521 case IP_TOS:
2522 case IP_TTL:
2523 case IP_HDRINCL:
2524 case IP_ROUTER_ALERT:
2525 case IP_RECVOPTS:
2526 case IP_RETOPTS:
2527 case IP_PKTINFO:
2528 case IP_MTU_DISCOVER:
2529 case IP_RECVERR:
2530 case IP_RECVTOS:
2531 #ifdef IP_FREEBIND
2532 case IP_FREEBIND:
2533 #endif
2534 case IP_MULTICAST_TTL:
2535 case IP_MULTICAST_LOOP:
2536 val = 0;
2537 if (optlen >= sizeof(uint32_t)) {
2538 if (get_user_u32(val, optval_addr))
2539 return -TARGET_EFAULT;
2540 } else if (optlen >= 1) {
2541 if (get_user_u8(val, optval_addr))
2542 return -TARGET_EFAULT;
2544 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2545 break;
2546 case IP_ADD_MEMBERSHIP:
2547 case IP_DROP_MEMBERSHIP:
2548 if (optlen < sizeof (struct target_ip_mreq) ||
2549 optlen > sizeof (struct target_ip_mreqn))
2550 return -TARGET_EINVAL;
2552 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2553 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2554 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2555 break;
2557 case IP_BLOCK_SOURCE:
2558 case IP_UNBLOCK_SOURCE:
2559 case IP_ADD_SOURCE_MEMBERSHIP:
2560 case IP_DROP_SOURCE_MEMBERSHIP:
2561 if (optlen != sizeof (struct target_ip_mreq_source))
2562 return -TARGET_EINVAL;
2564 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2565 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2566 unlock_user (ip_mreq_source, optval_addr, 0);
2567 break;
2569 default:
2570 goto unimplemented;
2572 break;
2573 case SOL_IPV6:
2574 switch (optname) {
2575 case IPV6_MTU_DISCOVER:
2576 case IPV6_MTU:
2577 case IPV6_V6ONLY:
2578 case IPV6_RECVPKTINFO:
2579 val = 0;
2580 if (optlen < sizeof(uint32_t)) {
2581 return -TARGET_EINVAL;
2583 if (get_user_u32(val, optval_addr)) {
2584 return -TARGET_EFAULT;
2586 ret = get_errno(setsockopt(sockfd, level, optname,
2587 &val, sizeof(val)));
2588 break;
2589 default:
2590 goto unimplemented;
2592 break;
2593 case SOL_RAW:
2594 switch (optname) {
2595 case ICMP_FILTER:
2596 /* struct icmp_filter takes an u32 value */
2597 if (optlen < sizeof(uint32_t)) {
2598 return -TARGET_EINVAL;
2601 if (get_user_u32(val, optval_addr)) {
2602 return -TARGET_EFAULT;
2604 ret = get_errno(setsockopt(sockfd, level, optname,
2605 &val, sizeof(val)));
2606 break;
2608 default:
2609 goto unimplemented;
2611 break;
2612 case TARGET_SOL_SOCKET:
2613 switch (optname) {
2614 case TARGET_SO_RCVTIMEO:
2616 struct timeval tv;
2618 optname = SO_RCVTIMEO;
2620 set_timeout:
2621 if (optlen != sizeof(struct target_timeval)) {
2622 return -TARGET_EINVAL;
2625 if (copy_from_user_timeval(&tv, optval_addr)) {
2626 return -TARGET_EFAULT;
2629 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2630 &tv, sizeof(tv)));
2631 return ret;
2633 case TARGET_SO_SNDTIMEO:
2634 optname = SO_SNDTIMEO;
2635 goto set_timeout;
2636 case TARGET_SO_ATTACH_FILTER:
2638 struct target_sock_fprog *tfprog;
2639 struct target_sock_filter *tfilter;
2640 struct sock_fprog fprog;
2641 struct sock_filter *filter;
2642 int i;
2644 if (optlen != sizeof(*tfprog)) {
2645 return -TARGET_EINVAL;
2647 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2648 return -TARGET_EFAULT;
2650 if (!lock_user_struct(VERIFY_READ, tfilter,
2651 tswapal(tfprog->filter), 0)) {
2652 unlock_user_struct(tfprog, optval_addr, 1);
2653 return -TARGET_EFAULT;
2656 fprog.len = tswap16(tfprog->len);
2657 filter = g_try_new(struct sock_filter, fprog.len);
2658 if (filter == NULL) {
2659 unlock_user_struct(tfilter, tfprog->filter, 1);
2660 unlock_user_struct(tfprog, optval_addr, 1);
2661 return -TARGET_ENOMEM;
2663 for (i = 0; i < fprog.len; i++) {
2664 filter[i].code = tswap16(tfilter[i].code);
2665 filter[i].jt = tfilter[i].jt;
2666 filter[i].jf = tfilter[i].jf;
2667 filter[i].k = tswap32(tfilter[i].k);
2669 fprog.filter = filter;
2671 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2672 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2673 g_free(filter);
2675 unlock_user_struct(tfilter, tfprog->filter, 1);
2676 unlock_user_struct(tfprog, optval_addr, 1);
2677 return ret;
2679 case TARGET_SO_BINDTODEVICE:
2681 char *dev_ifname, *addr_ifname;
2683 if (optlen > IFNAMSIZ - 1) {
2684 optlen = IFNAMSIZ - 1;
2686 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2687 if (!dev_ifname) {
2688 return -TARGET_EFAULT;
2690 optname = SO_BINDTODEVICE;
2691 addr_ifname = alloca(IFNAMSIZ);
2692 memcpy(addr_ifname, dev_ifname, optlen);
2693 addr_ifname[optlen] = 0;
2694 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2695 addr_ifname, optlen));
2696 unlock_user (dev_ifname, optval_addr, 0);
2697 return ret;
2699 /* Options with 'int' argument. */
2700 case TARGET_SO_DEBUG:
2701 optname = SO_DEBUG;
2702 break;
2703 case TARGET_SO_REUSEADDR:
2704 optname = SO_REUSEADDR;
2705 break;
2706 case TARGET_SO_TYPE:
2707 optname = SO_TYPE;
2708 break;
2709 case TARGET_SO_ERROR:
2710 optname = SO_ERROR;
2711 break;
2712 case TARGET_SO_DONTROUTE:
2713 optname = SO_DONTROUTE;
2714 break;
2715 case TARGET_SO_BROADCAST:
2716 optname = SO_BROADCAST;
2717 break;
2718 case TARGET_SO_SNDBUF:
2719 optname = SO_SNDBUF;
2720 break;
2721 case TARGET_SO_SNDBUFFORCE:
2722 optname = SO_SNDBUFFORCE;
2723 break;
2724 case TARGET_SO_RCVBUF:
2725 optname = SO_RCVBUF;
2726 break;
2727 case TARGET_SO_RCVBUFFORCE:
2728 optname = SO_RCVBUFFORCE;
2729 break;
2730 case TARGET_SO_KEEPALIVE:
2731 optname = SO_KEEPALIVE;
2732 break;
2733 case TARGET_SO_OOBINLINE:
2734 optname = SO_OOBINLINE;
2735 break;
2736 case TARGET_SO_NO_CHECK:
2737 optname = SO_NO_CHECK;
2738 break;
2739 case TARGET_SO_PRIORITY:
2740 optname = SO_PRIORITY;
2741 break;
2742 #ifdef SO_BSDCOMPAT
2743 case TARGET_SO_BSDCOMPAT:
2744 optname = SO_BSDCOMPAT;
2745 break;
2746 #endif
2747 case TARGET_SO_PASSCRED:
2748 optname = SO_PASSCRED;
2749 break;
2750 case TARGET_SO_PASSSEC:
2751 optname = SO_PASSSEC;
2752 break;
2753 case TARGET_SO_TIMESTAMP:
2754 optname = SO_TIMESTAMP;
2755 break;
2756 case TARGET_SO_RCVLOWAT:
2757 optname = SO_RCVLOWAT;
2758 break;
2759 break;
2760 default:
2761 goto unimplemented;
2763 if (optlen < sizeof(uint32_t))
2764 return -TARGET_EINVAL;
2766 if (get_user_u32(val, optval_addr))
2767 return -TARGET_EFAULT;
2768 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2769 break;
2770 default:
2771 unimplemented:
2772 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2773 ret = -TARGET_ENOPROTOOPT;
2775 return ret;
2778 /* do_getsockopt() Must return target values and target errnos. */
2779 static abi_long do_getsockopt(int sockfd, int level, int optname,
2780 abi_ulong optval_addr, abi_ulong optlen)
2782 abi_long ret;
2783 int len, val;
2784 socklen_t lv;
2786 switch(level) {
2787 case TARGET_SOL_SOCKET:
2788 level = SOL_SOCKET;
2789 switch (optname) {
2790 /* These don't just return a single integer */
2791 case TARGET_SO_LINGER:
2792 case TARGET_SO_RCVTIMEO:
2793 case TARGET_SO_SNDTIMEO:
2794 case TARGET_SO_PEERNAME:
2795 goto unimplemented;
2796 case TARGET_SO_PEERCRED: {
2797 struct ucred cr;
2798 socklen_t crlen;
2799 struct target_ucred *tcr;
2801 if (get_user_u32(len, optlen)) {
2802 return -TARGET_EFAULT;
2804 if (len < 0) {
2805 return -TARGET_EINVAL;
2808 crlen = sizeof(cr);
2809 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2810 &cr, &crlen));
2811 if (ret < 0) {
2812 return ret;
2814 if (len > crlen) {
2815 len = crlen;
2817 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2818 return -TARGET_EFAULT;
2820 __put_user(cr.pid, &tcr->pid);
2821 __put_user(cr.uid, &tcr->uid);
2822 __put_user(cr.gid, &tcr->gid);
2823 unlock_user_struct(tcr, optval_addr, 1);
2824 if (put_user_u32(len, optlen)) {
2825 return -TARGET_EFAULT;
2827 break;
2829 /* Options with 'int' argument. */
2830 case TARGET_SO_DEBUG:
2831 optname = SO_DEBUG;
2832 goto int_case;
2833 case TARGET_SO_REUSEADDR:
2834 optname = SO_REUSEADDR;
2835 goto int_case;
2836 case TARGET_SO_TYPE:
2837 optname = SO_TYPE;
2838 goto int_case;
2839 case TARGET_SO_ERROR:
2840 optname = SO_ERROR;
2841 goto int_case;
2842 case TARGET_SO_DONTROUTE:
2843 optname = SO_DONTROUTE;
2844 goto int_case;
2845 case TARGET_SO_BROADCAST:
2846 optname = SO_BROADCAST;
2847 goto int_case;
2848 case TARGET_SO_SNDBUF:
2849 optname = SO_SNDBUF;
2850 goto int_case;
2851 case TARGET_SO_RCVBUF:
2852 optname = SO_RCVBUF;
2853 goto int_case;
2854 case TARGET_SO_KEEPALIVE:
2855 optname = SO_KEEPALIVE;
2856 goto int_case;
2857 case TARGET_SO_OOBINLINE:
2858 optname = SO_OOBINLINE;
2859 goto int_case;
2860 case TARGET_SO_NO_CHECK:
2861 optname = SO_NO_CHECK;
2862 goto int_case;
2863 case TARGET_SO_PRIORITY:
2864 optname = SO_PRIORITY;
2865 goto int_case;
2866 #ifdef SO_BSDCOMPAT
2867 case TARGET_SO_BSDCOMPAT:
2868 optname = SO_BSDCOMPAT;
2869 goto int_case;
2870 #endif
2871 case TARGET_SO_PASSCRED:
2872 optname = SO_PASSCRED;
2873 goto int_case;
2874 case TARGET_SO_TIMESTAMP:
2875 optname = SO_TIMESTAMP;
2876 goto int_case;
2877 case TARGET_SO_RCVLOWAT:
2878 optname = SO_RCVLOWAT;
2879 goto int_case;
2880 case TARGET_SO_ACCEPTCONN:
2881 optname = SO_ACCEPTCONN;
2882 goto int_case;
2883 default:
2884 goto int_case;
2886 break;
2887 case SOL_TCP:
2888 /* TCP options all take an 'int' value. */
2889 int_case:
2890 if (get_user_u32(len, optlen))
2891 return -TARGET_EFAULT;
2892 if (len < 0)
2893 return -TARGET_EINVAL;
2894 lv = sizeof(lv);
2895 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2896 if (ret < 0)
2897 return ret;
2898 if (optname == SO_TYPE) {
2899 val = host_to_target_sock_type(val);
2901 if (len > lv)
2902 len = lv;
2903 if (len == 4) {
2904 if (put_user_u32(val, optval_addr))
2905 return -TARGET_EFAULT;
2906 } else {
2907 if (put_user_u8(val, optval_addr))
2908 return -TARGET_EFAULT;
2910 if (put_user_u32(len, optlen))
2911 return -TARGET_EFAULT;
2912 break;
2913 case SOL_IP:
2914 switch(optname) {
2915 case IP_TOS:
2916 case IP_TTL:
2917 case IP_HDRINCL:
2918 case IP_ROUTER_ALERT:
2919 case IP_RECVOPTS:
2920 case IP_RETOPTS:
2921 case IP_PKTINFO:
2922 case IP_MTU_DISCOVER:
2923 case IP_RECVERR:
2924 case IP_RECVTOS:
2925 #ifdef IP_FREEBIND
2926 case IP_FREEBIND:
2927 #endif
2928 case IP_MULTICAST_TTL:
2929 case IP_MULTICAST_LOOP:
2930 if (get_user_u32(len, optlen))
2931 return -TARGET_EFAULT;
2932 if (len < 0)
2933 return -TARGET_EINVAL;
2934 lv = sizeof(lv);
2935 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2936 if (ret < 0)
2937 return ret;
2938 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2939 len = 1;
2940 if (put_user_u32(len, optlen)
2941 || put_user_u8(val, optval_addr))
2942 return -TARGET_EFAULT;
2943 } else {
2944 if (len > sizeof(int))
2945 len = sizeof(int);
2946 if (put_user_u32(len, optlen)
2947 || put_user_u32(val, optval_addr))
2948 return -TARGET_EFAULT;
2950 break;
2951 default:
2952 ret = -TARGET_ENOPROTOOPT;
2953 break;
2955 break;
2956 default:
2957 unimplemented:
2958 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2959 level, optname);
2960 ret = -TARGET_EOPNOTSUPP;
2961 break;
2963 return ret;
/*
 * Build a host iovec array from a guest iovec array at target_addr,
 * locking each guest buffer into host memory.
 *
 * On success returns the array (caller must release it with
 * unlock_iovec()).  On failure returns NULL with errno set; count == 0
 * returns NULL with errno == 0, which callers must treat as success.
 */
2966 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2967 int count, int copy)
2969 struct target_iovec *target_vec;
2970 struct iovec *vec;
2971 abi_ulong total_len, max_len;
2972 int i;
2973 int err = 0;
2974 bool bad_address = false;
2976 if (count == 0) {
2977 errno = 0;
2978 return NULL;
2980 if (count < 0 || count > IOV_MAX) {
2981 errno = EINVAL;
2982 return NULL;
2985 vec = g_try_new0(struct iovec, count);
2986 if (vec == NULL) {
2987 errno = ENOMEM;
2988 return NULL;
2991 target_vec = lock_user(VERIFY_READ, target_addr,
2992 count * sizeof(struct target_iovec), 1);
2993 if (target_vec == NULL) {
2994 err = EFAULT;
2995 goto fail2;
2998 /* ??? If host page size > target page size, this will result in a
2999 value larger than what we can actually support. */
3000 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3001 total_len = 0;
3003 for (i = 0; i < count; i++) {
3004 abi_ulong base = tswapal(target_vec[i].iov_base);
3005 abi_long len = tswapal(target_vec[i].iov_len);
3007 if (len < 0) {
3008 err = EINVAL;
3009 goto fail;
3010 } else if (len == 0) {
3011 /* Zero length pointer is ignored. */
3012 vec[i].iov_base = 0;
3013 } else {
3014 vec[i].iov_base = lock_user(type, base, len, copy);
3015 /* If the first buffer pointer is bad, this is a fault. But
3016 * subsequent bad buffers will result in a partial write; this
3017 * is realized by filling the vector with null pointers and
3018 * zero lengths. */
3019 if (!vec[i].iov_base) {
3020 if (i == 0) {
3021 err = EFAULT;
3022 goto fail;
3023 } else {
3024 bad_address = true;
/* Once one buffer was bad, all following entries get zero length. */
3027 if (bad_address) {
3028 len = 0;
/* Clamp the aggregate length so total_len never exceeds max_len. */
3030 if (len > max_len - total_len) {
3031 len = max_len - total_len;
3034 vec[i].iov_len = len;
3035 total_len += len;
3038 unlock_user(target_vec, target_addr, 0);
3039 return vec;
/* Unwind: unlock only entries that were actually locked (guest len > 0). */
3041 fail:
3042 while (--i >= 0) {
3043 if (tswapal(target_vec[i].iov_len) > 0) {
3044 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3047 unlock_user(target_vec, target_addr, 0);
3048 fail2:
3049 g_free(vec);
3050 errno = err;
3051 return NULL;
3054 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3055 int count, int copy)
3057 struct target_iovec *target_vec;
3058 int i;
3060 target_vec = lock_user(VERIFY_READ, target_addr,
3061 count * sizeof(struct target_iovec), 1);
3062 if (target_vec) {
3063 for (i = 0; i < count; i++) {
3064 abi_ulong base = tswapal(target_vec[i].iov_base);
3065 abi_long len = tswapal(target_vec[i].iov_len);
3066 if (len < 0) {
3067 break;
3069 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3071 unlock_user(target_vec, target_addr, 0);
3074 g_free(vec);
3077 static inline int target_to_host_sock_type(int *type)
3079 int host_type = 0;
3080 int target_type = *type;
3082 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3083 case TARGET_SOCK_DGRAM:
3084 host_type = SOCK_DGRAM;
3085 break;
3086 case TARGET_SOCK_STREAM:
3087 host_type = SOCK_STREAM;
3088 break;
3089 default:
3090 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3091 break;
3093 if (target_type & TARGET_SOCK_CLOEXEC) {
3094 #if defined(SOCK_CLOEXEC)
3095 host_type |= SOCK_CLOEXEC;
3096 #else
3097 return -TARGET_EINVAL;
3098 #endif
3100 if (target_type & TARGET_SOCK_NONBLOCK) {
3101 #if defined(SOCK_NONBLOCK)
3102 host_type |= SOCK_NONBLOCK;
3103 #elif !defined(O_NONBLOCK)
3104 return -TARGET_EINVAL;
3105 #endif
3107 *type = host_type;
3108 return 0;
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl when the
 * target requested TARGET_SOCK_NONBLOCK.  Returns fd on success; on
 * failure closes fd and returns -TARGET_EINVAL.
 *
 * Fix: the F_GETFL result was not checked, so on failure the code
 * passed (O_NONBLOCK | -1) — i.e. all bits set — to F_SETFL.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (flags == -1 ||
            fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3126 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3127 abi_ulong target_addr,
3128 socklen_t len)
3130 struct sockaddr *addr = host_addr;
3131 struct target_sockaddr *target_saddr;
3133 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3134 if (!target_saddr) {
3135 return -TARGET_EFAULT;
3138 memcpy(addr, target_saddr, len);
3139 addr->sa_family = tswap16(target_saddr->sa_family);
3140 /* spkt_protocol is big-endian */
3142 unlock_user(target_saddr, target_addr, 0);
3143 return 0;
/* fd translator for SOCK_PACKET sockets: only the bind address needs
 * byte-order fixing; data passes through untouched. */
3146 static TargetFdTrans target_packet_trans = {
3147     .target_to_host_addr = packet_target_to_host_sockaddr,
3150 #ifdef CONFIG_RTNETLINK
3151 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3153 abi_long ret;
3155 ret = target_to_host_nlmsg_route(buf, len);
3156 if (ret < 0) {
3157 return ret;
3160 return len;
3163 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3165 abi_long ret;
3167 ret = host_to_target_nlmsg_route(buf, len);
3168 if (ret < 0) {
3169 return ret;
3172 return len;
/* fd translator for NETLINK_ROUTE sockets: byte-swap message bodies in
 * both directions. */
3175 static TargetFdTrans target_netlink_route_trans = {
3176     .target_to_host_data = netlink_route_target_to_host,
3177     .host_to_target_data = netlink_route_host_to_target,
3179 #endif /* CONFIG_RTNETLINK */
3181 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3183 abi_long ret;
3185 ret = target_to_host_nlmsg_audit(buf, len);
3186 if (ret < 0) {
3187 return ret;
3190 return len;
3193 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3195 abi_long ret;
3197 ret = host_to_target_nlmsg_audit(buf, len);
3198 if (ret < 0) {
3199 return ret;
3202 return len;
/* fd translator for NETLINK_AUDIT sockets: validate message types in
 * both directions. */
3205 static TargetFdTrans target_netlink_audit_trans = {
3206     .target_to_host_data = netlink_audit_target_to_host,
3207     .host_to_target_data = netlink_audit_host_to_target,
3210 /* do_socket() Must return target values and target errnos. */
3211 static abi_long do_socket(int domain, int type, int protocol)
3213 int target_type = type;
3214 int ret;
3216 ret = target_to_host_sock_type(&type);
3217 if (ret) {
3218 return ret;
3221 if (domain == PF_NETLINK && !(
3222 #ifdef CONFIG_RTNETLINK
3223 protocol == NETLINK_ROUTE ||
3224 #endif
3225 protocol == NETLINK_KOBJECT_UEVENT ||
3226 protocol == NETLINK_AUDIT)) {
3227 return -EPFNOSUPPORT;
3230 if (domain == AF_PACKET ||
3231 (domain == AF_INET && type == SOCK_PACKET)) {
3232 protocol = tswap16(protocol);
3235 ret = get_errno(socket(domain, type, protocol));
3236 if (ret >= 0) {
3237 ret = sock_flags_fixup(ret, target_type);
3238 if (type == SOCK_PACKET) {
3239 /* Manage an obsolete case :
3240 * if socket type is SOCK_PACKET, bind by name
3242 fd_trans_register(ret, &target_packet_trans);
3243 } else if (domain == PF_NETLINK) {
3244 switch (protocol) {
3245 #ifdef CONFIG_RTNETLINK
3246 case NETLINK_ROUTE:
3247 fd_trans_register(ret, &target_netlink_route_trans);
3248 break;
3249 #endif
3250 case NETLINK_KOBJECT_UEVENT:
3251 /* nothing to do: messages are strings */
3252 break;
3253 case NETLINK_AUDIT:
3254 fd_trans_register(ret, &target_netlink_audit_trans);
3255 break;
3256 default:
3257 g_assert_not_reached();
3261 return ret;
3264 /* do_bind() Must return target values and target errnos. */
3265 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3266 socklen_t addrlen)
3268 void *addr;
3269 abi_long ret;
3271 if ((int)addrlen < 0) {
3272 return -TARGET_EINVAL;
3275 addr = alloca(addrlen+1);
3277 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3278 if (ret)
3279 return ret;
3281 return get_errno(bind(sockfd, addr, addrlen));
3284 /* do_connect() Must return target values and target errnos. */
3285 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3286 socklen_t addrlen)
3288 void *addr;
3289 abi_long ret;
3291 if ((int)addrlen < 0) {
3292 return -TARGET_EINVAL;
3295 addr = alloca(addrlen+1);
3297 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3298 if (ret)
3299 return ret;
3301 return get_errno(safe_connect(sockfd, addr, addrlen));
3304 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Core of sendmsg/recvmsg emulation.  'msgp' is the guest msghdr,
 * already locked by the caller; 'send' selects sendmsg vs recvmsg.
 * Returns bytes transferred or a negative target errno.
 */
3305 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3306 int flags, int send)
3308 abi_long ret, len;
3309 struct msghdr msg;
3310 int count;
3311 struct iovec *vec;
3312 abi_ulong target_vec;
3314 if (msgp->msg_name) {
3315 msg.msg_namelen = tswap32(msgp->msg_namelen);
/* +1 gives room for NUL-terminating an abstract AF_UNIX path. */
3316 msg.msg_name = alloca(msg.msg_namelen+1);
3317 ret = target_to_host_sockaddr(fd, msg.msg_name,
3318 tswapal(msgp->msg_name),
3319 msg.msg_namelen);
3320 if (ret) {
3321 goto out2;
3323 } else {
3324 msg.msg_name = NULL;
3325 msg.msg_namelen = 0;
/* Host cmsg layout can be larger than the target's, so reserve twice
 * the guest-declared control length as head-room. */
3327 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3328 msg.msg_control = alloca(msg.msg_controllen);
3329 msg.msg_flags = tswap32(msgp->msg_flags);
3331 count = tswapal(msgp->msg_iovlen);
3332 target_vec = tswapal(msgp->msg_iov);
3333 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3334 target_vec, count, send);
3335 if (vec == NULL) {
3336 ret = -host_to_target_errno(errno);
3337 goto out2;
3339 msg.msg_iovlen = count;
3340 msg.msg_iov = vec;
3342 if (send) {
3343 if (fd_trans_target_to_host_data(fd)) {
3344 void *host_msg;
/* NOTE(review): only the first iovec entry is converted here —
 * presumably fd translators are only used with single-buffer sends;
 * confirm against callers. */
3346 host_msg = g_malloc(msg.msg_iov->iov_len);
3347 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3348 ret = fd_trans_target_to_host_data(fd)(host_msg,
3349 msg.msg_iov->iov_len);
3350 if (ret >= 0) {
3351 msg.msg_iov->iov_base = host_msg;
3352 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3354 g_free(host_msg);
3355 } else {
3356 ret = target_to_host_cmsg(&msg, msgp);
3357 if (ret == 0) {
3358 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3361 } else {
3362 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3363 if (!is_error(ret)) {
3364 len = ret;
3365 if (fd_trans_host_to_target_data(fd)) {
3366 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3367 len);
3368 } else {
3369 ret = host_to_target_cmsg(msgp, &msg);
/* On success, copy the sender address and return the byte count. */
3371 if (!is_error(ret)) {
3372 msgp->msg_namelen = tswap32(msg.msg_namelen);
3373 if (msg.msg_name != NULL) {
3374 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3375 msg.msg_name, msg.msg_namelen);
3376 if (ret) {
3377 goto out;
3381 ret = len;
3386 out:
3387 unlock_iovec(vec, target_vec, count, !send);
3388 out2:
3389 return ret;
/* Entry point for sendmsg/recvmsg emulation: lock the guest msghdr
 * (writable for the receive case so results can be copied back), hand it
 * to do_sendrecvmsg_locked(), then release it.  Returns target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    /* Copy the struct back to the guest only on the receive path. */
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
3412 #ifndef MSG_WAITFORONE
3413 #define MSG_WAITFORONE 0x10000
3414 #endif
/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * guest's array of mmsghdr structures.  Like the kernel, a partial batch
 * reports the number of datagrams processed; the error is only returned
 * when nothing at all was transferred. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp over-large batches, as the kernel does. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the i entries actually processed need writing back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos.
 *
 * Emulates accept4(2) (and accept(2) with flags == 0).  The guest's
 * addrlen is read first, the host accept4 fills a host-side sockaddr
 * buffer, and the converted address plus updated length are copied back. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* SOCK_NONBLOCK/SOCK_CLOEXEC use fcntl-style bit values. */
    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
3493 /* do_getpeername() Must return target values and target errnos. */
3494 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3495 abi_ulong target_addrlen_addr)
3497 socklen_t addrlen;
3498 void *addr;
3499 abi_long ret;
3501 if (get_user_u32(addrlen, target_addrlen_addr))
3502 return -TARGET_EFAULT;
3504 if ((int)addrlen < 0) {
3505 return -TARGET_EINVAL;
3508 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3509 return -TARGET_EFAULT;
3511 addr = alloca(addrlen);
3513 ret = get_errno(getpeername(fd, addr, &addrlen));
3514 if (!is_error(ret)) {
3515 host_to_target_sockaddr(target_addr, addr, addrlen);
3516 if (put_user_u32(addrlen, target_addrlen_addr))
3517 ret = -TARGET_EFAULT;
3519 return ret;
3522 /* do_getsockname() Must return target values and target errnos. */
3523 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3524 abi_ulong target_addrlen_addr)
3526 socklen_t addrlen;
3527 void *addr;
3528 abi_long ret;
3530 if (get_user_u32(addrlen, target_addrlen_addr))
3531 return -TARGET_EFAULT;
3533 if ((int)addrlen < 0) {
3534 return -TARGET_EINVAL;
3537 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3538 return -TARGET_EFAULT;
3540 addr = alloca(addrlen);
3542 ret = get_errno(getsockname(fd, addr, &addrlen));
3543 if (!is_error(ret)) {
3544 host_to_target_sockaddr(target_addr, addr, addrlen);
3545 if (put_user_u32(addrlen, target_addrlen_addr))
3546 ret = -TARGET_EFAULT;
3548 return ret;
3551 /* do_socketpair() Must return target values and target errnos. */
3552 static abi_long do_socketpair(int domain, int type, int protocol,
3553 abi_ulong target_tab_addr)
3555 int tab[2];
3556 abi_long ret;
3558 target_to_host_sock_type(&type);
3560 ret = get_errno(socketpair(domain, type, protocol, tab));
3561 if (!is_error(ret)) {
3562 if (put_user_s32(tab[0], target_tab_addr)
3563 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3564 ret = -TARGET_EFAULT;
3566 return ret;
/* do_sendto() Must return target values and target errnos.
 *
 * Emulates send(2)/sendto(2).  The guest payload is locked in place; if
 * the fd has a data translator, the payload is first copied into a host
 * buffer and rewritten there.  target_addr == 0 means plain send(). */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;   /* original locked pointer while translating */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate into a private copy so the guest buffer is untouched;
         * copy_msg remembers the locked pointer for the cleanup below. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Free the translated copy and restore the locked pointer so the
         * unlock below releases the right buffer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 *
 * Emulates recv(2)/recvfrom(2).  target_addr == 0 means plain recv();
 * otherwise the sender's address and updated addrlen are copied back.
 * On success, @len bytes of the guest buffer are marked dirty; on any
 * failure path the buffer is unlocked without write-back. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Rewrite received data in place for translated fds. */
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path: unlock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexes the legacy socketcall(2) syscall: @num selects the socket
 * operation and @vptr points at its packed abi_long argument array in
 * guest memory.  The ac[] table gives the argument count for each
 * operation so exactly the right number of words is fetched. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
/* Fixed-size table tracking guest shmat() attachments so that do_shmdt()
 * can recover the segment size when clearing page flags on detach. */
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;    /* guest address the segment is attached at */
    abi_ulong size;     /* segment size in bytes */
    bool in_use;
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;    /* pads sem_otime out to 64 bits on 32-bit ABIs */
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;    /* pads sem_ctime out to 64 bits on 32-bit ABIs */
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/* Copy a guest struct ipc_perm (embedded at the head of a semid64_ds at
 * @target_addr) into host format, byte-swapping each field.  mode and
 * __seq are 32-bit on some targets and 16-bit on others, hence the
 * per-target swap width below. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm back
 * into the guest structure at @target_addr, byte-swapping each field with
 * the same per-target widths for mode and __seq. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest struct semid64_ds at @target_addr into a host
 * struct semid_ds; the embedded ipc_perm is converted by its own helper. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_semid_ds(): copy a host struct semid_ds back
 * into the guest semid64_ds at @target_addr. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-layout mirror of the host's struct seminfo (IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Copy a host struct seminfo field-by-field into the guest structure at
 * @target_addr (used for the semctl IPC_INFO/SEM_INFO results). */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl() fourth argument; glibc requires callers to define it. */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};
/* Guest view of union semun: the pointer members are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
/* Allocate a host array for a SETALL semctl and fill it with the guest's
 * semaphore values.  The set size is queried from the kernel via IPC_STAT
 * rather than trusted from the guest.  On success *host_array is owned by
 * the caller (freed later by host_to_target_semarray()). */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        /* NOTE(review): this returns the *host* errno negated; other paths
         * in this file convert to target errnos -- confirm intent. */
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3947 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3948 unsigned short **host_array)
3950 int nsems;
3951 unsigned short *array;
3952 union semun semun;
3953 struct semid_ds semid_ds;
3954 int i, ret;
3956 semun.buf = &semid_ds;
3958 ret = semctl(semid, 0, IPC_STAT, semun);
3959 if (ret == -1)
3960 return get_errno(ret);
3962 nsems = semid_ds.sem_nsems;
3964 array = lock_user(VERIFY_WRITE, target_addr,
3965 nsems*sizeof(unsigned short), 0);
3966 if (!array)
3967 return -TARGET_EFAULT;
3969 for(i=0; i<nsems; i++) {
3970 __put_user((*host_array)[i], &array[i]);
3972 g_free(*host_array);
3973 unlock_user(array, target_addr, 1);
3975 return 0;
/* Emulate semctl(2).  @target_arg is the raw guest word holding the
 * union semun argument; each command converts just the union member it
 * needs.  Commands not listed fall through with ret = -TARGET_EINVAL. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;    /* strip IPC_64 and friends from the command */

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* Copies results back and frees the host array in all cases. */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These take no semun argument at all. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-layout mirror of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
/* Copy an array of @nsops guest sembuf operations at @target_addr into
 * the caller-provided host array, byte-swapping each field. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
4077 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4079 struct sembuf sops[nsops];
4081 if (target_to_host_sembuf(sops, ptr, nsops))
4082 return -TARGET_EFAULT;
4084 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-layout mirror of struct msqid64_ds; the __unusedN words pad the
 * time fields out to 64 bits on 32-bit ABIs. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
/* Convert a guest struct msqid_ds at @target_addr into host format; the
 * embedded ipc_perm is handled by its own converter. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_msqid_ds(): write a host struct msqid_ds back
 * into the guest structure at @target_addr. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/* Copy a host struct msginfo field-by-field into the guest structure at
 * @target_addr (msgctl IPC_INFO/MSG_INFO results). */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2).  Converts the guest msqid_ds/msginfo argument as each
 * command requires; unknown commands leave ret = -TARGET_EINVAL. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;    /* strip IPC_64 and friends from the command */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel writes a struct msginfo through the msqid_ds pointer
         * for these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-layout mirror of struct msgbuf; mtext is really variable-length. */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
/* Emulate msgsnd(2): copy the guest message (mtype + msgsz bytes of
 * mtext) into a host msgbuf and send it.  Returns target errnos. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype preceding the text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the text and
 * byte-swapped mtype back into the guest buffer.  Returns target errnos
 * or the number of bytes received. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* Lock the text region (right after the abi_ulong mtype) so the
         * received bytes are marked dirty in guest memory. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): mtype is written back even when safe_msgrcv() failed,
     * in which case host_mb->mtype is uninitialized -- confirm whether
     * this should be guarded by a success check. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Convert a guest struct shmid_ds at @target_addr into host format; the
 * embedded ipc_perm is handled by its own converter. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_shmid_ds(): write a host struct shmid_ds back
 * into the guest structure at @target_addr. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-layout mirror of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
/* Copy a host struct shminfo into the guest structure at @target_addr
 * (shmctl IPC_INFO result). */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-layout mirror of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/* Copy a host struct shm_info into the guest structure at @target_addr
 * (shmctl SHM_INFO result). */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2).  Converts the guest argument structure as each
 * command requires; unknown commands leave ret = -TARGET_EINVAL. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;    /* strip IPC_64 and friends from the command */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel writes a struct shminfo/shm_info through the
         * shmid_ds pointer for the *_INFO commands, hence the casts. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2).  If the guest supplies no address, a free guest VMA
 * range is found first and SHM_REMAP forces attachment there.  On success
 * the guest page flags are updated and the attachment is recorded in
 * shm_regions[] so do_shmdt() can undo it. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the attachment in the first free slot.
     * NOTE(review): if all N_SHM_REGIONS slots are busy the attach still
     * succeeds but is silently untracked, so a later shmdt() would not
     * clear its page flags -- confirm whether that is acceptable. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4470 static inline abi_long do_shmdt(abi_ulong shmaddr)
4472 int i;
4474 for (i = 0; i < N_SHM_REGIONS; ++i) {
4475 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4476 shm_regions[i].in_use = false;
4477 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4478 break;
4482 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* Demultiplexer for the legacy sys_ipc(2) syscall: dispatches to the
 * individual sem/msg/shm helpers.  Must return target values and target
 * errnos.  The high 16 bits of 'call' carry an ABI version number.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
        {
            /* Old ABI: msgp and msgtyp are packed in a struct at 'ptr'. */
            struct target_ipc_kludge {
                abi_long msgp;
                abi_long msgtyp;
            } *tmp;

            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
                break;
            }

            ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                            tswapal(tmp->msgtyp), third);

            unlock_user_struct(tmp, ptr, 0);
            break;
        }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4591 /* kernel structure types definitions */
4593 #define STRUCT(name, ...) STRUCT_ ## name,
4594 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4595 enum {
4596 #include "syscall_types.h"
4597 STRUCT_MAX
4599 #undef STRUCT
4600 #undef STRUCT_SPECIAL
4602 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4603 #define STRUCT_SPECIAL(name)
4604 #include "syscall_types.h"
4605 #undef STRUCT
4606 #undef STRUCT_SPECIAL
4608 typedef struct IOCTLEntry IOCTLEntry;
4610 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4611 int fd, int cmd, abi_long arg);
4613 struct IOCTLEntry {
4614 int target_cmd;
4615 unsigned int host_cmd;
4616 const char *name;
4617 int access;
4618 do_ioctl_fn *do_ioctl;
4619 const argtype arg_type[5];
4622 #define IOC_R 0x0001
4623 #define IOC_W 0x0002
4624 #define IOC_RW (IOC_R | IOC_W)
4626 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* FS_IOC_FIEMAP: the argument is a struct fiemap followed by an array of
 * struct fiemap_extent sized by fiemap->fm_extent_count; the kernel fills
 * the array in.  Convert both the header and each extent for the target.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4717 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4718 int fd, int cmd, abi_long arg)
4720 const argtype *arg_type = ie->arg_type;
4721 int target_size;
4722 void *argptr;
4723 int ret;
4724 struct ifconf *host_ifconf;
4725 uint32_t outbufsz;
4726 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4727 int target_ifreq_size;
4728 int nb_ifreq;
4729 int free_buf = 0;
4730 int i;
4731 int target_ifc_len;
4732 abi_long target_ifc_buf;
4733 int host_ifc_len;
4734 char *host_ifc_buf;
4736 assert(arg_type[0] == TYPE_PTR);
4737 assert(ie->access == IOC_RW);
4739 arg_type++;
4740 target_size = thunk_type_size(arg_type, 0);
4742 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4743 if (!argptr)
4744 return -TARGET_EFAULT;
4745 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4746 unlock_user(argptr, arg, 0);
4748 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4749 target_ifc_len = host_ifconf->ifc_len;
4750 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4752 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4753 nb_ifreq = target_ifc_len / target_ifreq_size;
4754 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4756 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4757 if (outbufsz > MAX_STRUCT_SIZE) {
4758 /* We can't fit all the extents into the fixed size buffer.
4759 * Allocate one that is large enough and use it instead.
4761 host_ifconf = malloc(outbufsz);
4762 if (!host_ifconf) {
4763 return -TARGET_ENOMEM;
4765 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4766 free_buf = 1;
4768 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4770 host_ifconf->ifc_len = host_ifc_len;
4771 host_ifconf->ifc_buf = host_ifc_buf;
4773 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4774 if (!is_error(ret)) {
4775 /* convert host ifc_len to target ifc_len */
4777 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4778 target_ifc_len = nb_ifreq * target_ifreq_size;
4779 host_ifconf->ifc_len = target_ifc_len;
4781 /* restore target ifc_buf */
4783 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4785 /* copy struct ifconf to target user */
4787 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4788 if (!argptr)
4789 return -TARGET_EFAULT;
4790 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4791 unlock_user(argptr, arg, target_size);
4793 /* copy ifreq[] to target user */
4795 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4796 for (i = 0; i < nb_ifreq ; i++) {
4797 thunk_convert(argptr + i * target_ifreq_size,
4798 host_ifc_buf + i * sizeof(struct ifreq),
4799 ifreq_arg_type, THUNK_TARGET);
4801 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4804 if (free_buf) {
4805 free(host_ifconf);
4808 return ret;
4811 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4812 int cmd, abi_long arg)
4814 void *argptr;
4815 struct dm_ioctl *host_dm;
4816 abi_long guest_data;
4817 uint32_t guest_data_size;
4818 int target_size;
4819 const argtype *arg_type = ie->arg_type;
4820 abi_long ret;
4821 void *big_buf = NULL;
4822 char *host_data;
4824 arg_type++;
4825 target_size = thunk_type_size(arg_type, 0);
4826 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4827 if (!argptr) {
4828 ret = -TARGET_EFAULT;
4829 goto out;
4831 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4832 unlock_user(argptr, arg, 0);
4834 /* buf_temp is too small, so fetch things into a bigger buffer */
4835 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4836 memcpy(big_buf, buf_temp, target_size);
4837 buf_temp = big_buf;
4838 host_dm = big_buf;
4840 guest_data = arg + host_dm->data_start;
4841 if ((guest_data - arg) < 0) {
4842 ret = -EINVAL;
4843 goto out;
4845 guest_data_size = host_dm->data_size - host_dm->data_start;
4846 host_data = (char*)host_dm + host_dm->data_start;
4848 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4849 switch (ie->host_cmd) {
4850 case DM_REMOVE_ALL:
4851 case DM_LIST_DEVICES:
4852 case DM_DEV_CREATE:
4853 case DM_DEV_REMOVE:
4854 case DM_DEV_SUSPEND:
4855 case DM_DEV_STATUS:
4856 case DM_DEV_WAIT:
4857 case DM_TABLE_STATUS:
4858 case DM_TABLE_CLEAR:
4859 case DM_TABLE_DEPS:
4860 case DM_LIST_VERSIONS:
4861 /* no input data */
4862 break;
4863 case DM_DEV_RENAME:
4864 case DM_DEV_SET_GEOMETRY:
4865 /* data contains only strings */
4866 memcpy(host_data, argptr, guest_data_size);
4867 break;
4868 case DM_TARGET_MSG:
4869 memcpy(host_data, argptr, guest_data_size);
4870 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4871 break;
4872 case DM_TABLE_LOAD:
4874 void *gspec = argptr;
4875 void *cur_data = host_data;
4876 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4877 int spec_size = thunk_type_size(arg_type, 0);
4878 int i;
4880 for (i = 0; i < host_dm->target_count; i++) {
4881 struct dm_target_spec *spec = cur_data;
4882 uint32_t next;
4883 int slen;
4885 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4886 slen = strlen((char*)gspec + spec_size) + 1;
4887 next = spec->next;
4888 spec->next = sizeof(*spec) + slen;
4889 strcpy((char*)&spec[1], gspec + spec_size);
4890 gspec += next;
4891 cur_data += spec->next;
4893 break;
4895 default:
4896 ret = -TARGET_EINVAL;
4897 unlock_user(argptr, guest_data, 0);
4898 goto out;
4900 unlock_user(argptr, guest_data, 0);
4902 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4903 if (!is_error(ret)) {
4904 guest_data = arg + host_dm->data_start;
4905 guest_data_size = host_dm->data_size - host_dm->data_start;
4906 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4907 switch (ie->host_cmd) {
4908 case DM_REMOVE_ALL:
4909 case DM_DEV_CREATE:
4910 case DM_DEV_REMOVE:
4911 case DM_DEV_RENAME:
4912 case DM_DEV_SUSPEND:
4913 case DM_DEV_STATUS:
4914 case DM_TABLE_LOAD:
4915 case DM_TABLE_CLEAR:
4916 case DM_TARGET_MSG:
4917 case DM_DEV_SET_GEOMETRY:
4918 /* no return data */
4919 break;
4920 case DM_LIST_DEVICES:
4922 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4923 uint32_t remaining_data = guest_data_size;
4924 void *cur_data = argptr;
4925 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4926 int nl_size = 12; /* can't use thunk_size due to alignment */
4928 while (1) {
4929 uint32_t next = nl->next;
4930 if (next) {
4931 nl->next = nl_size + (strlen(nl->name) + 1);
4933 if (remaining_data < nl->next) {
4934 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4935 break;
4937 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4938 strcpy(cur_data + nl_size, nl->name);
4939 cur_data += nl->next;
4940 remaining_data -= nl->next;
4941 if (!next) {
4942 break;
4944 nl = (void*)nl + next;
4946 break;
4948 case DM_DEV_WAIT:
4949 case DM_TABLE_STATUS:
4951 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4952 void *cur_data = argptr;
4953 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4954 int spec_size = thunk_type_size(arg_type, 0);
4955 int i;
4957 for (i = 0; i < host_dm->target_count; i++) {
4958 uint32_t next = spec->next;
4959 int slen = strlen((char*)&spec[1]) + 1;
4960 spec->next = (cur_data - argptr) + spec_size + slen;
4961 if (guest_data_size < spec->next) {
4962 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4963 break;
4965 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4966 strcpy(cur_data + spec_size, (char*)&spec[1]);
4967 cur_data = argptr + spec->next;
4968 spec = (void*)host_dm + host_dm->data_start + next;
4970 break;
4972 case DM_TABLE_DEPS:
4974 void *hdata = (void*)host_dm + host_dm->data_start;
4975 int count = *(uint32_t*)hdata;
4976 uint64_t *hdev = hdata + 8;
4977 uint64_t *gdev = argptr + 8;
4978 int i;
4980 *(uint32_t*)argptr = tswap32(count);
4981 for (i = 0; i < count; i++) {
4982 *gdev = tswap64(*hdev);
4983 gdev++;
4984 hdev++;
4986 break;
4988 case DM_LIST_VERSIONS:
4990 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4991 uint32_t remaining_data = guest_data_size;
4992 void *cur_data = argptr;
4993 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4994 int vers_size = thunk_type_size(arg_type, 0);
4996 while (1) {
4997 uint32_t next = vers->next;
4998 if (next) {
4999 vers->next = vers_size + (strlen(vers->name) + 1);
5001 if (remaining_data < vers->next) {
5002 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5003 break;
5005 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5006 strcpy(cur_data + vers_size, vers->name);
5007 cur_data += vers->next;
5008 remaining_data -= vers->next;
5009 if (!next) {
5010 break;
5012 vers = (void*)vers + next;
5014 break;
5016 default:
5017 unlock_user(argptr, guest_data, 0);
5018 ret = -TARGET_EINVAL;
5019 goto out;
5021 unlock_user(argptr, guest_data, guest_data_size);
5023 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5024 if (!argptr) {
5025 ret = -TARGET_EFAULT;
5026 goto out;
5028 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5029 unlock_user(argptr, arg, target_size);
5031 out:
5032 g_free(big_buf);
5033 return ret;
5036 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5037 int cmd, abi_long arg)
5039 void *argptr;
5040 int target_size;
5041 const argtype *arg_type = ie->arg_type;
5042 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5043 abi_long ret;
5045 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5046 struct blkpg_partition host_part;
5048 /* Read and convert blkpg */
5049 arg_type++;
5050 target_size = thunk_type_size(arg_type, 0);
5051 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5052 if (!argptr) {
5053 ret = -TARGET_EFAULT;
5054 goto out;
5056 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5057 unlock_user(argptr, arg, 0);
5059 switch (host_blkpg->op) {
5060 case BLKPG_ADD_PARTITION:
5061 case BLKPG_DEL_PARTITION:
5062 /* payload is struct blkpg_partition */
5063 break;
5064 default:
5065 /* Unknown opcode */
5066 ret = -TARGET_EINVAL;
5067 goto out;
5070 /* Read and convert blkpg->data */
5071 arg = (abi_long)(uintptr_t)host_blkpg->data;
5072 target_size = thunk_type_size(part_arg_type, 0);
5073 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5074 if (!argptr) {
5075 ret = -TARGET_EFAULT;
5076 goto out;
5078 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5079 unlock_user(argptr, arg, 0);
5081 /* Swizzle the data pointer to our local copy and call! */
5082 host_blkpg->data = &host_part;
5083 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5085 out:
5086 return ret;
5089 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5090 int fd, int cmd, abi_long arg)
5092 const argtype *arg_type = ie->arg_type;
5093 const StructEntry *se;
5094 const argtype *field_types;
5095 const int *dst_offsets, *src_offsets;
5096 int target_size;
5097 void *argptr;
5098 abi_ulong *target_rt_dev_ptr;
5099 unsigned long *host_rt_dev_ptr;
5100 abi_long ret;
5101 int i;
5103 assert(ie->access == IOC_W);
5104 assert(*arg_type == TYPE_PTR);
5105 arg_type++;
5106 assert(*arg_type == TYPE_STRUCT);
5107 target_size = thunk_type_size(arg_type, 0);
5108 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5109 if (!argptr) {
5110 return -TARGET_EFAULT;
5112 arg_type++;
5113 assert(*arg_type == (int)STRUCT_rtentry);
5114 se = struct_entries + *arg_type++;
5115 assert(se->convert[0] == NULL);
5116 /* convert struct here to be able to catch rt_dev string */
5117 field_types = se->field_types;
5118 dst_offsets = se->field_offsets[THUNK_HOST];
5119 src_offsets = se->field_offsets[THUNK_TARGET];
5120 for (i = 0; i < se->nb_fields; i++) {
5121 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5122 assert(*field_types == TYPE_PTRVOID);
5123 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5124 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5125 if (*target_rt_dev_ptr != 0) {
5126 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5127 tswapal(*target_rt_dev_ptr));
5128 if (!*host_rt_dev_ptr) {
5129 unlock_user(argptr, arg, 0);
5130 return -TARGET_EFAULT;
5132 } else {
5133 *host_rt_dev_ptr = 0;
5135 field_types++;
5136 continue;
5138 field_types = thunk_convert(buf_temp + dst_offsets[i],
5139 argptr + src_offsets[i],
5140 field_types, THUNK_HOST);
5142 unlock_user(argptr, arg, 0);
5144 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5145 if (*host_rt_dev_ptr != 0) {
5146 unlock_user((void *)*host_rt_dev_ptr,
5147 *target_rt_dev_ptr, 0);
5149 return ret;
5152 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5153 int fd, int cmd, abi_long arg)
5155 int sig = target_to_host_signal(arg);
5156 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5159 static IOCTLEntry ioctl_entries[] = {
5160 #define IOCTL(cmd, access, ...) \
5161 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5162 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5163 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5164 #include "ioctls.h"
5165 { 0, 0, },
5168 /* ??? Implement proper locking for ioctls. */
5169 /* do_ioctl() Must return target values and target errnos. */
5170 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5172 const IOCTLEntry *ie;
5173 const argtype *arg_type;
5174 abi_long ret;
5175 uint8_t buf_temp[MAX_STRUCT_SIZE];
5176 int target_size;
5177 void *argptr;
5179 ie = ioctl_entries;
5180 for(;;) {
5181 if (ie->target_cmd == 0) {
5182 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5183 return -TARGET_ENOSYS;
5185 if (ie->target_cmd == cmd)
5186 break;
5187 ie++;
5189 arg_type = ie->arg_type;
5190 #if defined(DEBUG)
5191 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5192 #endif
5193 if (ie->do_ioctl) {
5194 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5197 switch(arg_type[0]) {
5198 case TYPE_NULL:
5199 /* no argument */
5200 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5201 break;
5202 case TYPE_PTRVOID:
5203 case TYPE_INT:
5204 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5205 break;
5206 case TYPE_PTR:
5207 arg_type++;
5208 target_size = thunk_type_size(arg_type, 0);
5209 switch(ie->access) {
5210 case IOC_R:
5211 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5212 if (!is_error(ret)) {
5213 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5214 if (!argptr)
5215 return -TARGET_EFAULT;
5216 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5217 unlock_user(argptr, arg, target_size);
5219 break;
5220 case IOC_W:
5221 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5222 if (!argptr)
5223 return -TARGET_EFAULT;
5224 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5225 unlock_user(argptr, arg, 0);
5226 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5227 break;
5228 default:
5229 case IOC_RW:
5230 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5231 if (!argptr)
5232 return -TARGET_EFAULT;
5233 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5234 unlock_user(argptr, arg, 0);
5235 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5236 if (!is_error(ret)) {
5237 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5238 if (!argptr)
5239 return -TARGET_EFAULT;
5240 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5241 unlock_user(argptr, arg, target_size);
5243 break;
5245 break;
5246 default:
5247 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5248 (long)cmd, arg_type[0]);
5249 ret = -TARGET_ENOSYS;
5250 break;
5252 return ret;
5255 static const bitmask_transtbl iflag_tbl[] = {
5256 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5257 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5258 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5259 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5260 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5261 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5262 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5263 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5264 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5265 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5266 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5267 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5268 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5269 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5270 { 0, 0, 0, 0 }
5273 static const bitmask_transtbl oflag_tbl[] = {
5274 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5275 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5276 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5277 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5278 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5279 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5280 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5281 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5282 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5283 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5284 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5285 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5286 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5287 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5288 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5289 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5290 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5291 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5292 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5293 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5294 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5295 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5296 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5297 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5298 { 0, 0, 0, 0 }
5301 static const bitmask_transtbl cflag_tbl[] = {
5302 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5303 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5304 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5305 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5306 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5307 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5308 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5309 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5310 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5311 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5312 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5313 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5314 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5315 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5316 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5317 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5318 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5319 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5320 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5321 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5322 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5323 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5324 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5325 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5326 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5327 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5328 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5329 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5330 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5331 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5332 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5333 { 0, 0, 0, 0 }
5336 static const bitmask_transtbl lflag_tbl[] = {
5337 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5338 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5339 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5340 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5341 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5342 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5343 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5344 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5345 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5346 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5347 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5348 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5349 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5350 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5351 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5352 { 0, 0, 0, 0 }
5355 static void target_to_host_termios (void *dst, const void *src)
5357 struct host_termios *host = dst;
5358 const struct target_termios *target = src;
5360 host->c_iflag =
5361 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5362 host->c_oflag =
5363 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5364 host->c_cflag =
5365 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5366 host->c_lflag =
5367 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5368 host->c_line = target->c_line;
5370 memset(host->c_cc, 0, sizeof(host->c_cc));
5371 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5372 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5373 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5374 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5375 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5376 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5377 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5378 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5379 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5380 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5381 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5382 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5383 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5384 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5385 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5386 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5387 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5390 static void host_to_target_termios (void *dst, const void *src)
5392 struct target_termios *target = dst;
5393 const struct host_termios *host = src;
5395 target->c_iflag =
5396 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5397 target->c_oflag =
5398 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5399 target->c_cflag =
5400 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5401 target->c_lflag =
5402 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5403 target->c_line = host->c_line;
5405 memset(target->c_cc, 0, sizeof(target->c_cc));
5406 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5407 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5408 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5409 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5410 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5411 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5412 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5413 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5414 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5415 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5416 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5417 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5418 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5419 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5420 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5421 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5422 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5425 static const StructEntry struct_termios_def = {
5426 .convert = { host_to_target_termios, target_to_host_termios },
5427 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5428 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5431 static bitmask_transtbl mmap_flags_tbl[] = {
5432 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5433 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5434 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5435 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5436 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5437 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5438 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5439 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5440 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5441 MAP_NORESERVE },
5442 { 0, 0, 0, 0 }
5445 #if defined(TARGET_I386)
5447 /* NOTE: there is really one LDT for all the threads */
5448 static uint8_t *ldt_table;
5450 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5452 int size;
5453 void *p;
5455 if (!ldt_table)
5456 return 0;
5457 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5458 if (size > bytecount)
5459 size = bytecount;
5460 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5461 if (!p)
5462 return -TARGET_EFAULT;
5463 /* ??? Should this by byteswapped? */
5464 memcpy(p, ldt_table, size);
5465 unlock_user(p, ptr, size);
5466 return size;
/* XXX: add locking support */
/*
 * modify_ldt(2), write path: validate a target_modify_ldt_ldt_s descriptor
 * read from guest memory and install the corresponding 8-byte entry in the
 * (lazily allocated) LDT shadow.  'oldmode' selects the legacy func==1
 * semantics, which reject TLS-style (contents==3) entries and ignore the
 * 'useable' bit.
 *
 * Returns 0 on success, or -TARGET_EINVAL / -TARGET_EFAULT /
 * -TARGET_ENOMEM.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed 'flags' word (same bit layout as the Linux ABI). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents==3 marks a TLS-style entry: rejected in old mode, and it
     * must be flagged not-present. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Assemble the two 32-bit halves of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5561 /* specific and weird i386 syscalls */
5562 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5563 unsigned long bytecount)
5565 abi_long ret;
5567 switch (func) {
5568 case 0:
5569 ret = read_ldt(ptr, bytecount);
5570 break;
5571 case 1:
5572 ret = write_ldt(env, ptr, bytecount, 1);
5573 break;
5574 case 0x11:
5575 ret = write_ldt(env, ptr, bytecount, 0);
5576 break;
5577 default:
5578 ret = -TARGET_ENOSYS;
5579 break;
5581 return ret;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor in the guest GDT.
 * If the guest passes entry_number == -1, pick a free TLS slot and write
 * the chosen index back into the guest struct, as the kernel does.
 *
 * Returns 0, -TARGET_EFAULT, or -TARGET_EINVAL for an out-of-range slot.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Scan the TLS range of the GDT for an unused (all-zero) slot. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the packed 'flags' word (same bit layout as the Linux ABI). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Assemble the two 32-bit halves of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * get_thread_area(2): read the TLS descriptor selected by the guest's
 * entry_number out of the guest GDT, unpack it back into the
 * target_modify_ldt_ldt_s layout, and write it to guest memory.
 *
 * Returns 0, -TARGET_EFAULT, or -TARGET_EINVAL for an out-of-range slot.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits (inverse of do_set_thread_area()). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Repack into the ABI's packed 'flags' word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
5718 #ifndef TARGET_ABI32
5719 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5721 abi_long ret = 0;
5722 abi_ulong val;
5723 int idx;
5725 switch(code) {
5726 case TARGET_ARCH_SET_GS:
5727 case TARGET_ARCH_SET_FS:
5728 if (code == TARGET_ARCH_SET_GS)
5729 idx = R_GS;
5730 else
5731 idx = R_FS;
5732 cpu_x86_load_seg(env, idx, 0);
5733 env->segs[idx].base = addr;
5734 break;
5735 case TARGET_ARCH_GET_GS:
5736 case TARGET_ARCH_GET_FS:
5737 if (code == TARGET_ARCH_GET_GS)
5738 idx = R_GS;
5739 else
5740 idx = R_FS;
5741 val = env->segs[idx].base;
5742 if (put_user(val, addr, abi_ulong))
5743 ret = -TARGET_EFAULT;
5744 break;
5745 default:
5746 ret = -TARGET_EINVAL;
5747 break;
5749 return ret;
5751 #endif
5753 #endif /* defined(TARGET_I386) */
/* Host stack size for threads created via clone(CLONE_VM). */
#define NEW_STACK_SIZE 0x40000

/* Serializes thread creation so child setup appears atomic to the child
 * (the child blocks on this until the parent finishes TLS setup). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake data passed from do_fork() to the freshly created thread. */
typedef struct {
    CPUArchState *env;       /* CPU state for the child thread */
    pthread_mutex_t mutex;   /* protects the readiness handshake */
    pthread_cond_t cond;     /* child signals readiness on this */
    pthread_t thread;
    uint32_t tid;            /* child's host TID, filled in by the child */
    abi_ulong child_tidptr;  /* guest addr to store TID in the child, if set */
    abi_ulong parent_tidptr; /* guest addr to store TID for the parent, if set */
    sigset_t sigmask;        /* parent's signal mask, restored in the child */
} new_thread_info;
/*
 * Entry point of a thread created by do_fork(CLONE_VM): publish the host
 * TID, unblock signals, signal readiness to the parent, wait for the
 * parent to finish TLS setup, then enter the guest CPU loop.
 * The statement ordering here is part of the parent/child handshake.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Store the TID where CLONE_CHILD_SETTID / CLONE_PARENT_SETTID asked. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2) for the guest.  CLONE_VM requests
 * become a host pthread sharing this address space; anything else becomes
 * a host fork().  vfork is emulated with plain fork.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        /* Keep the NPTL flags for local decisions, but strip them from
         * 'flags' so later tests only see the plain clone bits. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the return values of the pthread_attr_* calls
         * below are stored but never checked before being overwritten. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            /* NOTE(review): CLONE_PARENT_SETTID was masked out of 'flags'
             * above, so this check is always false; the store is instead
             * done by the child via info.parent_tidptr. */
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl(2) command number to the host's numbering.
 * Record-lock commands are widened to the *LK64 variants so large file
 * offsets survive on 32-bit hosts.  Returns -TARGET_EINVAL for commands
 * with no host equivalent.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Not reached: every case above returns. */
    return -TARGET_EINVAL;
}
/* Build a bitmask_transtbl row that matches a single l_type value; the -1
 * masks make the bitmask conversion helpers compare against all bits. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* Mapping between target and host flock l_type values. */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/*
 * Read a guest 'struct target_flock' into a host struct flock64,
 * translating l_type via flock_tbl and byteswapping each field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/*
 * Write a host struct flock64 back to a guest 'struct target_flock'
 * (inverse of copy_from_user_flock).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Function types for the flock64 copy helpers, so callers can pick the
 * correct (plain or ARM-EABI) variant at runtime. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI uses a differently-padded flock64 layout, handled here. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
/*
 * Read a guest 'struct target_flock64' into a host struct flock64.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/*
 * Write a host struct flock64 back to a guest 'struct target_flock64'
 * (inverse of copy_from_user_flock64).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/*
 * Implement fcntl(2) for the guest: translate the command and (for lock,
 * flag and owner commands) the argument structure, invoke the host
 * syscall, and convert results back.  Unknown commands are passed through
 * with the untranslated cmd/arg.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Translate the returned O_* flag bits back to the target's. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Commands whose integer argument needs no translation. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit ABI range; out-of-range ids collapse
 * to the kernel's overflow id, 65534. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* Same clamping for gids. */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid, preserving the -1 "unchanged" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* Widen a 16-bit gid, preserving the -1 "unchanged" sentinel. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

/* Byteswap an id at the ABI's 16-bit width. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABI: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread syscall wrappers (bypass the libc process-wide setxid). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialisation for the syscall layer: register the thunk
 * struct descriptions, build the reverse errno table, and patch each
 * ioctl table entry whose size field is the "compute at runtime" marker.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit value passed by a 32-bit guest as two registers;
 * which register holds the high half depends on the guest's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t hi = word0, lo = word1;
#else
    uint64_t hi = word1, lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the value whole; the second word is unused padding. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64: on 32-bit ABIs the 64-bit length is split across two
 * argument registers; ABIs with aligned register pairs
 * (regpairs_aligned) insert a pad register first, shifting the pair
 * into arg3/arg4.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64(). */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
/*
 * Read a guest timespec into a host struct timespec (byteswapping as
 * needed).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

/*
 * Write a host struct timespec back to guest memory (inverse of
 * target_to_host_timespec).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6452 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6453 abi_ulong target_addr)
6455 struct target_itimerspec *target_itspec;
6457 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6458 return -TARGET_EFAULT;
6461 host_itspec->it_interval.tv_sec =
6462 tswapal(target_itspec->it_interval.tv_sec);
6463 host_itspec->it_interval.tv_nsec =
6464 tswapal(target_itspec->it_interval.tv_nsec);
6465 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6466 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6468 unlock_user_struct(target_itspec, target_addr, 1);
6469 return 0;
6472 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6473 struct itimerspec *host_its)
6475 struct target_itimerspec *target_itspec;
6477 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6478 return -TARGET_EFAULT;
6481 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6482 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6484 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6485 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6487 unlock_user_struct(target_itspec, target_addr, 0);
6488 return 0;
6491 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6492 abi_ulong target_addr)
6494 struct target_sigevent *target_sevp;
6496 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6497 return -TARGET_EFAULT;
6500 /* This union is awkward on 64 bit systems because it has a 32 bit
6501 * integer and a pointer in it; we follow the conversion approach
6502 * used for handling sigval types in signal.c so the guest should get
6503 * the correct value back even if we did a 64 bit byteswap and it's
6504 * using the 32 bit integer.
6506 host_sevp->sigev_value.sival_ptr =
6507 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6508 host_sevp->sigev_signo =
6509 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6510 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6511 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6513 unlock_user_struct(target_sevp, target_addr, 1);
6514 return 0;
#if defined(TARGET_NR_mlockall)
/* Convert the guest's mlockall() flag bits to the host MCL_* encoding. */
static inline int target_to_host_mlockall_arg(int arg)
{
    return ((arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0) |
           ((arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0);
}
#endif
/*
 * Copy a host struct stat into the guest's 64-bit stat layout at
 * target_addr.  32-bit ARM EABI guests use a dedicated layout; every
 * other target uses its target_stat64 (or plain target_stat) definition.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unfilled fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6595 /* ??? Using host futex calls even when target atomic operations
6596 are not really atomic probably breaks things. However implementing
6597 futexes locally would make futexes shared between multiple processes
6598 tricky. However they're probably useless because guest atomic
6599 operations won't work either. */
6600 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6601 target_ulong uaddr2, int val3)
6603 struct timespec ts, *pts;
6604 int base_op;
6606 /* ??? We assume FUTEX_* constants are the same on both host
6607 and target. */
6608 #ifdef FUTEX_CMD_MASK
6609 base_op = op & FUTEX_CMD_MASK;
6610 #else
6611 base_op = op;
6612 #endif
6613 switch (base_op) {
6614 case FUTEX_WAIT:
6615 case FUTEX_WAIT_BITSET:
6616 if (timeout) {
6617 pts = &ts;
6618 target_to_host_timespec(pts, timeout);
6619 } else {
6620 pts = NULL;
6622 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6623 pts, NULL, val3));
6624 case FUTEX_WAKE:
6625 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6626 case FUTEX_FD:
6627 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6628 case FUTEX_REQUEUE:
6629 case FUTEX_CMP_REQUEUE:
6630 case FUTEX_WAKE_OP:
6631 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6632 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6633 But the prototype takes a `struct timespec *'; insert casts
6634 to satisfy the compiler. We do not need to tswap TIMEOUT
6635 since it's not compared to guest memory. */
6636 pts = (struct timespec *)(uintptr_t) timeout;
6637 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6638 g2h(uaddr2),
6639 (base_op == FUTEX_CMP_REQUEUE
6640 ? tswap32(val3)
6641 : val3)));
6642 default:
6643 return -TARGET_ENOSYS;
6646 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): resolve a guest pathname into an opaque
 * file handle written back into the guest's file_handle buffer.
 *
 * The guest supplies handle->handle_bytes (read here as `size`); the
 * handle buffer must be large enough for sizeof(struct file_handle) +
 * size bytes.  mount_id receives the mount ID on success.
 *
 * NOTE(review): `size` comes straight from the guest and is added to
 * sizeof(struct file_handle) without an upper-bound check — presumably
 * relying on the kernel rejecting oversized handle_bytes with EINVAL;
 * confirm total_size cannot wrap.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest struct is handle_bytes. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Host-side scratch handle, zeroed so f_handle padding is clean. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Copy the opaque bytes verbatim, then byte-swap the two known
     * header fields for the guest. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
6698 #endif
6700 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6701 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6702 abi_long flags)
6704 struct file_handle *target_fh;
6705 struct file_handle *fh;
6706 unsigned int size, total_size;
6707 abi_long ret;
6709 if (get_user_s32(size, handle)) {
6710 return -TARGET_EFAULT;
6713 total_size = sizeof(struct file_handle) + size;
6714 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6715 if (!target_fh) {
6716 return -TARGET_EFAULT;
6719 fh = g_memdup(target_fh, total_size);
6720 fh->handle_bytes = size;
6721 fh->handle_type = tswap32(target_fh->handle_type);
6723 ret = get_errno(open_by_handle_at(mount_fd, fh,
6724 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6726 g_free(fh);
6728 unlock_user(target_fh, handle, total_size);
6730 return ret;
6732 #endif
6734 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6736 /* signalfd siginfo conversion */
/*
 * Convert a host signalfd_siginfo record to the guest's layout and byte
 * order.  tinfo and info may alias (the read() path converts in place),
 * so all source values must be read from `info` before `tinfo` fields
 * are overwritten — the previous code read tinfo->ssi_signo/ssi_code/
 * ssi_errno, which only worked because of that aliasing.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives in the padding right after ssi_addr. */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6776 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6778 int i;
6780 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6781 host_to_target_signalfd_siginfo(buf + i, buf + i);
6784 return len;
/* fd translator registered on signalfd fds: fixes up the siginfo
 * records that read() returns from such an fd. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
6791 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6793 int host_flags;
6794 target_sigset_t *target_mask;
6795 sigset_t host_mask;
6796 abi_long ret;
6798 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6799 return -TARGET_EINVAL;
6801 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6802 return -TARGET_EFAULT;
6805 target_to_host_sigset(&host_mask, target_mask);
6807 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6809 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6810 if (ret >= 0) {
6811 fd_trans_register(ret, &target_signalfd_trans);
6814 unlock_user_struct(target_mask, mask, 0);
6816 return ret;
6818 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    int target_sig;

    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal. */
        target_sig = host_to_target_signal(WTERMSIG(status));
        return target_sig | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        target_sig = host_to_target_signal(WSTOPSIG(status));
        return (target_sig << 8) | (status & 0xff);
    }

    return status;
}
/*
 * Fill fd with the guest view of /proc/self/cmdline: the host file with
 * its first NUL-terminated word removed, since that word is the path of
 * the qemu binary rather than the emulated program.
 * Returns 0 on success, -1 with errno set on read/write failure.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    /* Becomes true once the leading qemu path has been consumed. */
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve the read errno across close(). */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
            /* else: entire chunk is still part of the first word and
             * is dropped; keep scanning subsequent chunks. */
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6884 static int open_self_maps(void *cpu_env, int fd)
6886 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6887 TaskState *ts = cpu->opaque;
6888 FILE *fp;
6889 char *line = NULL;
6890 size_t len = 0;
6891 ssize_t read;
6893 fp = fopen("/proc/self/maps", "r");
6894 if (fp == NULL) {
6895 return -1;
6898 while ((read = getline(&line, &len, fp)) != -1) {
6899 int fields, dev_maj, dev_min, inode;
6900 uint64_t min, max, offset;
6901 char flag_r, flag_w, flag_x, flag_p;
6902 char path[512] = "";
6903 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6904 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6905 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6907 if ((fields < 10) || (fields > 11)) {
6908 continue;
6910 if (h2g_valid(min)) {
6911 int flags = page_get_flags(h2g(min));
6912 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6913 if (page_check_range(h2g(min), max - min, flags) == -1) {
6914 continue;
6916 if (h2g(min) == ts->info->stack_limit) {
6917 pstrcpy(path, sizeof(path), " [stack]");
6919 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6920 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6921 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6922 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6923 path[0] ? " " : "", path);
6927 free(line);
6928 fclose(fp);
6930 return 0;
6933 static int open_self_stat(void *cpu_env, int fd)
6935 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6936 TaskState *ts = cpu->opaque;
6937 abi_ulong start_stack = ts->info->start_stack;
6938 int i;
6940 for (i = 0; i < 44; i++) {
6941 char buf[128];
6942 int len;
6943 uint64_t val = 0;
6945 if (i == 0) {
6946 /* pid */
6947 val = getpid();
6948 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6949 } else if (i == 1) {
6950 /* app name */
6951 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6952 } else if (i == 27) {
6953 /* stack bottom */
6954 val = start_stack;
6955 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6956 } else {
6957 /* for the rest, there is MasterCard */
6958 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6961 len = strlen(buf);
6962 if (write(fd, buf, len) != len) {
6963 return -1;
6967 return 0;
6970 static int open_self_auxv(void *cpu_env, int fd)
6972 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6973 TaskState *ts = cpu->opaque;
6974 abi_ulong auxv = ts->info->saved_auxv;
6975 abi_ulong len = ts->info->auxv_len;
6976 char *ptr;
6979 * Auxiliary vector is stored in target process stack.
6980 * read in whole auxv vector and copy it to file
6982 ptr = lock_user(VERIFY_READ, auxv, len, 0);
6983 if (ptr != NULL) {
6984 while (len > 0) {
6985 ssize_t r;
6986 r = write(fd, ptr, len);
6987 if (r <= 0) {
6988 break;
6990 len -= r;
6991 ptr += r;
6993 lseek(fd, 0, SEEK_SET);
6994 unlock_user(ptr, auxv, len);
6997 return 0;
/*
 * Return 1 if filename names `entry` inside this process's own /proc
 * directory ("/proc/self/<entry>" or "/proc/<ourpid>/<entry>"),
 * otherwise 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric directory only counts if it is our own pid. */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7024 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match predicate used by the fake-open table for absolute
 * /proc paths (e.g. "/proc/net/route"). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7030 static int open_net_route(void *cpu_env, int fd)
7032 FILE *fp;
7033 char *line = NULL;
7034 size_t len = 0;
7035 ssize_t read;
7037 fp = fopen("/proc/net/route", "r");
7038 if (fp == NULL) {
7039 return -1;
7042 /* read header */
7044 read = getline(&line, &len, fp);
7045 dprintf(fd, "%s", line);
7047 /* read routes */
7049 while ((read = getline(&line, &len, fp)) != -1) {
7050 char iface[16];
7051 uint32_t dest, gw, mask;
7052 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7053 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7054 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7055 &mask, &mtu, &window, &irtt);
7056 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7057 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7058 metric, tswap32(mask), mtu, window, irtt);
7061 free(line);
7062 fclose(fp);
7064 return 0;
7066 #endif
/*
 * openat(2) front end for the guest.  Certain /proc files that would
 * expose host state (maps, stat, auxv, cmdline, and /proc/net/route on
 * cross-endian builds) are intercepted: their guest-appropriate content
 * is generated into an anonymous temp file whose fd is returned.
 * "/proc/self/exe" is redirected to the emulated binary.  Everything
 * else goes to the host via safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* name to intercept */
        int (*fill)(void *cpu_env, int fd);         /* content generator */
        int (*cmp)(const char *s1, const char *s2); /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the fd the kernel gave us at exec time, if any. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the file lives only as long as the fd. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads from the beginning. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7128 #define TIMER_MAGIC 0x0caf0000
7129 #define TIMER_MAGIC_MASK 0xffff0000
7131 /* Convert QEMU provided timer ID back to internal 16bit index format */
7132 static target_timer_t get_timer_id(abi_long arg)
7134 target_timer_t timerid = arg;
7136 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7137 return -TARGET_EINVAL;
7140 timerid &= 0xffff;
7142 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7143 return -TARGET_EINVAL;
7146 return timerid;
7149 /* do_syscall() should always have a single exit point at the end so
7150 that actions, such as logging of syscall results, can be performed.
7151 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7152 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7153 abi_long arg2, abi_long arg3, abi_long arg4,
7154 abi_long arg5, abi_long arg6, abi_long arg7,
7155 abi_long arg8)
7157 CPUState *cpu = ENV_GET_CPU(cpu_env);
7158 abi_long ret;
7159 struct stat st;
7160 struct statfs stfs;
7161 void *p;
7163 #if defined(DEBUG_ERESTARTSYS)
7164 /* Debug-only code for exercising the syscall-restart code paths
7165 * in the per-architecture cpu main loops: restart every syscall
7166 * the guest makes once before letting it through.
7169 static int flag;
7171 flag = !flag;
7172 if (flag) {
7173 return -TARGET_ERESTARTSYS;
7176 #endif
7178 #ifdef DEBUG
7179 gemu_log("syscall %d", num);
7180 #endif
7181 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7182 if(do_strace)
7183 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7185 switch(num) {
7186 case TARGET_NR_exit:
7187 /* In old applications this may be used to implement _exit(2).
7188 However in threaded applictions it is used for thread termination,
7189 and _exit_group is used for application termination.
7190 Do thread termination if we have more then one thread. */
7192 if (block_signals()) {
7193 ret = -TARGET_ERESTARTSYS;
7194 break;
7197 if (CPU_NEXT(first_cpu)) {
7198 TaskState *ts;
7200 cpu_list_lock();
7201 /* Remove the CPU from the list. */
7202 QTAILQ_REMOVE(&cpus, cpu, node);
7203 cpu_list_unlock();
7204 ts = cpu->opaque;
7205 if (ts->child_tidptr) {
7206 put_user_u32(0, ts->child_tidptr);
7207 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7208 NULL, NULL, 0);
7210 thread_cpu = NULL;
7211 object_unref(OBJECT(cpu));
7212 g_free(ts);
7213 rcu_unregister_thread();
7214 pthread_exit(NULL);
7216 #ifdef TARGET_GPROF
7217 _mcleanup();
7218 #endif
7219 gdb_exit(cpu_env, arg1);
7220 _exit(arg1);
7221 ret = 0; /* avoid warning */
7222 break;
7223 case TARGET_NR_read:
7224 if (arg3 == 0)
7225 ret = 0;
7226 else {
7227 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7228 goto efault;
7229 ret = get_errno(safe_read(arg1, p, arg3));
7230 if (ret >= 0 &&
7231 fd_trans_host_to_target_data(arg1)) {
7232 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7234 unlock_user(p, arg2, ret);
7236 break;
7237 case TARGET_NR_write:
7238 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7239 goto efault;
7240 ret = get_errno(safe_write(arg1, p, arg3));
7241 unlock_user(p, arg2, 0);
7242 break;
7243 #ifdef TARGET_NR_open
7244 case TARGET_NR_open:
7245 if (!(p = lock_user_string(arg1)))
7246 goto efault;
7247 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7248 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7249 arg3));
7250 fd_trans_unregister(ret);
7251 unlock_user(p, arg1, 0);
7252 break;
7253 #endif
7254 case TARGET_NR_openat:
7255 if (!(p = lock_user_string(arg2)))
7256 goto efault;
7257 ret = get_errno(do_openat(cpu_env, arg1, p,
7258 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7259 arg4));
7260 fd_trans_unregister(ret);
7261 unlock_user(p, arg2, 0);
7262 break;
7263 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7264 case TARGET_NR_name_to_handle_at:
7265 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7266 break;
7267 #endif
7268 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7269 case TARGET_NR_open_by_handle_at:
7270 ret = do_open_by_handle_at(arg1, arg2, arg3);
7271 fd_trans_unregister(ret);
7272 break;
7273 #endif
7274 case TARGET_NR_close:
7275 fd_trans_unregister(arg1);
7276 ret = get_errno(close(arg1));
7277 break;
7278 case TARGET_NR_brk:
7279 ret = do_brk(arg1);
7280 break;
7281 #ifdef TARGET_NR_fork
7282 case TARGET_NR_fork:
7283 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7284 break;
7285 #endif
7286 #ifdef TARGET_NR_waitpid
7287 case TARGET_NR_waitpid:
7289 int status;
7290 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7291 if (!is_error(ret) && arg2 && ret
7292 && put_user_s32(host_to_target_waitstatus(status), arg2))
7293 goto efault;
7295 break;
7296 #endif
7297 #ifdef TARGET_NR_waitid
7298 case TARGET_NR_waitid:
7300 siginfo_t info;
7301 info.si_pid = 0;
7302 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7303 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7304 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7305 goto efault;
7306 host_to_target_siginfo(p, &info);
7307 unlock_user(p, arg3, sizeof(target_siginfo_t));
7310 break;
7311 #endif
7312 #ifdef TARGET_NR_creat /* not on alpha */
7313 case TARGET_NR_creat:
7314 if (!(p = lock_user_string(arg1)))
7315 goto efault;
7316 ret = get_errno(creat(p, arg2));
7317 fd_trans_unregister(ret);
7318 unlock_user(p, arg1, 0);
7319 break;
7320 #endif
7321 #ifdef TARGET_NR_link
7322 case TARGET_NR_link:
7324 void * p2;
7325 p = lock_user_string(arg1);
7326 p2 = lock_user_string(arg2);
7327 if (!p || !p2)
7328 ret = -TARGET_EFAULT;
7329 else
7330 ret = get_errno(link(p, p2));
7331 unlock_user(p2, arg2, 0);
7332 unlock_user(p, arg1, 0);
7334 break;
7335 #endif
7336 #if defined(TARGET_NR_linkat)
7337 case TARGET_NR_linkat:
7339 void * p2 = NULL;
7340 if (!arg2 || !arg4)
7341 goto efault;
7342 p = lock_user_string(arg2);
7343 p2 = lock_user_string(arg4);
7344 if (!p || !p2)
7345 ret = -TARGET_EFAULT;
7346 else
7347 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7348 unlock_user(p, arg2, 0);
7349 unlock_user(p2, arg4, 0);
7351 break;
7352 #endif
7353 #ifdef TARGET_NR_unlink
7354 case TARGET_NR_unlink:
7355 if (!(p = lock_user_string(arg1)))
7356 goto efault;
7357 ret = get_errno(unlink(p));
7358 unlock_user(p, arg1, 0);
7359 break;
7360 #endif
7361 #if defined(TARGET_NR_unlinkat)
7362 case TARGET_NR_unlinkat:
7363 if (!(p = lock_user_string(arg2)))
7364 goto efault;
7365 ret = get_errno(unlinkat(arg1, p, arg3));
7366 unlock_user(p, arg2, 0);
7367 break;
7368 #endif
7369 case TARGET_NR_execve:
7371 char **argp, **envp;
7372 int argc, envc;
7373 abi_ulong gp;
7374 abi_ulong guest_argp;
7375 abi_ulong guest_envp;
7376 abi_ulong addr;
7377 char **q;
7378 int total_size = 0;
7380 argc = 0;
7381 guest_argp = arg2;
7382 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7383 if (get_user_ual(addr, gp))
7384 goto efault;
7385 if (!addr)
7386 break;
7387 argc++;
7389 envc = 0;
7390 guest_envp = arg3;
7391 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7392 if (get_user_ual(addr, gp))
7393 goto efault;
7394 if (!addr)
7395 break;
7396 envc++;
7399 argp = alloca((argc + 1) * sizeof(void *));
7400 envp = alloca((envc + 1) * sizeof(void *));
7402 for (gp = guest_argp, q = argp; gp;
7403 gp += sizeof(abi_ulong), q++) {
7404 if (get_user_ual(addr, gp))
7405 goto execve_efault;
7406 if (!addr)
7407 break;
7408 if (!(*q = lock_user_string(addr)))
7409 goto execve_efault;
7410 total_size += strlen(*q) + 1;
7412 *q = NULL;
7414 for (gp = guest_envp, q = envp; gp;
7415 gp += sizeof(abi_ulong), q++) {
7416 if (get_user_ual(addr, gp))
7417 goto execve_efault;
7418 if (!addr)
7419 break;
7420 if (!(*q = lock_user_string(addr)))
7421 goto execve_efault;
7422 total_size += strlen(*q) + 1;
7424 *q = NULL;
7426 if (!(p = lock_user_string(arg1)))
7427 goto execve_efault;
7428 /* Although execve() is not an interruptible syscall it is
7429 * a special case where we must use the safe_syscall wrapper:
7430 * if we allow a signal to happen before we make the host
7431 * syscall then we will 'lose' it, because at the point of
7432 * execve the process leaves QEMU's control. So we use the
7433 * safe syscall wrapper to ensure that we either take the
7434 * signal as a guest signal, or else it does not happen
7435 * before the execve completes and makes it the other
7436 * program's problem.
7438 ret = get_errno(safe_execve(p, argp, envp));
7439 unlock_user(p, arg1, 0);
7441 goto execve_end;
7443 execve_efault:
7444 ret = -TARGET_EFAULT;
7446 execve_end:
7447 for (gp = guest_argp, q = argp; *q;
7448 gp += sizeof(abi_ulong), q++) {
7449 if (get_user_ual(addr, gp)
7450 || !addr)
7451 break;
7452 unlock_user(*q, addr, 0);
7454 for (gp = guest_envp, q = envp; *q;
7455 gp += sizeof(abi_ulong), q++) {
7456 if (get_user_ual(addr, gp)
7457 || !addr)
7458 break;
7459 unlock_user(*q, addr, 0);
7462 break;
7463 case TARGET_NR_chdir:
7464 if (!(p = lock_user_string(arg1)))
7465 goto efault;
7466 ret = get_errno(chdir(p));
7467 unlock_user(p, arg1, 0);
7468 break;
7469 #ifdef TARGET_NR_time
7470 case TARGET_NR_time:
7472 time_t host_time;
7473 ret = get_errno(time(&host_time));
7474 if (!is_error(ret)
7475 && arg1
7476 && put_user_sal(host_time, arg1))
7477 goto efault;
7479 break;
7480 #endif
7481 #ifdef TARGET_NR_mknod
7482 case TARGET_NR_mknod:
7483 if (!(p = lock_user_string(arg1)))
7484 goto efault;
7485 ret = get_errno(mknod(p, arg2, arg3));
7486 unlock_user(p, arg1, 0);
7487 break;
7488 #endif
7489 #if defined(TARGET_NR_mknodat)
7490 case TARGET_NR_mknodat:
7491 if (!(p = lock_user_string(arg2)))
7492 goto efault;
7493 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7494 unlock_user(p, arg2, 0);
7495 break;
7496 #endif
7497 #ifdef TARGET_NR_chmod
7498 case TARGET_NR_chmod:
7499 if (!(p = lock_user_string(arg1)))
7500 goto efault;
7501 ret = get_errno(chmod(p, arg2));
7502 unlock_user(p, arg1, 0);
7503 break;
7504 #endif
7505 #ifdef TARGET_NR_break
7506 case TARGET_NR_break:
7507 goto unimplemented;
7508 #endif
7509 #ifdef TARGET_NR_oldstat
7510 case TARGET_NR_oldstat:
7511 goto unimplemented;
7512 #endif
7513 case TARGET_NR_lseek:
7514 ret = get_errno(lseek(arg1, arg2, arg3));
7515 break;
7516 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7517 /* Alpha specific */
7518 case TARGET_NR_getxpid:
7519 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7520 ret = get_errno(getpid());
7521 break;
7522 #endif
7523 #ifdef TARGET_NR_getpid
7524 case TARGET_NR_getpid:
7525 ret = get_errno(getpid());
7526 break;
7527 #endif
7528 case TARGET_NR_mount:
7530 /* need to look at the data field */
7531 void *p2, *p3;
7533 if (arg1) {
7534 p = lock_user_string(arg1);
7535 if (!p) {
7536 goto efault;
7538 } else {
7539 p = NULL;
7542 p2 = lock_user_string(arg2);
7543 if (!p2) {
7544 if (arg1) {
7545 unlock_user(p, arg1, 0);
7547 goto efault;
7550 if (arg3) {
7551 p3 = lock_user_string(arg3);
7552 if (!p3) {
7553 if (arg1) {
7554 unlock_user(p, arg1, 0);
7556 unlock_user(p2, arg2, 0);
7557 goto efault;
7559 } else {
7560 p3 = NULL;
7563 /* FIXME - arg5 should be locked, but it isn't clear how to
7564 * do that since it's not guaranteed to be a NULL-terminated
7565 * string.
7567 if (!arg5) {
7568 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7569 } else {
7570 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7572 ret = get_errno(ret);
7574 if (arg1) {
7575 unlock_user(p, arg1, 0);
7577 unlock_user(p2, arg2, 0);
7578 if (arg3) {
7579 unlock_user(p3, arg3, 0);
7582 break;
7583 #ifdef TARGET_NR_umount
7584 case TARGET_NR_umount:
7585 if (!(p = lock_user_string(arg1)))
7586 goto efault;
7587 ret = get_errno(umount(p));
7588 unlock_user(p, arg1, 0);
7589 break;
7590 #endif
7591 #ifdef TARGET_NR_stime /* not on alpha */
7592 case TARGET_NR_stime:
7594 time_t host_time;
7595 if (get_user_sal(host_time, arg1))
7596 goto efault;
7597 ret = get_errno(stime(&host_time));
7599 break;
7600 #endif
7601 case TARGET_NR_ptrace:
7602 goto unimplemented;
7603 #ifdef TARGET_NR_alarm /* not on alpha */
7604 case TARGET_NR_alarm:
7605 ret = alarm(arg1);
7606 break;
7607 #endif
7608 #ifdef TARGET_NR_oldfstat
7609 case TARGET_NR_oldfstat:
7610 goto unimplemented;
7611 #endif
7612 #ifdef TARGET_NR_pause /* not on alpha */
7613 case TARGET_NR_pause:
7614 if (!block_signals()) {
7615 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7617 ret = -TARGET_EINTR;
7618 break;
7619 #endif
7620 #ifdef TARGET_NR_utime
7621 case TARGET_NR_utime:
7623 struct utimbuf tbuf, *host_tbuf;
7624 struct target_utimbuf *target_tbuf;
7625 if (arg2) {
7626 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7627 goto efault;
7628 tbuf.actime = tswapal(target_tbuf->actime);
7629 tbuf.modtime = tswapal(target_tbuf->modtime);
7630 unlock_user_struct(target_tbuf, arg2, 0);
7631 host_tbuf = &tbuf;
7632 } else {
7633 host_tbuf = NULL;
7635 if (!(p = lock_user_string(arg1)))
7636 goto efault;
7637 ret = get_errno(utime(p, host_tbuf));
7638 unlock_user(p, arg1, 0);
7640 break;
7641 #endif
7642 #ifdef TARGET_NR_utimes
7643 case TARGET_NR_utimes:
7645 struct timeval *tvp, tv[2];
7646 if (arg2) {
7647 if (copy_from_user_timeval(&tv[0], arg2)
7648 || copy_from_user_timeval(&tv[1],
7649 arg2 + sizeof(struct target_timeval)))
7650 goto efault;
7651 tvp = tv;
7652 } else {
7653 tvp = NULL;
7655 if (!(p = lock_user_string(arg1)))
7656 goto efault;
7657 ret = get_errno(utimes(p, tvp));
7658 unlock_user(p, arg1, 0);
7660 break;
7661 #endif
7662 #if defined(TARGET_NR_futimesat)
7663 case TARGET_NR_futimesat:
7665 struct timeval *tvp, tv[2];
7666 if (arg3) {
7667 if (copy_from_user_timeval(&tv[0], arg3)
7668 || copy_from_user_timeval(&tv[1],
7669 arg3 + sizeof(struct target_timeval)))
7670 goto efault;
7671 tvp = tv;
7672 } else {
7673 tvp = NULL;
7675 if (!(p = lock_user_string(arg2)))
7676 goto efault;
7677 ret = get_errno(futimesat(arg1, path(p), tvp));
7678 unlock_user(p, arg2, 0);
7680 break;
7681 #endif
7682 #ifdef TARGET_NR_stty
7683 case TARGET_NR_stty:
7684 goto unimplemented;
7685 #endif
7686 #ifdef TARGET_NR_gtty
7687 case TARGET_NR_gtty:
7688 goto unimplemented;
7689 #endif
7690 #ifdef TARGET_NR_access
7691 case TARGET_NR_access:
7692 if (!(p = lock_user_string(arg1)))
7693 goto efault;
7694 ret = get_errno(access(path(p), arg2));
7695 unlock_user(p, arg1, 0);
7696 break;
7697 #endif
7698 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7699 case TARGET_NR_faccessat:
7700 if (!(p = lock_user_string(arg2)))
7701 goto efault;
7702 ret = get_errno(faccessat(arg1, p, arg3, 0));
7703 unlock_user(p, arg2, 0);
7704 break;
7705 #endif
7706 #ifdef TARGET_NR_nice /* not on alpha */
7707 case TARGET_NR_nice:
7708 ret = get_errno(nice(arg1));
7709 break;
7710 #endif
7711 #ifdef TARGET_NR_ftime
7712 case TARGET_NR_ftime:
7713 goto unimplemented;
7714 #endif
7715 case TARGET_NR_sync:
7716 sync();
7717 ret = 0;
7718 break;
7719 case TARGET_NR_kill:
7720 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7721 break;
7722 #ifdef TARGET_NR_rename
7723 case TARGET_NR_rename:
7725 void *p2;
7726 p = lock_user_string(arg1);
7727 p2 = lock_user_string(arg2);
7728 if (!p || !p2)
7729 ret = -TARGET_EFAULT;
7730 else
7731 ret = get_errno(rename(p, p2));
7732 unlock_user(p2, arg2, 0);
7733 unlock_user(p, arg1, 0);
7735 break;
7736 #endif
7737 #if defined(TARGET_NR_renameat)
7738 case TARGET_NR_renameat:
7740 void *p2;
7741 p = lock_user_string(arg2);
7742 p2 = lock_user_string(arg4);
7743 if (!p || !p2)
7744 ret = -TARGET_EFAULT;
7745 else
7746 ret = get_errno(renameat(arg1, p, arg3, p2));
7747 unlock_user(p2, arg4, 0);
7748 unlock_user(p, arg2, 0);
7750 break;
7751 #endif
7752 #ifdef TARGET_NR_mkdir
7753 case TARGET_NR_mkdir:
7754 if (!(p = lock_user_string(arg1)))
7755 goto efault;
7756 ret = get_errno(mkdir(p, arg2));
7757 unlock_user(p, arg1, 0);
7758 break;
7759 #endif
7760 #if defined(TARGET_NR_mkdirat)
7761 case TARGET_NR_mkdirat:
7762 if (!(p = lock_user_string(arg2)))
7763 goto efault;
7764 ret = get_errno(mkdirat(arg1, p, arg3));
7765 unlock_user(p, arg2, 0);
7766 break;
7767 #endif
7768 #ifdef TARGET_NR_rmdir
7769 case TARGET_NR_rmdir:
7770 if (!(p = lock_user_string(arg1)))
7771 goto efault;
7772 ret = get_errno(rmdir(p));
7773 unlock_user(p, arg1, 0);
7774 break;
7775 #endif
7776 case TARGET_NR_dup:
7777 ret = get_errno(dup(arg1));
7778 if (ret >= 0) {
7779 fd_trans_dup(arg1, ret);
7781 break;
7782 #ifdef TARGET_NR_pipe
7783 case TARGET_NR_pipe:
7784 ret = do_pipe(cpu_env, arg1, 0, 0);
7785 break;
7786 #endif
7787 #ifdef TARGET_NR_pipe2
7788 case TARGET_NR_pipe2:
7789 ret = do_pipe(cpu_env, arg1,
7790 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7791 break;
7792 #endif
7793 case TARGET_NR_times:
7795 struct target_tms *tmsp;
7796 struct tms tms;
7797 ret = get_errno(times(&tms));
7798 if (arg1) {
7799 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7800 if (!tmsp)
7801 goto efault;
7802 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7803 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7804 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7805 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7807 if (!is_error(ret))
7808 ret = host_to_target_clock_t(ret);
7810 break;
7811 #ifdef TARGET_NR_prof
7812 case TARGET_NR_prof:
7813 goto unimplemented;
7814 #endif
7815 #ifdef TARGET_NR_signal
7816 case TARGET_NR_signal:
7817 goto unimplemented;
7818 #endif
7819 case TARGET_NR_acct:
7820 if (arg1 == 0) {
7821 ret = get_errno(acct(NULL));
7822 } else {
7823 if (!(p = lock_user_string(arg1)))
7824 goto efault;
7825 ret = get_errno(acct(path(p)));
7826 unlock_user(p, arg1, 0);
7828 break;
7829 #ifdef TARGET_NR_umount2
/* umount2(2): guest path in arg1, flags (MNT_FORCE etc.) in arg2,
 * passed through unconverted since the flag values match the host's. */
7830 case TARGET_NR_umount2:
7831 if (!(p = lock_user_string(arg1)))
7832 goto efault;
7833 ret = get_errno(umount2(p, arg2));
7834 unlock_user(p, arg1, 0);
7835 break;
7836 #endif
7837 #ifdef TARGET_NR_lock
/* Obsolete syscall slot; never implemented in Linux either. */
7838 case TARGET_NR_lock:
7839 goto unimplemented;
7840 #endif
/* ioctl(2)/fcntl(2) need per-request argument translation between the
 * guest and host ABI; that work lives in the do_ioctl()/do_fcntl()
 * helpers elsewhere in this file. */
7841 case TARGET_NR_ioctl:
7842 ret = do_ioctl(arg1, arg2, arg3);
7843 break;
7844 case TARGET_NR_fcntl:
7845 ret = do_fcntl(arg1, arg2, arg3);
7846 break;
7847 #ifdef TARGET_NR_mpx
/* Obsolete/unimplemented syscall slot (see mpx(2)). */
7848 case TARGET_NR_mpx:
7849 goto unimplemented;
7850 #endif
/* setpgid(2): pids are shared with the host, so direct pass-through. */
7851 case TARGET_NR_setpgid:
7852 ret = get_errno(setpgid(arg1, arg2));
7853 break;
7854 #ifdef TARGET_NR_ulimit
/* Obsolete syscall slot; modern code uses getrlimit/setrlimit. */
7855 case TARGET_NR_ulimit:
7856 goto unimplemented;
7857 #endif
7858 #ifdef TARGET_NR_oldolduname
7859 case TARGET_NR_oldolduname:
7860 goto unimplemented;
7861 #endif
/* umask(2): mask bits are ABI-identical between guest and host. */
7862 case TARGET_NR_umask:
7863 ret = get_errno(umask(arg1));
7864 break;
/* chroot(2): copy the guest path in, call the host syscall.  Note this
 * chroots the whole emulator process, which needs CAP_SYS_CHROOT. */
7865 case TARGET_NR_chroot:
7866 if (!(p = lock_user_string(arg1)))
7867 goto efault;
7868 ret = get_errno(chroot(p));
7869 unlock_user(p, arg1, 0);
7870 break;
7871 #ifdef TARGET_NR_ustat
7872 case TARGET_NR_ustat:
7873 goto unimplemented;
7874 #endif
7875 #ifdef TARGET_NR_dup2
7876 case TARGET_NR_dup2:
7877 ret = get_errno(dup2(arg1, arg2));
7878 if (ret >= 0) {
7879 fd_trans_dup(arg1, arg2);
7881 break;
7882 #endif
7883 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7884 case TARGET_NR_dup3:
7885 ret = get_errno(dup3(arg1, arg2, arg3));
7886 if (ret >= 0) {
7887 fd_trans_dup(arg1, arg2);
7889 break;
7890 #endif
7891 #ifdef TARGET_NR_getppid /* not on alpha */
7892 case TARGET_NR_getppid:
7893 ret = get_errno(getppid());
7894 break;
7895 #endif
7896 #ifdef TARGET_NR_getpgrp
7897 case TARGET_NR_getpgrp:
7898 ret = get_errno(getpgrp());
7899 break;
7900 #endif
7901 case TARGET_NR_setsid:
7902 ret = get_errno(setsid());
7903 break;
7904 #ifdef TARGET_NR_sigaction
7905 case TARGET_NR_sigaction:
7907 #if defined(TARGET_ALPHA)
7908 struct target_sigaction act, oact, *pact = 0;
7909 struct target_old_sigaction *old_act;
7910 if (arg2) {
7911 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7912 goto efault;
7913 act._sa_handler = old_act->_sa_handler;
7914 target_siginitset(&act.sa_mask, old_act->sa_mask);
7915 act.sa_flags = old_act->sa_flags;
7916 act.sa_restorer = 0;
7917 unlock_user_struct(old_act, arg2, 0);
7918 pact = &act;
7920 ret = get_errno(do_sigaction(arg1, pact, &oact));
7921 if (!is_error(ret) && arg3) {
7922 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7923 goto efault;
7924 old_act->_sa_handler = oact._sa_handler;
7925 old_act->sa_mask = oact.sa_mask.sig[0];
7926 old_act->sa_flags = oact.sa_flags;
7927 unlock_user_struct(old_act, arg3, 1);
7929 #elif defined(TARGET_MIPS)
7930 struct target_sigaction act, oact, *pact, *old_act;
7932 if (arg2) {
7933 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7934 goto efault;
7935 act._sa_handler = old_act->_sa_handler;
7936 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7937 act.sa_flags = old_act->sa_flags;
7938 unlock_user_struct(old_act, arg2, 0);
7939 pact = &act;
7940 } else {
7941 pact = NULL;
7944 ret = get_errno(do_sigaction(arg1, pact, &oact));
7946 if (!is_error(ret) && arg3) {
7947 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7948 goto efault;
7949 old_act->_sa_handler = oact._sa_handler;
7950 old_act->sa_flags = oact.sa_flags;
7951 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7952 old_act->sa_mask.sig[1] = 0;
7953 old_act->sa_mask.sig[2] = 0;
7954 old_act->sa_mask.sig[3] = 0;
7955 unlock_user_struct(old_act, arg3, 1);
7957 #else
7958 struct target_old_sigaction *old_act;
7959 struct target_sigaction act, oact, *pact;
7960 if (arg2) {
7961 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7962 goto efault;
7963 act._sa_handler = old_act->_sa_handler;
7964 target_siginitset(&act.sa_mask, old_act->sa_mask);
7965 act.sa_flags = old_act->sa_flags;
7966 act.sa_restorer = old_act->sa_restorer;
7967 unlock_user_struct(old_act, arg2, 0);
7968 pact = &act;
7969 } else {
7970 pact = NULL;
7972 ret = get_errno(do_sigaction(arg1, pact, &oact));
7973 if (!is_error(ret) && arg3) {
7974 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7975 goto efault;
7976 old_act->_sa_handler = oact._sa_handler;
7977 old_act->sa_mask = oact.sa_mask.sig[0];
7978 old_act->sa_flags = oact.sa_flags;
7979 old_act->sa_restorer = oact.sa_restorer;
7980 unlock_user_struct(old_act, arg3, 1);
7982 #endif
7984 break;
7985 #endif
7986 case TARGET_NR_rt_sigaction:
7988 #if defined(TARGET_ALPHA)
7989 struct target_sigaction act, oact, *pact = 0;
7990 struct target_rt_sigaction *rt_act;
7992 if (arg4 != sizeof(target_sigset_t)) {
7993 ret = -TARGET_EINVAL;
7994 break;
7996 if (arg2) {
7997 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7998 goto efault;
7999 act._sa_handler = rt_act->_sa_handler;
8000 act.sa_mask = rt_act->sa_mask;
8001 act.sa_flags = rt_act->sa_flags;
8002 act.sa_restorer = arg5;
8003 unlock_user_struct(rt_act, arg2, 0);
8004 pact = &act;
8006 ret = get_errno(do_sigaction(arg1, pact, &oact));
8007 if (!is_error(ret) && arg3) {
8008 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8009 goto efault;
8010 rt_act->_sa_handler = oact._sa_handler;
8011 rt_act->sa_mask = oact.sa_mask;
8012 rt_act->sa_flags = oact.sa_flags;
8013 unlock_user_struct(rt_act, arg3, 1);
8015 #else
8016 struct target_sigaction *act;
8017 struct target_sigaction *oact;
8019 if (arg4 != sizeof(target_sigset_t)) {
8020 ret = -TARGET_EINVAL;
8021 break;
8023 if (arg2) {
8024 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8025 goto efault;
8026 } else
8027 act = NULL;
8028 if (arg3) {
8029 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8030 ret = -TARGET_EFAULT;
8031 goto rt_sigaction_fail;
8033 } else
8034 oact = NULL;
8035 ret = get_errno(do_sigaction(arg1, act, oact));
8036 rt_sigaction_fail:
8037 if (act)
8038 unlock_user_struct(act, arg2, 0);
8039 if (oact)
8040 unlock_user_struct(oact, arg3, 1);
8041 #endif
8043 break;
8044 #ifdef TARGET_NR_sgetmask /* not on alpha */
8045 case TARGET_NR_sgetmask:
8047 sigset_t cur_set;
8048 abi_ulong target_set;
8049 ret = do_sigprocmask(0, NULL, &cur_set);
8050 if (!ret) {
8051 host_to_target_old_sigset(&target_set, &cur_set);
8052 ret = target_set;
8055 break;
8056 #endif
8057 #ifdef TARGET_NR_ssetmask /* not on alpha */
8058 case TARGET_NR_ssetmask:
8060 sigset_t set, oset, cur_set;
8061 abi_ulong target_set = arg1;
8062 /* We only have one word of the new mask so we must read
8063 * the rest of it with do_sigprocmask() and OR in this word.
8064 * We are guaranteed that a do_sigprocmask() that only queries
8065 * the signal mask will not fail.
8067 ret = do_sigprocmask(0, NULL, &cur_set);
8068 assert(!ret);
8069 target_to_host_old_sigset(&set, &target_set);
8070 sigorset(&set, &set, &cur_set);
8071 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8072 if (!ret) {
8073 host_to_target_old_sigset(&target_set, &oset);
8074 ret = target_set;
8077 break;
8078 #endif
8079 #ifdef TARGET_NR_sigprocmask
8080 case TARGET_NR_sigprocmask:
8082 #if defined(TARGET_ALPHA)
8083 sigset_t set, oldset;
8084 abi_ulong mask;
8085 int how;
8087 switch (arg1) {
8088 case TARGET_SIG_BLOCK:
8089 how = SIG_BLOCK;
8090 break;
8091 case TARGET_SIG_UNBLOCK:
8092 how = SIG_UNBLOCK;
8093 break;
8094 case TARGET_SIG_SETMASK:
8095 how = SIG_SETMASK;
8096 break;
8097 default:
8098 ret = -TARGET_EINVAL;
8099 goto fail;
8101 mask = arg2;
8102 target_to_host_old_sigset(&set, &mask);
8104 ret = do_sigprocmask(how, &set, &oldset);
8105 if (!is_error(ret)) {
8106 host_to_target_old_sigset(&mask, &oldset);
8107 ret = mask;
8108 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8110 #else
8111 sigset_t set, oldset, *set_ptr;
8112 int how;
8114 if (arg2) {
8115 switch (arg1) {
8116 case TARGET_SIG_BLOCK:
8117 how = SIG_BLOCK;
8118 break;
8119 case TARGET_SIG_UNBLOCK:
8120 how = SIG_UNBLOCK;
8121 break;
8122 case TARGET_SIG_SETMASK:
8123 how = SIG_SETMASK;
8124 break;
8125 default:
8126 ret = -TARGET_EINVAL;
8127 goto fail;
8129 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8130 goto efault;
8131 target_to_host_old_sigset(&set, p);
8132 unlock_user(p, arg2, 0);
8133 set_ptr = &set;
8134 } else {
8135 how = 0;
8136 set_ptr = NULL;
8138 ret = do_sigprocmask(how, set_ptr, &oldset);
8139 if (!is_error(ret) && arg3) {
8140 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8141 goto efault;
8142 host_to_target_old_sigset(p, &oldset);
8143 unlock_user(p, arg3, sizeof(target_sigset_t));
8145 #endif
8147 break;
8148 #endif
8149 case TARGET_NR_rt_sigprocmask:
8151 int how = arg1;
8152 sigset_t set, oldset, *set_ptr;
8154 if (arg4 != sizeof(target_sigset_t)) {
8155 ret = -TARGET_EINVAL;
8156 break;
8159 if (arg2) {
8160 switch(how) {
8161 case TARGET_SIG_BLOCK:
8162 how = SIG_BLOCK;
8163 break;
8164 case TARGET_SIG_UNBLOCK:
8165 how = SIG_UNBLOCK;
8166 break;
8167 case TARGET_SIG_SETMASK:
8168 how = SIG_SETMASK;
8169 break;
8170 default:
8171 ret = -TARGET_EINVAL;
8172 goto fail;
8174 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8175 goto efault;
8176 target_to_host_sigset(&set, p);
8177 unlock_user(p, arg2, 0);
8178 set_ptr = &set;
8179 } else {
8180 how = 0;
8181 set_ptr = NULL;
8183 ret = do_sigprocmask(how, set_ptr, &oldset);
8184 if (!is_error(ret) && arg3) {
8185 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8186 goto efault;
8187 host_to_target_sigset(p, &oldset);
8188 unlock_user(p, arg3, sizeof(target_sigset_t));
8191 break;
8192 #ifdef TARGET_NR_sigpending
8193 case TARGET_NR_sigpending:
8195 sigset_t set;
8196 ret = get_errno(sigpending(&set));
8197 if (!is_error(ret)) {
8198 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8199 goto efault;
8200 host_to_target_old_sigset(p, &set);
8201 unlock_user(p, arg1, sizeof(target_sigset_t));
8204 break;
8205 #endif
8206 case TARGET_NR_rt_sigpending:
8208 sigset_t set;
8210 /* Yes, this check is >, not != like most. We follow the kernel's
8211 * logic and it does it like this because it implements
8212 * NR_sigpending through the same code path, and in that case
8213 * the old_sigset_t is smaller in size.
8215 if (arg2 > sizeof(target_sigset_t)) {
8216 ret = -TARGET_EINVAL;
8217 break;
8220 ret = get_errno(sigpending(&set));
8221 if (!is_error(ret)) {
8222 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8223 goto efault;
8224 host_to_target_sigset(p, &set);
8225 unlock_user(p, arg1, sizeof(target_sigset_t));
8228 break;
8229 #ifdef TARGET_NR_sigsuspend
8230 case TARGET_NR_sigsuspend:
8232 TaskState *ts = cpu->opaque;
8233 #if defined(TARGET_ALPHA)
8234 abi_ulong mask = arg1;
8235 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8236 #else
8237 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8238 goto efault;
8239 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8240 unlock_user(p, arg1, 0);
8241 #endif
8242 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8243 SIGSET_T_SIZE));
8244 if (ret != -TARGET_ERESTARTSYS) {
8245 ts->in_sigsuspend = 1;
8248 break;
8249 #endif
8250 case TARGET_NR_rt_sigsuspend:
8252 TaskState *ts = cpu->opaque;
8254 if (arg2 != sizeof(target_sigset_t)) {
8255 ret = -TARGET_EINVAL;
8256 break;
8258 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8259 goto efault;
8260 target_to_host_sigset(&ts->sigsuspend_mask, p);
8261 unlock_user(p, arg1, 0);
8262 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8263 SIGSET_T_SIZE));
8264 if (ret != -TARGET_ERESTARTSYS) {
8265 ts->in_sigsuspend = 1;
8268 break;
8269 case TARGET_NR_rt_sigtimedwait:
8271 sigset_t set;
8272 struct timespec uts, *puts;
8273 siginfo_t uinfo;
8275 if (arg4 != sizeof(target_sigset_t)) {
8276 ret = -TARGET_EINVAL;
8277 break;
8280 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8281 goto efault;
8282 target_to_host_sigset(&set, p);
8283 unlock_user(p, arg1, 0);
8284 if (arg3) {
8285 puts = &uts;
8286 target_to_host_timespec(puts, arg3);
8287 } else {
8288 puts = NULL;
8290 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8291 SIGSET_T_SIZE));
8292 if (!is_error(ret)) {
8293 if (arg2) {
8294 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8296 if (!p) {
8297 goto efault;
8299 host_to_target_siginfo(p, &uinfo);
8300 unlock_user(p, arg2, sizeof(target_siginfo_t));
8302 ret = host_to_target_signal(ret);
8305 break;
8306 case TARGET_NR_rt_sigqueueinfo:
8308 siginfo_t uinfo;
8310 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8311 if (!p) {
8312 goto efault;
8314 target_to_host_siginfo(&uinfo, p);
8315 unlock_user(p, arg1, 0);
8316 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8318 break;
8319 #ifdef TARGET_NR_sigreturn
8320 case TARGET_NR_sigreturn:
8321 if (block_signals()) {
8322 ret = -TARGET_ERESTARTSYS;
8323 } else {
8324 ret = do_sigreturn(cpu_env);
8326 break;
8327 #endif
8328 case TARGET_NR_rt_sigreturn:
8329 if (block_signals()) {
8330 ret = -TARGET_ERESTARTSYS;
8331 } else {
8332 ret = do_rt_sigreturn(cpu_env);
8334 break;
/* sethostname(2): arg1 is the guest buffer, arg2 its length; the name
 * is not NUL-terminated by contract but lock_user_string() still gives
 * us a host copy to pass through. */
8335 case TARGET_NR_sethostname:
8336 if (!(p = lock_user_string(arg1)))
8337 goto efault;
8338 ret = get_errno(sethostname(p, arg2));
8339 unlock_user(p, arg1, 0);
8340 break;
8341 case TARGET_NR_setrlimit:
8343 int resource = target_to_host_resource(arg1);
8344 struct target_rlimit *target_rlim;
8345 struct rlimit rlim;
8346 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8347 goto efault;
8348 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8349 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8350 unlock_user_struct(target_rlim, arg2, 0);
8351 ret = get_errno(setrlimit(resource, &rlim));
8353 break;
8354 case TARGET_NR_getrlimit:
8356 int resource = target_to_host_resource(arg1);
8357 struct target_rlimit *target_rlim;
8358 struct rlimit rlim;
8360 ret = get_errno(getrlimit(resource, &rlim));
8361 if (!is_error(ret)) {
8362 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8363 goto efault;
8364 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8365 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8366 unlock_user_struct(target_rlim, arg2, 1);
8369 break;
8370 case TARGET_NR_getrusage:
8372 struct rusage rusage;
8373 ret = get_errno(getrusage(arg1, &rusage));
8374 if (!is_error(ret)) {
8375 ret = host_to_target_rusage(arg2, &rusage);
8378 break;
8379 case TARGET_NR_gettimeofday:
8381 struct timeval tv;
8382 ret = get_errno(gettimeofday(&tv, NULL));
8383 if (!is_error(ret)) {
8384 if (copy_to_user_timeval(arg1, &tv))
8385 goto efault;
8388 break;
8389 case TARGET_NR_settimeofday:
8391 struct timeval tv, *ptv = NULL;
8392 struct timezone tz, *ptz = NULL;
8394 if (arg1) {
8395 if (copy_from_user_timeval(&tv, arg1)) {
8396 goto efault;
8398 ptv = &tv;
8401 if (arg2) {
8402 if (copy_from_user_timezone(&tz, arg2)) {
8403 goto efault;
8405 ptz = &tz;
8408 ret = get_errno(settimeofday(ptv, ptz));
8410 break;
8411 #if defined(TARGET_NR_select)
8412 case TARGET_NR_select:
8413 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8414 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8415 #else
8417 struct target_sel_arg_struct *sel;
8418 abi_ulong inp, outp, exp, tvp;
8419 long nsel;
8421 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8422 goto efault;
8423 nsel = tswapal(sel->n);
8424 inp = tswapal(sel->inp);
8425 outp = tswapal(sel->outp);
8426 exp = tswapal(sel->exp);
8427 tvp = tswapal(sel->tvp);
8428 unlock_user_struct(sel, arg1, 0);
8429 ret = do_select(nsel, inp, outp, exp, tvp);
8431 #endif
8432 break;
8433 #endif
8434 #ifdef TARGET_NR_pselect6
8435 case TARGET_NR_pselect6:
8437 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8438 fd_set rfds, wfds, efds;
8439 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8440 struct timespec ts, *ts_ptr;
8443 * The 6th arg is actually two args smashed together,
8444 * so we cannot use the C library.
8446 sigset_t set;
8447 struct {
8448 sigset_t *set;
8449 size_t size;
8450 } sig, *sig_ptr;
8452 abi_ulong arg_sigset, arg_sigsize, *arg7;
8453 target_sigset_t *target_sigset;
8455 n = arg1;
8456 rfd_addr = arg2;
8457 wfd_addr = arg3;
8458 efd_addr = arg4;
8459 ts_addr = arg5;
8461 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8462 if (ret) {
8463 goto fail;
8465 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8466 if (ret) {
8467 goto fail;
8469 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8470 if (ret) {
8471 goto fail;
8475 * This takes a timespec, and not a timeval, so we cannot
8476 * use the do_select() helper ...
8478 if (ts_addr) {
8479 if (target_to_host_timespec(&ts, ts_addr)) {
8480 goto efault;
8482 ts_ptr = &ts;
8483 } else {
8484 ts_ptr = NULL;
8487 /* Extract the two packed args for the sigset */
8488 if (arg6) {
8489 sig_ptr = &sig;
8490 sig.size = SIGSET_T_SIZE;
8492 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8493 if (!arg7) {
8494 goto efault;
8496 arg_sigset = tswapal(arg7[0]);
8497 arg_sigsize = tswapal(arg7[1]);
8498 unlock_user(arg7, arg6, 0);
8500 if (arg_sigset) {
8501 sig.set = &set;
8502 if (arg_sigsize != sizeof(*target_sigset)) {
8503 /* Like the kernel, we enforce correct size sigsets */
8504 ret = -TARGET_EINVAL;
8505 goto fail;
8507 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8508 sizeof(*target_sigset), 1);
8509 if (!target_sigset) {
8510 goto efault;
8512 target_to_host_sigset(&set, target_sigset);
8513 unlock_user(target_sigset, arg_sigset, 0);
8514 } else {
8515 sig.set = NULL;
8517 } else {
8518 sig_ptr = NULL;
8521 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8522 ts_ptr, sig_ptr));
8524 if (!is_error(ret)) {
8525 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8526 goto efault;
8527 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8528 goto efault;
8529 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8530 goto efault;
8532 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8533 goto efault;
8536 break;
8537 #endif
8538 #ifdef TARGET_NR_symlink
8539 case TARGET_NR_symlink:
8541 void *p2;
8542 p = lock_user_string(arg1);
8543 p2 = lock_user_string(arg2);
8544 if (!p || !p2)
8545 ret = -TARGET_EFAULT;
8546 else
8547 ret = get_errno(symlink(p, p2));
8548 unlock_user(p2, arg2, 0);
8549 unlock_user(p, arg1, 0);
8551 break;
8552 #endif
8553 #if defined(TARGET_NR_symlinkat)
8554 case TARGET_NR_symlinkat:
8556 void *p2;
8557 p = lock_user_string(arg1);
8558 p2 = lock_user_string(arg3);
8559 if (!p || !p2)
8560 ret = -TARGET_EFAULT;
8561 else
8562 ret = get_errno(symlinkat(p, arg2, p2));
8563 unlock_user(p2, arg3, 0);
8564 unlock_user(p, arg1, 0);
8566 break;
8567 #endif
8568 #ifdef TARGET_NR_oldlstat
8569 case TARGET_NR_oldlstat:
8570 goto unimplemented;
8571 #endif
8572 #ifdef TARGET_NR_readlink
8573 case TARGET_NR_readlink:
8575 void *p2;
8576 p = lock_user_string(arg1);
8577 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8578 if (!p || !p2) {
8579 ret = -TARGET_EFAULT;
8580 } else if (!arg3) {
8581 /* Short circuit this for the magic exe check. */
8582 ret = -TARGET_EINVAL;
8583 } else if (is_proc_myself((const char *)p, "exe")) {
8584 char real[PATH_MAX], *temp;
8585 temp = realpath(exec_path, real);
8586 /* Return value is # of bytes that we wrote to the buffer. */
8587 if (temp == NULL) {
8588 ret = get_errno(-1);
8589 } else {
8590 /* Don't worry about sign mismatch as earlier mapping
8591 * logic would have thrown a bad address error. */
8592 ret = MIN(strlen(real), arg3);
8593 /* We cannot NUL terminate the string. */
8594 memcpy(p2, real, ret);
8596 } else {
8597 ret = get_errno(readlink(path(p), p2, arg3));
8599 unlock_user(p2, arg2, ret);
8600 unlock_user(p, arg1, 0);
8602 break;
8603 #endif
8604 #if defined(TARGET_NR_readlinkat)
8605 case TARGET_NR_readlinkat:
8607 void *p2;
8608 p = lock_user_string(arg2);
8609 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8610 if (!p || !p2) {
8611 ret = -TARGET_EFAULT;
8612 } else if (is_proc_myself((const char *)p, "exe")) {
8613 char real[PATH_MAX], *temp;
8614 temp = realpath(exec_path, real);
8615 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8616 snprintf((char *)p2, arg4, "%s", real);
8617 } else {
8618 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8620 unlock_user(p2, arg3, ret);
8621 unlock_user(p, arg2, 0);
8623 break;
8624 #endif
8625 #ifdef TARGET_NR_uselib
8626 case TARGET_NR_uselib:
8627 goto unimplemented;
8628 #endif
8629 #ifdef TARGET_NR_swapon
8630 case TARGET_NR_swapon:
8631 if (!(p = lock_user_string(arg1)))
8632 goto efault;
8633 ret = get_errno(swapon(p, arg2));
8634 unlock_user(p, arg1, 0);
8635 break;
8636 #endif
8637 case TARGET_NR_reboot:
8638 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8639 /* arg4 must be ignored in all other cases */
8640 p = lock_user_string(arg4);
8641 if (!p) {
8642 goto efault;
8644 ret = get_errno(reboot(arg1, arg2, arg3, p));
8645 unlock_user(p, arg4, 0);
8646 } else {
8647 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8649 break;
8650 #ifdef TARGET_NR_readdir
8651 case TARGET_NR_readdir:
8652 goto unimplemented;
8653 #endif
8654 #ifdef TARGET_NR_mmap
8655 case TARGET_NR_mmap:
8656 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8657 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8658 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8659 || defined(TARGET_S390X)
8661 abi_ulong *v;
8662 abi_ulong v1, v2, v3, v4, v5, v6;
8663 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8664 goto efault;
8665 v1 = tswapal(v[0]);
8666 v2 = tswapal(v[1]);
8667 v3 = tswapal(v[2]);
8668 v4 = tswapal(v[3]);
8669 v5 = tswapal(v[4]);
8670 v6 = tswapal(v[5]);
8671 unlock_user(v, arg1, 0);
8672 ret = get_errno(target_mmap(v1, v2, v3,
8673 target_to_host_bitmask(v4, mmap_flags_tbl),
8674 v5, v6));
8676 #else
8677 ret = get_errno(target_mmap(arg1, arg2, arg3,
8678 target_to_host_bitmask(arg4, mmap_flags_tbl),
8679 arg5,
8680 arg6));
8681 #endif
8682 break;
8683 #endif
8684 #ifdef TARGET_NR_mmap2
8685 case TARGET_NR_mmap2:
8686 #ifndef MMAP_SHIFT
8687 #define MMAP_SHIFT 12
8688 #endif
8689 ret = get_errno(target_mmap(arg1, arg2, arg3,
8690 target_to_host_bitmask(arg4, mmap_flags_tbl),
8691 arg5,
8692 arg6 << MMAP_SHIFT));
8693 break;
8694 #endif
8695 case TARGET_NR_munmap:
8696 ret = get_errno(target_munmap(arg1, arg2));
8697 break;
8698 case TARGET_NR_mprotect:
8700 TaskState *ts = cpu->opaque;
8701 /* Special hack to detect libc making the stack executable. */
8702 if ((arg3 & PROT_GROWSDOWN)
8703 && arg1 >= ts->info->stack_limit
8704 && arg1 <= ts->info->start_stack) {
8705 arg3 &= ~PROT_GROWSDOWN;
8706 arg2 = arg2 + arg1 - ts->info->stack_limit;
8707 arg1 = ts->info->stack_limit;
8710 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8711 break;
8712 #ifdef TARGET_NR_mremap
/* mremap(2): handled by target_mremap(), which keeps the guest address
 * space bookkeeping (page flags, h2g mapping) consistent. */
8713 case TARGET_NR_mremap:
8714 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8715 break;
8716 #endif
8717 /* ??? msync/mlock/munlock are broken for softmmu. */
/* The cases below translate the guest address with g2h() and call the
 * host syscall directly on the underlying host mapping. */
8718 #ifdef TARGET_NR_msync
8719 case TARGET_NR_msync:
8720 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8721 break;
8722 #endif
8723 #ifdef TARGET_NR_mlock
8724 case TARGET_NR_mlock:
8725 ret = get_errno(mlock(g2h(arg1), arg2));
8726 break;
8727 #endif
8728 #ifdef TARGET_NR_munlock
8729 case TARGET_NR_munlock:
8730 ret = get_errno(munlock(g2h(arg1), arg2));
8731 break;
8732 #endif
8733 #ifdef TARGET_NR_mlockall
/* mlockall(2): MCL_* flag values differ per target, so translate. */
8734 case TARGET_NR_mlockall:
8735 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8736 break;
8737 #endif
8738 #ifdef TARGET_NR_munlockall
8739 case TARGET_NR_munlockall:
8740 ret = get_errno(munlockall());
8741 break;
8742 #endif
/* truncate(2): guest path in arg1, new length in arg2. */
8743 case TARGET_NR_truncate:
8744 if (!(p = lock_user_string(arg1)))
8745 goto efault;
8746 ret = get_errno(truncate(p, arg2));
8747 unlock_user(p, arg1, 0);
8748 break;
/* ftruncate(2)/fchmod(2): fd-based, no pointer translation needed. */
8749 case TARGET_NR_ftruncate:
8750 ret = get_errno(ftruncate(arg1, arg2));
8751 break;
8752 case TARGET_NR_fchmod:
8753 ret = get_errno(fchmod(arg1, arg2));
8754 break;
8755 #if defined(TARGET_NR_fchmodat)
8756 case TARGET_NR_fchmodat:
8757 if (!(p = lock_user_string(arg2)))
8758 goto efault;
8759 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8760 unlock_user(p, arg2, 0);
8761 break;
8762 #endif
8763 case TARGET_NR_getpriority:
8764 /* Note that negative values are valid for getpriority, so we must
8765 differentiate based on errno settings. */
8766 errno = 0;
8767 ret = getpriority(arg1, arg2);
8768 if (ret == -1 && errno != 0) {
8769 ret = -host_to_target_errno(errno);
8770 break;
8772 #ifdef TARGET_ALPHA
8773 /* Return value is the unbiased priority. Signal no error. */
8774 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8775 #else
8776 /* Return value is a biased priority to avoid negative numbers. */
8777 ret = 20 - ret;
8778 #endif
8779 break;
8780 case TARGET_NR_setpriority:
8781 ret = get_errno(setpriority(arg1, arg2, arg3));
8782 break;
8783 #ifdef TARGET_NR_profil
8784 case TARGET_NR_profil:
8785 goto unimplemented;
8786 #endif
8787 case TARGET_NR_statfs:
8788 if (!(p = lock_user_string(arg1)))
8789 goto efault;
8790 ret = get_errno(statfs(path(p), &stfs));
8791 unlock_user(p, arg1, 0);
8792 convert_statfs:
8793 if (!is_error(ret)) {
8794 struct target_statfs *target_stfs;
8796 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8797 goto efault;
8798 __put_user(stfs.f_type, &target_stfs->f_type);
8799 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8800 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8801 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8802 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8803 __put_user(stfs.f_files, &target_stfs->f_files);
8804 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8805 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8806 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8807 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8808 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8809 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8810 unlock_user_struct(target_stfs, arg2, 1);
8812 break;
8813 case TARGET_NR_fstatfs:
8814 ret = get_errno(fstatfs(arg1, &stfs));
8815 goto convert_statfs;
8816 #ifdef TARGET_NR_statfs64
8817 case TARGET_NR_statfs64:
8818 if (!(p = lock_user_string(arg1)))
8819 goto efault;
8820 ret = get_errno(statfs(path(p), &stfs));
8821 unlock_user(p, arg1, 0);
8822 convert_statfs64:
8823 if (!is_error(ret)) {
8824 struct target_statfs64 *target_stfs;
8826 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8827 goto efault;
8828 __put_user(stfs.f_type, &target_stfs->f_type);
8829 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8830 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8831 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8832 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8833 __put_user(stfs.f_files, &target_stfs->f_files);
8834 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8835 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8836 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8837 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8838 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8839 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8840 unlock_user_struct(target_stfs, arg3, 1);
8842 break;
8843 case TARGET_NR_fstatfs64:
8844 ret = get_errno(fstatfs(arg1, &stfs));
8845 goto convert_statfs64;
8846 #endif
8847 #ifdef TARGET_NR_ioperm
8848 case TARGET_NR_ioperm:
8849 goto unimplemented;
8850 #endif
8851 #ifdef TARGET_NR_socketcall
8852 case TARGET_NR_socketcall:
8853 ret = do_socketcall(arg1, arg2);
8854 break;
8855 #endif
/* Socket syscalls: each case forwards to a do_*() helper defined
 * earlier in this file, which converts guest sockaddr structures and
 * in/out socklen_t values between guest and host layout.  accept is
 * implemented via accept4 with flags = 0. */
8856 #ifdef TARGET_NR_accept
8857 case TARGET_NR_accept:
8858 ret = do_accept4(arg1, arg2, arg3, 0);
8859 break;
8860 #endif
8861 #ifdef TARGET_NR_accept4
8862 case TARGET_NR_accept4:
8863 ret = do_accept4(arg1, arg2, arg3, arg4);
8864 break;
8865 #endif
8866 #ifdef TARGET_NR_bind
8867 case TARGET_NR_bind:
8868 ret = do_bind(arg1, arg2, arg3);
8869 break;
8870 #endif
8871 #ifdef TARGET_NR_connect
8872 case TARGET_NR_connect:
8873 ret = do_connect(arg1, arg2, arg3);
8874 break;
8875 #endif
8876 #ifdef TARGET_NR_getpeername
8877 case TARGET_NR_getpeername:
8878 ret = do_getpeername(arg1, arg2, arg3);
8879 break;
8880 #endif
8881 #ifdef TARGET_NR_getsockname
8882 case TARGET_NR_getsockname:
8883 ret = do_getsockname(arg1, arg2, arg3);
8884 break;
8885 #endif
8886 #ifdef TARGET_NR_getsockopt
8887 case TARGET_NR_getsockopt:
8888 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8889 break;
8890 #endif
8891 #ifdef TARGET_NR_listen
/* listen(2): no pointer arguments, so call the host directly. */
8892 case TARGET_NR_listen:
8893 ret = get_errno(listen(arg1, arg2));
8894 break;
8895 #endif
8896 #ifdef TARGET_NR_recv
8897 case TARGET_NR_recv:
8898 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8899 break;
8900 #endif
8901 #ifdef TARGET_NR_recvfrom
8902 case TARGET_NR_recvfrom:
8903 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8904 break;
8905 #endif
8906 #ifdef TARGET_NR_recvmsg
8907 case TARGET_NR_recvmsg:
8908 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8909 break;
8910 #endif
8911 #ifdef TARGET_NR_send
8912 case TARGET_NR_send:
8913 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8914 break;
8915 #endif
8916 #ifdef TARGET_NR_sendmsg
8917 case TARGET_NR_sendmsg:
8918 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8919 break;
8920 #endif
8921 #ifdef TARGET_NR_sendmmsg
8922 case TARGET_NR_sendmmsg:
8923 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8924 break;
8925 case TARGET_NR_recvmmsg:
8926 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8927 break;
8928 #endif
8929 #ifdef TARGET_NR_sendto
8930 case TARGET_NR_sendto:
8931 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8932 break;
8933 #endif
8934 #ifdef TARGET_NR_shutdown
8935 case TARGET_NR_shutdown:
8936 ret = get_errno(shutdown(arg1, arg2));
8937 break;
8938 #endif
8939 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8940 case TARGET_NR_getrandom:
8941 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8942 if (!p) {
8943 goto efault;
8945 ret = get_errno(getrandom(p, arg2, arg3));
8946 unlock_user(p, arg1, ret);
8947 break;
8948 #endif
8949 #ifdef TARGET_NR_socket
8950 case TARGET_NR_socket:
8951 ret = do_socket(arg1, arg2, arg3);
8952 fd_trans_unregister(ret);
8953 break;
8954 #endif
8955 #ifdef TARGET_NR_socketpair
8956 case TARGET_NR_socketpair:
8957 ret = do_socketpair(arg1, arg2, arg3, arg4);
8958 break;
8959 #endif
8960 #ifdef TARGET_NR_setsockopt
8961 case TARGET_NR_setsockopt:
8962 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8963 break;
8964 #endif
8966 case TARGET_NR_syslog:
8967 if (!(p = lock_user_string(arg2)))
8968 goto efault;
8969 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8970 unlock_user(p, arg2, 0);
8971 break;
8973 case TARGET_NR_setitimer:
8975 struct itimerval value, ovalue, *pvalue;
8977 if (arg2) {
8978 pvalue = &value;
8979 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8980 || copy_from_user_timeval(&pvalue->it_value,
8981 arg2 + sizeof(struct target_timeval)))
8982 goto efault;
8983 } else {
8984 pvalue = NULL;
8986 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8987 if (!is_error(ret) && arg3) {
8988 if (copy_to_user_timeval(arg3,
8989 &ovalue.it_interval)
8990 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8991 &ovalue.it_value))
8992 goto efault;
8995 break;
8996 case TARGET_NR_getitimer:
8998 struct itimerval value;
9000 ret = get_errno(getitimer(arg1, &value));
9001 if (!is_error(ret) && arg2) {
9002 if (copy_to_user_timeval(arg2,
9003 &value.it_interval)
9004 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9005 &value.it_value))
9006 goto efault;
9009 break;
9010 #ifdef TARGET_NR_stat
9011 case TARGET_NR_stat:
9012 if (!(p = lock_user_string(arg1)))
9013 goto efault;
9014 ret = get_errno(stat(path(p), &st));
9015 unlock_user(p, arg1, 0);
9016 goto do_stat;
9017 #endif
9018 #ifdef TARGET_NR_lstat
9019 case TARGET_NR_lstat:
9020 if (!(p = lock_user_string(arg1)))
9021 goto efault;
9022 ret = get_errno(lstat(path(p), &st));
9023 unlock_user(p, arg1, 0);
9024 goto do_stat;
9025 #endif
9026 case TARGET_NR_fstat:
9028 ret = get_errno(fstat(arg1, &st));
9029 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9030 do_stat:
9031 #endif
9032 if (!is_error(ret)) {
9033 struct target_stat *target_st;
9035 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9036 goto efault;
9037 memset(target_st, 0, sizeof(*target_st));
9038 __put_user(st.st_dev, &target_st->st_dev);
9039 __put_user(st.st_ino, &target_st->st_ino);
9040 __put_user(st.st_mode, &target_st->st_mode);
9041 __put_user(st.st_uid, &target_st->st_uid);
9042 __put_user(st.st_gid, &target_st->st_gid);
9043 __put_user(st.st_nlink, &target_st->st_nlink);
9044 __put_user(st.st_rdev, &target_st->st_rdev);
9045 __put_user(st.st_size, &target_st->st_size);
9046 __put_user(st.st_blksize, &target_st->st_blksize);
9047 __put_user(st.st_blocks, &target_st->st_blocks);
9048 __put_user(st.st_atime, &target_st->target_st_atime);
9049 __put_user(st.st_mtime, &target_st->target_st_mtime);
9050 __put_user(st.st_ctime, &target_st->target_st_ctime);
9051 unlock_user_struct(target_st, arg2, 1);
9054 break;
9055 #ifdef TARGET_NR_olduname
9056 case TARGET_NR_olduname:
9057 goto unimplemented;
9058 #endif
9059 #ifdef TARGET_NR_iopl
9060 case TARGET_NR_iopl:
9061 goto unimplemented;
9062 #endif
9063 case TARGET_NR_vhangup:
9064 ret = get_errno(vhangup());
9065 break;
9066 #ifdef TARGET_NR_idle
9067 case TARGET_NR_idle:
9068 goto unimplemented;
9069 #endif
9070 #ifdef TARGET_NR_syscall
9071 case TARGET_NR_syscall:
9072 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9073 arg6, arg7, arg8, 0);
9074 break;
9075 #endif
9076 case TARGET_NR_wait4:
9078 int status;
9079 abi_long status_ptr = arg2;
9080 struct rusage rusage, *rusage_ptr;
9081 abi_ulong target_rusage = arg4;
9082 abi_long rusage_err;
9083 if (target_rusage)
9084 rusage_ptr = &rusage;
9085 else
9086 rusage_ptr = NULL;
9087 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9088 if (!is_error(ret)) {
9089 if (status_ptr && ret) {
9090 status = host_to_target_waitstatus(status);
9091 if (put_user_s32(status, status_ptr))
9092 goto efault;
9094 if (target_rusage) {
9095 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9096 if (rusage_err) {
9097 ret = rusage_err;
9102 break;
9103 #ifdef TARGET_NR_swapoff
9104 case TARGET_NR_swapoff:
9105 if (!(p = lock_user_string(arg1)))
9106 goto efault;
9107 ret = get_errno(swapoff(p));
9108 unlock_user(p, arg1, 0);
9109 break;
9110 #endif
9111 case TARGET_NR_sysinfo:
9113 struct target_sysinfo *target_value;
9114 struct sysinfo value;
9115 ret = get_errno(sysinfo(&value));
9116 if (!is_error(ret) && arg1)
9118 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9119 goto efault;
9120 __put_user(value.uptime, &target_value->uptime);
9121 __put_user(value.loads[0], &target_value->loads[0]);
9122 __put_user(value.loads[1], &target_value->loads[1]);
9123 __put_user(value.loads[2], &target_value->loads[2]);
9124 __put_user(value.totalram, &target_value->totalram);
9125 __put_user(value.freeram, &target_value->freeram);
9126 __put_user(value.sharedram, &target_value->sharedram);
9127 __put_user(value.bufferram, &target_value->bufferram);
9128 __put_user(value.totalswap, &target_value->totalswap);
9129 __put_user(value.freeswap, &target_value->freeswap);
9130 __put_user(value.procs, &target_value->procs);
9131 __put_user(value.totalhigh, &target_value->totalhigh);
9132 __put_user(value.freehigh, &target_value->freehigh);
9133 __put_user(value.mem_unit, &target_value->mem_unit);
9134 unlock_user_struct(target_value, arg1, 1);
9137 break;
9138 #ifdef TARGET_NR_ipc
9139 case TARGET_NR_ipc:
9140 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
9141 break;
9142 #endif
9143 #ifdef TARGET_NR_semget
9144 case TARGET_NR_semget:
9145 ret = get_errno(semget(arg1, arg2, arg3));
9146 break;
9147 #endif
9148 #ifdef TARGET_NR_semop
9149 case TARGET_NR_semop:
9150 ret = do_semop(arg1, arg2, arg3);
9151 break;
9152 #endif
9153 #ifdef TARGET_NR_semctl
9154 case TARGET_NR_semctl:
9155 ret = do_semctl(arg1, arg2, arg3, arg4);
9156 break;
9157 #endif
9158 #ifdef TARGET_NR_msgctl
9159 case TARGET_NR_msgctl:
9160 ret = do_msgctl(arg1, arg2, arg3);
9161 break;
9162 #endif
9163 #ifdef TARGET_NR_msgget
9164 case TARGET_NR_msgget:
9165 ret = get_errno(msgget(arg1, arg2));
9166 break;
9167 #endif
9168 #ifdef TARGET_NR_msgrcv
9169 case TARGET_NR_msgrcv:
9170 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9171 break;
9172 #endif
9173 #ifdef TARGET_NR_msgsnd
9174 case TARGET_NR_msgsnd:
9175 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9176 break;
9177 #endif
9178 #ifdef TARGET_NR_shmget
9179 case TARGET_NR_shmget:
9180 ret = get_errno(shmget(arg1, arg2, arg3));
9181 break;
9182 #endif
9183 #ifdef TARGET_NR_shmctl
9184 case TARGET_NR_shmctl:
9185 ret = do_shmctl(arg1, arg2, arg3);
9186 break;
9187 #endif
9188 #ifdef TARGET_NR_shmat
9189 case TARGET_NR_shmat:
9190 ret = do_shmat(arg1, arg2, arg3);
9191 break;
9192 #endif
9193 #ifdef TARGET_NR_shmdt
9194 case TARGET_NR_shmdt:
9195 ret = do_shmdt(arg1);
9196 break;
9197 #endif
9198 case TARGET_NR_fsync:
9199 ret = get_errno(fsync(arg1));
9200 break;
9201 case TARGET_NR_clone:
9202 /* Linux manages to have three different orderings for its
9203 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9204 * match the kernel's CONFIG_CLONE_* settings.
9205 * Microblaze is further special in that it uses a sixth
9206 * implicit argument to clone for the TLS pointer.
9208 #if defined(TARGET_MICROBLAZE)
9209 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9210 #elif defined(TARGET_CLONE_BACKWARDS)
9211 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9212 #elif defined(TARGET_CLONE_BACKWARDS2)
9213 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9214 #else
9215 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9216 #endif
9217 break;
9218 #ifdef __NR_exit_group
9219 /* new thread calls */
9220 case TARGET_NR_exit_group:
9221 #ifdef TARGET_GPROF
9222 _mcleanup();
9223 #endif
9224 gdb_exit(cpu_env, arg1);
9225 ret = get_errno(exit_group(arg1));
9226 break;
9227 #endif
9228 case TARGET_NR_setdomainname:
9229 if (!(p = lock_user_string(arg1)))
9230 goto efault;
9231 ret = get_errno(setdomainname(p, arg2));
9232 unlock_user(p, arg1, 0);
9233 break;
9234 case TARGET_NR_uname:
9235 /* no need to transcode because we use the linux syscall */
9237 struct new_utsname * buf;
9239 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9240 goto efault;
9241 ret = get_errno(sys_uname(buf));
9242 if (!is_error(ret)) {
9243 /* Overwrite the native machine name with whatever is being
9244 emulated. */
9245 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9246 /* Allow the user to override the reported release. */
9247 if (qemu_uname_release && *qemu_uname_release) {
9248 g_strlcpy(buf->release, qemu_uname_release,
9249 sizeof(buf->release));
9252 unlock_user_struct(buf, arg1, 1);
9254 break;
9255 #ifdef TARGET_I386
9256 case TARGET_NR_modify_ldt:
9257 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9258 break;
9259 #if !defined(TARGET_X86_64)
9260 case TARGET_NR_vm86old:
9261 goto unimplemented;
9262 case TARGET_NR_vm86:
9263 ret = do_vm86(cpu_env, arg1, arg2);
9264 break;
9265 #endif
9266 #endif
9267 case TARGET_NR_adjtimex:
9268 goto unimplemented;
9269 #ifdef TARGET_NR_create_module
9270 case TARGET_NR_create_module:
9271 #endif
9272 case TARGET_NR_init_module:
9273 case TARGET_NR_delete_module:
9274 #ifdef TARGET_NR_get_kernel_syms
9275 case TARGET_NR_get_kernel_syms:
9276 #endif
9277 goto unimplemented;
9278 case TARGET_NR_quotactl:
9279 goto unimplemented;
9280 case TARGET_NR_getpgid:
9281 ret = get_errno(getpgid(arg1));
9282 break;
9283 case TARGET_NR_fchdir:
9284 ret = get_errno(fchdir(arg1));
9285 break;
9286 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9287 case TARGET_NR_bdflush:
9288 goto unimplemented;
9289 #endif
9290 #ifdef TARGET_NR_sysfs
9291 case TARGET_NR_sysfs:
9292 goto unimplemented;
9293 #endif
9294 case TARGET_NR_personality:
9295 ret = get_errno(personality(arg1));
9296 break;
9297 #ifdef TARGET_NR_afs_syscall
9298 case TARGET_NR_afs_syscall:
9299 goto unimplemented;
9300 #endif
9301 #ifdef TARGET_NR__llseek /* Not on alpha */
9302 case TARGET_NR__llseek:
9304 int64_t res;
9305 #if !defined(__NR_llseek)
9306 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
9307 if (res == -1) {
9308 ret = get_errno(res);
9309 } else {
9310 ret = 0;
9312 #else
9313 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9314 #endif
9315 if ((ret == 0) && put_user_s64(res, arg4)) {
9316 goto efault;
9319 break;
9320 #endif
9321 #ifdef TARGET_NR_getdents
9322 case TARGET_NR_getdents:
9323 #ifdef __NR_getdents
9324 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9326 struct target_dirent *target_dirp;
9327 struct linux_dirent *dirp;
9328 abi_long count = arg3;
9330 dirp = g_try_malloc(count);
9331 if (!dirp) {
9332 ret = -TARGET_ENOMEM;
9333 goto fail;
9336 ret = get_errno(sys_getdents(arg1, dirp, count));
9337 if (!is_error(ret)) {
9338 struct linux_dirent *de;
9339 struct target_dirent *tde;
9340 int len = ret;
9341 int reclen, treclen;
9342 int count1, tnamelen;
9344 count1 = 0;
9345 de = dirp;
9346 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9347 goto efault;
9348 tde = target_dirp;
9349 while (len > 0) {
9350 reclen = de->d_reclen;
9351 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9352 assert(tnamelen >= 0);
9353 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9354 assert(count1 + treclen <= count);
9355 tde->d_reclen = tswap16(treclen);
9356 tde->d_ino = tswapal(de->d_ino);
9357 tde->d_off = tswapal(de->d_off);
9358 memcpy(tde->d_name, de->d_name, tnamelen);
9359 de = (struct linux_dirent *)((char *)de + reclen);
9360 len -= reclen;
9361 tde = (struct target_dirent *)((char *)tde + treclen);
9362 count1 += treclen;
9364 ret = count1;
9365 unlock_user(target_dirp, arg2, ret);
9367 g_free(dirp);
9369 #else
9371 struct linux_dirent *dirp;
9372 abi_long count = arg3;
9374 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9375 goto efault;
9376 ret = get_errno(sys_getdents(arg1, dirp, count));
9377 if (!is_error(ret)) {
9378 struct linux_dirent *de;
9379 int len = ret;
9380 int reclen;
9381 de = dirp;
9382 while (len > 0) {
9383 reclen = de->d_reclen;
9384 if (reclen > len)
9385 break;
9386 de->d_reclen = tswap16(reclen);
9387 tswapls(&de->d_ino);
9388 tswapls(&de->d_off);
9389 de = (struct linux_dirent *)((char *)de + reclen);
9390 len -= reclen;
9393 unlock_user(dirp, arg2, ret);
9395 #endif
9396 #else
9397 /* Implement getdents in terms of getdents64 */
9399 struct linux_dirent64 *dirp;
9400 abi_long count = arg3;
9402 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9403 if (!dirp) {
9404 goto efault;
9406 ret = get_errno(sys_getdents64(arg1, dirp, count));
9407 if (!is_error(ret)) {
9408 /* Convert the dirent64 structs to target dirent. We do this
9409 * in-place, since we can guarantee that a target_dirent is no
9410 * larger than a dirent64; however this means we have to be
9411 * careful to read everything before writing in the new format.
9413 struct linux_dirent64 *de;
9414 struct target_dirent *tde;
9415 int len = ret;
9416 int tlen = 0;
9418 de = dirp;
9419 tde = (struct target_dirent *)dirp;
9420 while (len > 0) {
9421 int namelen, treclen;
9422 int reclen = de->d_reclen;
9423 uint64_t ino = de->d_ino;
9424 int64_t off = de->d_off;
9425 uint8_t type = de->d_type;
9427 namelen = strlen(de->d_name);
9428 treclen = offsetof(struct target_dirent, d_name)
9429 + namelen + 2;
9430 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9432 memmove(tde->d_name, de->d_name, namelen + 1);
9433 tde->d_ino = tswapal(ino);
9434 tde->d_off = tswapal(off);
9435 tde->d_reclen = tswap16(treclen);
9436 /* The target_dirent type is in what was formerly a padding
9437 * byte at the end of the structure:
9439 *(((char *)tde) + treclen - 1) = type;
9441 de = (struct linux_dirent64 *)((char *)de + reclen);
9442 tde = (struct target_dirent *)((char *)tde + treclen);
9443 len -= reclen;
9444 tlen += treclen;
9446 ret = tlen;
9448 unlock_user(dirp, arg2, ret);
9450 #endif
9451 break;
9452 #endif /* TARGET_NR_getdents */
9453 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9454 case TARGET_NR_getdents64:
9456 struct linux_dirent64 *dirp;
9457 abi_long count = arg3;
9458 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9459 goto efault;
9460 ret = get_errno(sys_getdents64(arg1, dirp, count));
9461 if (!is_error(ret)) {
9462 struct linux_dirent64 *de;
9463 int len = ret;
9464 int reclen;
9465 de = dirp;
9466 while (len > 0) {
9467 reclen = de->d_reclen;
9468 if (reclen > len)
9469 break;
9470 de->d_reclen = tswap16(reclen);
9471 tswap64s((uint64_t *)&de->d_ino);
9472 tswap64s((uint64_t *)&de->d_off);
9473 de = (struct linux_dirent64 *)((char *)de + reclen);
9474 len -= reclen;
9477 unlock_user(dirp, arg2, ret);
9479 break;
9480 #endif /* TARGET_NR_getdents64 */
9481 #if defined(TARGET_NR__newselect)
9482 case TARGET_NR__newselect:
9483 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9484 break;
9485 #endif
9486 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9487 # ifdef TARGET_NR_poll
9488 case TARGET_NR_poll:
9489 # endif
9490 # ifdef TARGET_NR_ppoll
9491 case TARGET_NR_ppoll:
9492 # endif
9494 struct target_pollfd *target_pfd;
9495 unsigned int nfds = arg2;
9496 struct pollfd *pfd;
9497 unsigned int i;
9499 pfd = NULL;
9500 target_pfd = NULL;
9501 if (nfds) {
9502 target_pfd = lock_user(VERIFY_WRITE, arg1,
9503 sizeof(struct target_pollfd) * nfds, 1);
9504 if (!target_pfd) {
9505 goto efault;
9508 pfd = alloca(sizeof(struct pollfd) * nfds);
9509 for (i = 0; i < nfds; i++) {
9510 pfd[i].fd = tswap32(target_pfd[i].fd);
9511 pfd[i].events = tswap16(target_pfd[i].events);
9515 switch (num) {
9516 # ifdef TARGET_NR_ppoll
9517 case TARGET_NR_ppoll:
9519 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9520 target_sigset_t *target_set;
9521 sigset_t _set, *set = &_set;
9523 if (arg3) {
9524 if (target_to_host_timespec(timeout_ts, arg3)) {
9525 unlock_user(target_pfd, arg1, 0);
9526 goto efault;
9528 } else {
9529 timeout_ts = NULL;
9532 if (arg4) {
9533 if (arg5 != sizeof(target_sigset_t)) {
9534 unlock_user(target_pfd, arg1, 0);
9535 ret = -TARGET_EINVAL;
9536 break;
9539 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9540 if (!target_set) {
9541 unlock_user(target_pfd, arg1, 0);
9542 goto efault;
9544 target_to_host_sigset(set, target_set);
9545 } else {
9546 set = NULL;
9549 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9550 set, SIGSET_T_SIZE));
9552 if (!is_error(ret) && arg3) {
9553 host_to_target_timespec(arg3, timeout_ts);
9555 if (arg4) {
9556 unlock_user(target_set, arg4, 0);
9558 break;
9560 # endif
9561 # ifdef TARGET_NR_poll
9562 case TARGET_NR_poll:
9564 struct timespec ts, *pts;
9566 if (arg3 >= 0) {
9567 /* Convert ms to secs, ns */
9568 ts.tv_sec = arg3 / 1000;
9569 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9570 pts = &ts;
9571 } else {
9572 /* -ve poll() timeout means "infinite" */
9573 pts = NULL;
9575 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9576 break;
9578 # endif
9579 default:
9580 g_assert_not_reached();
9583 if (!is_error(ret)) {
9584 for(i = 0; i < nfds; i++) {
9585 target_pfd[i].revents = tswap16(pfd[i].revents);
9588 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9590 break;
9591 #endif
9592 case TARGET_NR_flock:
9593 /* NOTE: the flock constant seems to be the same for every
9594 Linux platform */
9595 ret = get_errno(safe_flock(arg1, arg2));
9596 break;
9597 case TARGET_NR_readv:
9599 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9600 if (vec != NULL) {
9601 ret = get_errno(safe_readv(arg1, vec, arg3));
9602 unlock_iovec(vec, arg2, arg3, 1);
9603 } else {
9604 ret = -host_to_target_errno(errno);
9607 break;
9608 case TARGET_NR_writev:
9610 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9611 if (vec != NULL) {
9612 ret = get_errno(safe_writev(arg1, vec, arg3));
9613 unlock_iovec(vec, arg2, arg3, 0);
9614 } else {
9615 ret = -host_to_target_errno(errno);
9618 break;
9619 case TARGET_NR_getsid:
9620 ret = get_errno(getsid(arg1));
9621 break;
9622 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9623 case TARGET_NR_fdatasync:
9624 ret = get_errno(fdatasync(arg1));
9625 break;
9626 #endif
9627 #ifdef TARGET_NR__sysctl
9628 case TARGET_NR__sysctl:
9629 /* We don't implement this, but ENOTDIR is always a safe
9630 return value. */
9631 ret = -TARGET_ENOTDIR;
9632 break;
9633 #endif
9634 case TARGET_NR_sched_getaffinity:
9636 unsigned int mask_size;
9637 unsigned long *mask;
9640 * sched_getaffinity needs multiples of ulong, so need to take
9641 * care of mismatches between target ulong and host ulong sizes.
9643 if (arg2 & (sizeof(abi_ulong) - 1)) {
9644 ret = -TARGET_EINVAL;
9645 break;
9647 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9649 mask = alloca(mask_size);
9650 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9652 if (!is_error(ret)) {
9653 if (ret > arg2) {
9654 /* More data returned than the caller's buffer will fit.
9655 * This only happens if sizeof(abi_long) < sizeof(long)
9656 * and the caller passed us a buffer holding an odd number
9657 * of abi_longs. If the host kernel is actually using the
9658 * extra 4 bytes then fail EINVAL; otherwise we can just
9659 * ignore them and only copy the interesting part.
9661 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9662 if (numcpus > arg2 * 8) {
9663 ret = -TARGET_EINVAL;
9664 break;
9666 ret = arg2;
9669 if (copy_to_user(arg3, mask, ret)) {
9670 goto efault;
9674 break;
9675 case TARGET_NR_sched_setaffinity:
9677 unsigned int mask_size;
9678 unsigned long *mask;
9681 * sched_setaffinity needs multiples of ulong, so need to take
9682 * care of mismatches between target ulong and host ulong sizes.
9684 if (arg2 & (sizeof(abi_ulong) - 1)) {
9685 ret = -TARGET_EINVAL;
9686 break;
9688 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9690 mask = alloca(mask_size);
9691 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9692 goto efault;
9694 memcpy(mask, p, arg2);
9695 unlock_user_struct(p, arg2, 0);
9697 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9699 break;
9700 case TARGET_NR_sched_setparam:
9702 struct sched_param *target_schp;
9703 struct sched_param schp;
9705 if (arg2 == 0) {
9706 return -TARGET_EINVAL;
9708 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9709 goto efault;
9710 schp.sched_priority = tswap32(target_schp->sched_priority);
9711 unlock_user_struct(target_schp, arg2, 0);
9712 ret = get_errno(sched_setparam(arg1, &schp));
9714 break;
9715 case TARGET_NR_sched_getparam:
9717 struct sched_param *target_schp;
9718 struct sched_param schp;
9720 if (arg2 == 0) {
9721 return -TARGET_EINVAL;
9723 ret = get_errno(sched_getparam(arg1, &schp));
9724 if (!is_error(ret)) {
9725 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9726 goto efault;
9727 target_schp->sched_priority = tswap32(schp.sched_priority);
9728 unlock_user_struct(target_schp, arg2, 1);
9731 break;
9732 case TARGET_NR_sched_setscheduler:
9734 struct sched_param *target_schp;
9735 struct sched_param schp;
9736 if (arg3 == 0) {
9737 return -TARGET_EINVAL;
9739 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9740 goto efault;
9741 schp.sched_priority = tswap32(target_schp->sched_priority);
9742 unlock_user_struct(target_schp, arg3, 0);
9743 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9745 break;
9746 case TARGET_NR_sched_getscheduler:
9747 ret = get_errno(sched_getscheduler(arg1));
9748 break;
9749 case TARGET_NR_sched_yield:
9750 ret = get_errno(sched_yield());
9751 break;
9752 case TARGET_NR_sched_get_priority_max:
9753 ret = get_errno(sched_get_priority_max(arg1));
9754 break;
9755 case TARGET_NR_sched_get_priority_min:
9756 ret = get_errno(sched_get_priority_min(arg1));
9757 break;
9758 case TARGET_NR_sched_rr_get_interval:
9760 struct timespec ts;
9761 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9762 if (!is_error(ret)) {
9763 ret = host_to_target_timespec(arg2, &ts);
9766 break;
9767 case TARGET_NR_nanosleep:
9769 struct timespec req, rem;
9770 target_to_host_timespec(&req, arg1);
9771 ret = get_errno(safe_nanosleep(&req, &rem));
9772 if (is_error(ret) && arg2) {
9773 host_to_target_timespec(arg2, &rem);
9776 break;
9777 #ifdef TARGET_NR_query_module
9778 case TARGET_NR_query_module:
9779 goto unimplemented;
9780 #endif
9781 #ifdef TARGET_NR_nfsservctl
9782 case TARGET_NR_nfsservctl:
9783 goto unimplemented;
9784 #endif
9785 case TARGET_NR_prctl:
9786 switch (arg1) {
9787 case PR_GET_PDEATHSIG:
9789 int deathsig;
9790 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9791 if (!is_error(ret) && arg2
9792 && put_user_ual(deathsig, arg2)) {
9793 goto efault;
9795 break;
9797 #ifdef PR_GET_NAME
9798 case PR_GET_NAME:
9800 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9801 if (!name) {
9802 goto efault;
9804 ret = get_errno(prctl(arg1, (unsigned long)name,
9805 arg3, arg4, arg5));
9806 unlock_user(name, arg2, 16);
9807 break;
9809 case PR_SET_NAME:
9811 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9812 if (!name) {
9813 goto efault;
9815 ret = get_errno(prctl(arg1, (unsigned long)name,
9816 arg3, arg4, arg5));
9817 unlock_user(name, arg2, 0);
9818 break;
9820 #endif
9821 default:
9822 /* Most prctl options have no pointer arguments */
9823 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9824 break;
9826 break;
9827 #ifdef TARGET_NR_arch_prctl
9828 case TARGET_NR_arch_prctl:
9829 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9830 ret = do_arch_prctl(cpu_env, arg1, arg2);
9831 break;
9832 #else
9833 goto unimplemented;
9834 #endif
9835 #endif
9836 #ifdef TARGET_NR_pread64
9837 case TARGET_NR_pread64:
9838 if (regpairs_aligned(cpu_env)) {
9839 arg4 = arg5;
9840 arg5 = arg6;
9842 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9843 goto efault;
9844 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9845 unlock_user(p, arg2, ret);
9846 break;
9847 case TARGET_NR_pwrite64:
9848 if (regpairs_aligned(cpu_env)) {
9849 arg4 = arg5;
9850 arg5 = arg6;
9852 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9853 goto efault;
9854 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9855 unlock_user(p, arg2, 0);
9856 break;
9857 #endif
9858 case TARGET_NR_getcwd:
9859 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9860 goto efault;
9861 ret = get_errno(sys_getcwd1(p, arg2));
9862 unlock_user(p, arg1, ret);
9863 break;
9864 case TARGET_NR_capget:
9865 case TARGET_NR_capset:
9867 struct target_user_cap_header *target_header;
9868 struct target_user_cap_data *target_data = NULL;
9869 struct __user_cap_header_struct header;
9870 struct __user_cap_data_struct data[2];
9871 struct __user_cap_data_struct *dataptr = NULL;
9872 int i, target_datalen;
9873 int data_items = 1;
9875 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9876 goto efault;
9878 header.version = tswap32(target_header->version);
9879 header.pid = tswap32(target_header->pid);
9881 if (header.version != _LINUX_CAPABILITY_VERSION) {
9882 /* Version 2 and up takes pointer to two user_data structs */
9883 data_items = 2;
9886 target_datalen = sizeof(*target_data) * data_items;
9888 if (arg2) {
9889 if (num == TARGET_NR_capget) {
9890 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9891 } else {
9892 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9894 if (!target_data) {
9895 unlock_user_struct(target_header, arg1, 0);
9896 goto efault;
9899 if (num == TARGET_NR_capset) {
9900 for (i = 0; i < data_items; i++) {
9901 data[i].effective = tswap32(target_data[i].effective);
9902 data[i].permitted = tswap32(target_data[i].permitted);
9903 data[i].inheritable = tswap32(target_data[i].inheritable);
9907 dataptr = data;
9910 if (num == TARGET_NR_capget) {
9911 ret = get_errno(capget(&header, dataptr));
9912 } else {
9913 ret = get_errno(capset(&header, dataptr));
9916 /* The kernel always updates version for both capget and capset */
9917 target_header->version = tswap32(header.version);
9918 unlock_user_struct(target_header, arg1, 1);
9920 if (arg2) {
9921 if (num == TARGET_NR_capget) {
9922 for (i = 0; i < data_items; i++) {
9923 target_data[i].effective = tswap32(data[i].effective);
9924 target_data[i].permitted = tswap32(data[i].permitted);
9925 target_data[i].inheritable = tswap32(data[i].inheritable);
9927 unlock_user(target_data, arg2, target_datalen);
9928 } else {
9929 unlock_user(target_data, arg2, 0);
9932 break;
9934 case TARGET_NR_sigaltstack:
9935 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9936 break;
9938 #ifdef CONFIG_SENDFILE
9939 case TARGET_NR_sendfile:
9941 off_t *offp = NULL;
9942 off_t off;
9943 if (arg3) {
9944 ret = get_user_sal(off, arg3);
9945 if (is_error(ret)) {
9946 break;
9948 offp = &off;
9950 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9951 if (!is_error(ret) && arg3) {
9952 abi_long ret2 = put_user_sal(off, arg3);
9953 if (is_error(ret2)) {
9954 ret = ret2;
9957 break;
9959 #ifdef TARGET_NR_sendfile64
9960 case TARGET_NR_sendfile64:
9962 off_t *offp = NULL;
9963 off_t off;
9964 if (arg3) {
9965 ret = get_user_s64(off, arg3);
9966 if (is_error(ret)) {
9967 break;
9969 offp = &off;
9971 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9972 if (!is_error(ret) && arg3) {
9973 abi_long ret2 = put_user_s64(off, arg3);
9974 if (is_error(ret2)) {
9975 ret = ret2;
9978 break;
9980 #endif
9981 #else
9982 case TARGET_NR_sendfile:
9983 #ifdef TARGET_NR_sendfile64
9984 case TARGET_NR_sendfile64:
9985 #endif
9986 goto unimplemented;
9987 #endif
9989 #ifdef TARGET_NR_getpmsg
9990 case TARGET_NR_getpmsg:
9991 goto unimplemented;
9992 #endif
9993 #ifdef TARGET_NR_putpmsg
9994 case TARGET_NR_putpmsg:
9995 goto unimplemented;
9996 #endif
9997 #ifdef TARGET_NR_vfork
9998 case TARGET_NR_vfork:
9999 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10000 0, 0, 0, 0));
10001 break;
10002 #endif
10003 #ifdef TARGET_NR_ugetrlimit
10004 case TARGET_NR_ugetrlimit:
10006 struct rlimit rlim;
10007 int resource = target_to_host_resource(arg1);
10008 ret = get_errno(getrlimit(resource, &rlim));
10009 if (!is_error(ret)) {
10010 struct target_rlimit *target_rlim;
10011 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10012 goto efault;
10013 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10014 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10015 unlock_user_struct(target_rlim, arg2, 1);
10017 break;
10019 #endif
10020 #ifdef TARGET_NR_truncate64
10021 case TARGET_NR_truncate64:
10022 if (!(p = lock_user_string(arg1)))
10023 goto efault;
10024 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10025 unlock_user(p, arg1, 0);
10026 break;
10027 #endif
10028 #ifdef TARGET_NR_ftruncate64
10029 case TARGET_NR_ftruncate64:
10030 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10031 break;
10032 #endif
10033 #ifdef TARGET_NR_stat64
10034 case TARGET_NR_stat64:
10035 if (!(p = lock_user_string(arg1)))
10036 goto efault;
10037 ret = get_errno(stat(path(p), &st));
10038 unlock_user(p, arg1, 0);
10039 if (!is_error(ret))
10040 ret = host_to_target_stat64(cpu_env, arg2, &st);
10041 break;
10042 #endif
10043 #ifdef TARGET_NR_lstat64
10044 case TARGET_NR_lstat64:
10045 if (!(p = lock_user_string(arg1)))
10046 goto efault;
10047 ret = get_errno(lstat(path(p), &st));
10048 unlock_user(p, arg1, 0);
10049 if (!is_error(ret))
10050 ret = host_to_target_stat64(cpu_env, arg2, &st);
10051 break;
10052 #endif
10053 #ifdef TARGET_NR_fstat64
10054 case TARGET_NR_fstat64:
10055 ret = get_errno(fstat(arg1, &st));
10056 if (!is_error(ret))
10057 ret = host_to_target_stat64(cpu_env, arg2, &st);
10058 break;
10059 #endif
10060 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10061 #ifdef TARGET_NR_fstatat64
10062 case TARGET_NR_fstatat64:
10063 #endif
10064 #ifdef TARGET_NR_newfstatat
10065 case TARGET_NR_newfstatat:
10066 #endif
10067 if (!(p = lock_user_string(arg2)))
10068 goto efault;
10069 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10070 if (!is_error(ret))
10071 ret = host_to_target_stat64(cpu_env, arg3, &st);
10072 break;
10073 #endif
10074 #ifdef TARGET_NR_lchown
10075 case TARGET_NR_lchown:
10076 if (!(p = lock_user_string(arg1)))
10077 goto efault;
10078 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10079 unlock_user(p, arg1, 0);
10080 break;
10081 #endif
10082 #ifdef TARGET_NR_getuid
10083 case TARGET_NR_getuid:
10084 ret = get_errno(high2lowuid(getuid()));
10085 break;
10086 #endif
10087 #ifdef TARGET_NR_getgid
10088 case TARGET_NR_getgid:
10089 ret = get_errno(high2lowgid(getgid()));
10090 break;
10091 #endif
10092 #ifdef TARGET_NR_geteuid
10093 case TARGET_NR_geteuid:
10094 ret = get_errno(high2lowuid(geteuid()));
10095 break;
10096 #endif
10097 #ifdef TARGET_NR_getegid
10098 case TARGET_NR_getegid:
10099 ret = get_errno(high2lowgid(getegid()));
10100 break;
10101 #endif
10102 case TARGET_NR_setreuid:
10103 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10104 break;
10105 case TARGET_NR_setregid:
10106 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10107 break;
10108 case TARGET_NR_getgroups:
10110 int gidsetsize = arg1;
10111 target_id *target_grouplist;
10112 gid_t *grouplist;
10113 int i;
10115 grouplist = alloca(gidsetsize * sizeof(gid_t));
10116 ret = get_errno(getgroups(gidsetsize, grouplist));
10117 if (gidsetsize == 0)
10118 break;
10119 if (!is_error(ret)) {
10120 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10121 if (!target_grouplist)
10122 goto efault;
10123 for(i = 0;i < ret; i++)
10124 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10125 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10128 break;
10129 case TARGET_NR_setgroups:
10131 int gidsetsize = arg1;
10132 target_id *target_grouplist;
10133 gid_t *grouplist = NULL;
10134 int i;
10135 if (gidsetsize) {
10136 grouplist = alloca(gidsetsize * sizeof(gid_t));
10137 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10138 if (!target_grouplist) {
10139 ret = -TARGET_EFAULT;
10140 goto fail;
10142 for (i = 0; i < gidsetsize; i++) {
10143 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10145 unlock_user(target_grouplist, arg2, 0);
10147 ret = get_errno(setgroups(gidsetsize, grouplist));
10149 break;
10150 case TARGET_NR_fchown:
10151 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10152 break;
10153 #if defined(TARGET_NR_fchownat)
10154 case TARGET_NR_fchownat:
10155 if (!(p = lock_user_string(arg2)))
10156 goto efault;
10157 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10158 low2highgid(arg4), arg5));
10159 unlock_user(p, arg2, 0);
10160 break;
10161 #endif
10162 #ifdef TARGET_NR_setresuid
10163 case TARGET_NR_setresuid:
10164 ret = get_errno(sys_setresuid(low2highuid(arg1),
10165 low2highuid(arg2),
10166 low2highuid(arg3)));
10167 break;
10168 #endif
10169 #ifdef TARGET_NR_getresuid
10170 case TARGET_NR_getresuid:
10172 uid_t ruid, euid, suid;
10173 ret = get_errno(getresuid(&ruid, &euid, &suid));
10174 if (!is_error(ret)) {
10175 if (put_user_id(high2lowuid(ruid), arg1)
10176 || put_user_id(high2lowuid(euid), arg2)
10177 || put_user_id(high2lowuid(suid), arg3))
10178 goto efault;
10181 break;
10182 #endif
10183 #ifdef TARGET_NR_getresgid
10184 case TARGET_NR_setresgid:
10185 ret = get_errno(sys_setresgid(low2highgid(arg1),
10186 low2highgid(arg2),
10187 low2highgid(arg3)));
10188 break;
10189 #endif
10190 #ifdef TARGET_NR_getresgid
10191 case TARGET_NR_getresgid:
10193 gid_t rgid, egid, sgid;
10194 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10195 if (!is_error(ret)) {
10196 if (put_user_id(high2lowgid(rgid), arg1)
10197 || put_user_id(high2lowgid(egid), arg2)
10198 || put_user_id(high2lowgid(sgid), arg3))
10199 goto efault;
10202 break;
10203 #endif
10204 #ifdef TARGET_NR_chown
10205 case TARGET_NR_chown:
10206 if (!(p = lock_user_string(arg1)))
10207 goto efault;
10208 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10209 unlock_user(p, arg1, 0);
10210 break;
10211 #endif
10212 case TARGET_NR_setuid:
10213 ret = get_errno(sys_setuid(low2highuid(arg1)));
10214 break;
10215 case TARGET_NR_setgid:
10216 ret = get_errno(sys_setgid(low2highgid(arg1)));
10217 break;
10218 case TARGET_NR_setfsuid:
10219 ret = get_errno(setfsuid(arg1));
10220 break;
10221 case TARGET_NR_setfsgid:
10222 ret = get_errno(setfsgid(arg1));
10223 break;
10225 #ifdef TARGET_NR_lchown32
10226 case TARGET_NR_lchown32:
10227 if (!(p = lock_user_string(arg1)))
10228 goto efault;
10229 ret = get_errno(lchown(p, arg2, arg3));
10230 unlock_user(p, arg1, 0);
10231 break;
10232 #endif
10233 #ifdef TARGET_NR_getuid32
10234 case TARGET_NR_getuid32:
10235 ret = get_errno(getuid());
10236 break;
10237 #endif
10239 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10240 /* Alpha specific */
10241 case TARGET_NR_getxuid:
10243 uid_t euid;
10244 euid=geteuid();
10245 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10247 ret = get_errno(getuid());
10248 break;
10249 #endif
10250 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10251 /* Alpha specific */
10252 case TARGET_NR_getxgid:
10254 uid_t egid;
10255 egid=getegid();
10256 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10258 ret = get_errno(getgid());
10259 break;
10260 #endif
10261 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10262 /* Alpha specific */
10263 case TARGET_NR_osf_getsysinfo:
10264 ret = -TARGET_EOPNOTSUPP;
10265 switch (arg1) {
10266 case TARGET_GSI_IEEE_FP_CONTROL:
10268 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10270 /* Copied from linux ieee_fpcr_to_swcr. */
10271 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10272 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10273 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10274 | SWCR_TRAP_ENABLE_DZE
10275 | SWCR_TRAP_ENABLE_OVF);
10276 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10277 | SWCR_TRAP_ENABLE_INE);
10278 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10279 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10281 if (put_user_u64 (swcr, arg2))
10282 goto efault;
10283 ret = 0;
10285 break;
10287 /* case GSI_IEEE_STATE_AT_SIGNAL:
10288 -- Not implemented in linux kernel.
10289 case GSI_UACPROC:
10290 -- Retrieves current unaligned access state; not much used.
10291 case GSI_PROC_TYPE:
10292 -- Retrieves implver information; surely not used.
10293 case GSI_GET_HWRPB:
10294 -- Grabs a copy of the HWRPB; surely not used.
10297 break;
10298 #endif
10299 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10300 /* Alpha specific */
10301 case TARGET_NR_osf_setsysinfo:
10302 ret = -TARGET_EOPNOTSUPP;
10303 switch (arg1) {
10304 case TARGET_SSI_IEEE_FP_CONTROL:
10306 uint64_t swcr, fpcr, orig_fpcr;
10308 if (get_user_u64 (swcr, arg2)) {
10309 goto efault;
10311 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10312 fpcr = orig_fpcr & FPCR_DYN_MASK;
10314 /* Copied from linux ieee_swcr_to_fpcr. */
10315 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10316 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10317 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10318 | SWCR_TRAP_ENABLE_DZE
10319 | SWCR_TRAP_ENABLE_OVF)) << 48;
10320 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10321 | SWCR_TRAP_ENABLE_INE)) << 57;
10322 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10323 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10325 cpu_alpha_store_fpcr(cpu_env, fpcr);
10326 ret = 0;
10328 break;
10330 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10332 uint64_t exc, fpcr, orig_fpcr;
10333 int si_code;
10335 if (get_user_u64(exc, arg2)) {
10336 goto efault;
10339 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10341 /* We only add to the exception status here. */
10342 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10344 cpu_alpha_store_fpcr(cpu_env, fpcr);
10345 ret = 0;
10347 /* Old exceptions are not signaled. */
10348 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10350 /* If any exceptions set by this call,
10351 and are unmasked, send a signal. */
10352 si_code = 0;
10353 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10354 si_code = TARGET_FPE_FLTRES;
10356 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10357 si_code = TARGET_FPE_FLTUND;
10359 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10360 si_code = TARGET_FPE_FLTOVF;
10362 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10363 si_code = TARGET_FPE_FLTDIV;
10365 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10366 si_code = TARGET_FPE_FLTINV;
10368 if (si_code != 0) {
10369 target_siginfo_t info;
10370 info.si_signo = SIGFPE;
10371 info.si_errno = 0;
10372 info.si_code = si_code;
10373 info._sifields._sigfault._addr
10374 = ((CPUArchState *)cpu_env)->pc;
10375 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10378 break;
10380 /* case SSI_NVPAIRS:
10381 -- Used with SSIN_UACPROC to enable unaligned accesses.
10382 case SSI_IEEE_STATE_AT_SIGNAL:
10383 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10384 -- Not implemented in linux kernel
10387 break;
10388 #endif
10389 #ifdef TARGET_NR_osf_sigprocmask
10390 /* Alpha specific. */
10391 case TARGET_NR_osf_sigprocmask:
10393 abi_ulong mask;
10394 int how;
10395 sigset_t set, oldset;
10397 switch(arg1) {
10398 case TARGET_SIG_BLOCK:
10399 how = SIG_BLOCK;
10400 break;
10401 case TARGET_SIG_UNBLOCK:
10402 how = SIG_UNBLOCK;
10403 break;
10404 case TARGET_SIG_SETMASK:
10405 how = SIG_SETMASK;
10406 break;
10407 default:
10408 ret = -TARGET_EINVAL;
10409 goto fail;
10411 mask = arg2;
10412 target_to_host_old_sigset(&set, &mask);
10413 ret = do_sigprocmask(how, &set, &oldset);
10414 if (!ret) {
10415 host_to_target_old_sigset(&mask, &oldset);
10416 ret = mask;
10419 break;
10420 #endif
10422 #ifdef TARGET_NR_getgid32
10423 case TARGET_NR_getgid32:
10424 ret = get_errno(getgid());
10425 break;
10426 #endif
10427 #ifdef TARGET_NR_geteuid32
10428 case TARGET_NR_geteuid32:
10429 ret = get_errno(geteuid());
10430 break;
10431 #endif
10432 #ifdef TARGET_NR_getegid32
10433 case TARGET_NR_getegid32:
10434 ret = get_errno(getegid());
10435 break;
10436 #endif
10437 #ifdef TARGET_NR_setreuid32
10438 case TARGET_NR_setreuid32:
10439 ret = get_errno(setreuid(arg1, arg2));
10440 break;
10441 #endif
10442 #ifdef TARGET_NR_setregid32
10443 case TARGET_NR_setregid32:
10444 ret = get_errno(setregid(arg1, arg2));
10445 break;
10446 #endif
10447 #ifdef TARGET_NR_getgroups32
10448 case TARGET_NR_getgroups32:
10450 int gidsetsize = arg1;
10451 uint32_t *target_grouplist;
10452 gid_t *grouplist;
10453 int i;
10455 grouplist = alloca(gidsetsize * sizeof(gid_t));
10456 ret = get_errno(getgroups(gidsetsize, grouplist));
10457 if (gidsetsize == 0)
10458 break;
10459 if (!is_error(ret)) {
10460 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10461 if (!target_grouplist) {
10462 ret = -TARGET_EFAULT;
10463 goto fail;
10465 for(i = 0;i < ret; i++)
10466 target_grouplist[i] = tswap32(grouplist[i]);
10467 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10470 break;
10471 #endif
10472 #ifdef TARGET_NR_setgroups32
10473 case TARGET_NR_setgroups32:
10475 int gidsetsize = arg1;
10476 uint32_t *target_grouplist;
10477 gid_t *grouplist;
10478 int i;
10480 grouplist = alloca(gidsetsize * sizeof(gid_t));
10481 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10482 if (!target_grouplist) {
10483 ret = -TARGET_EFAULT;
10484 goto fail;
10486 for(i = 0;i < gidsetsize; i++)
10487 grouplist[i] = tswap32(target_grouplist[i]);
10488 unlock_user(target_grouplist, arg2, 0);
10489 ret = get_errno(setgroups(gidsetsize, grouplist));
10491 break;
10492 #endif
10493 #ifdef TARGET_NR_fchown32
10494 case TARGET_NR_fchown32:
10495 ret = get_errno(fchown(arg1, arg2, arg3));
10496 break;
10497 #endif
10498 #ifdef TARGET_NR_setresuid32
10499 case TARGET_NR_setresuid32:
10500 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10501 break;
10502 #endif
10503 #ifdef TARGET_NR_getresuid32
10504 case TARGET_NR_getresuid32:
10506 uid_t ruid, euid, suid;
10507 ret = get_errno(getresuid(&ruid, &euid, &suid));
10508 if (!is_error(ret)) {
10509 if (put_user_u32(ruid, arg1)
10510 || put_user_u32(euid, arg2)
10511 || put_user_u32(suid, arg3))
10512 goto efault;
10515 break;
10516 #endif
10517 #ifdef TARGET_NR_setresgid32
10518 case TARGET_NR_setresgid32:
10519 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10520 break;
10521 #endif
10522 #ifdef TARGET_NR_getresgid32
10523 case TARGET_NR_getresgid32:
10525 gid_t rgid, egid, sgid;
10526 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10527 if (!is_error(ret)) {
10528 if (put_user_u32(rgid, arg1)
10529 || put_user_u32(egid, arg2)
10530 || put_user_u32(sgid, arg3))
10531 goto efault;
10534 break;
10535 #endif
10536 #ifdef TARGET_NR_chown32
10537 case TARGET_NR_chown32:
10538 if (!(p = lock_user_string(arg1)))
10539 goto efault;
10540 ret = get_errno(chown(p, arg2, arg3));
10541 unlock_user(p, arg1, 0);
10542 break;
10543 #endif
10544 #ifdef TARGET_NR_setuid32
10545 case TARGET_NR_setuid32:
10546 ret = get_errno(sys_setuid(arg1));
10547 break;
10548 #endif
10549 #ifdef TARGET_NR_setgid32
10550 case TARGET_NR_setgid32:
10551 ret = get_errno(sys_setgid(arg1));
10552 break;
10553 #endif
10554 #ifdef TARGET_NR_setfsuid32
10555 case TARGET_NR_setfsuid32:
10556 ret = get_errno(setfsuid(arg1));
10557 break;
10558 #endif
10559 #ifdef TARGET_NR_setfsgid32
10560 case TARGET_NR_setfsgid32:
10561 ret = get_errno(setfsgid(arg1));
10562 break;
10563 #endif
10565 case TARGET_NR_pivot_root:
10566 goto unimplemented;
10567 #ifdef TARGET_NR_mincore
10568 case TARGET_NR_mincore:
10570 void *a;
10571 ret = -TARGET_EFAULT;
10572 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10573 goto efault;
10574 if (!(p = lock_user_string(arg3)))
10575 goto mincore_fail;
10576 ret = get_errno(mincore(a, arg2, p));
10577 unlock_user(p, arg3, ret);
10578 mincore_fail:
10579 unlock_user(a, arg1, 0);
10581 break;
10582 #endif
10583 #ifdef TARGET_NR_arm_fadvise64_64
10584 case TARGET_NR_arm_fadvise64_64:
10585 /* arm_fadvise64_64 looks like fadvise64_64 but
10586 * with different argument order: fd, advice, offset, len
10587 * rather than the usual fd, offset, len, advice.
10588 * Note that offset and len are both 64-bit so appear as
10589 * pairs of 32-bit registers.
10591 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10592 target_offset64(arg5, arg6), arg2);
10593 ret = -host_to_target_errno(ret);
10594 break;
10595 #endif
10597 #if TARGET_ABI_BITS == 32
10599 #ifdef TARGET_NR_fadvise64_64
10600 case TARGET_NR_fadvise64_64:
10601 /* 6 args: fd, offset (high, low), len (high, low), advice */
10602 if (regpairs_aligned(cpu_env)) {
10603 /* offset is in (3,4), len in (5,6) and advice in 7 */
10604 arg2 = arg3;
10605 arg3 = arg4;
10606 arg4 = arg5;
10607 arg5 = arg6;
10608 arg6 = arg7;
10610 ret = -host_to_target_errno(posix_fadvise(arg1,
10611 target_offset64(arg2, arg3),
10612 target_offset64(arg4, arg5),
10613 arg6));
10614 break;
10615 #endif
10617 #ifdef TARGET_NR_fadvise64
10618 case TARGET_NR_fadvise64:
10619 /* 5 args: fd, offset (high, low), len, advice */
10620 if (regpairs_aligned(cpu_env)) {
10621 /* offset is in (3,4), len in 5 and advice in 6 */
10622 arg2 = arg3;
10623 arg3 = arg4;
10624 arg4 = arg5;
10625 arg5 = arg6;
10627 ret = -host_to_target_errno(posix_fadvise(arg1,
10628 target_offset64(arg2, arg3),
10629 arg4, arg5));
10630 break;
10631 #endif
10633 #else /* not a 32-bit ABI */
10634 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10635 #ifdef TARGET_NR_fadvise64_64
10636 case TARGET_NR_fadvise64_64:
10637 #endif
10638 #ifdef TARGET_NR_fadvise64
10639 case TARGET_NR_fadvise64:
10640 #endif
10641 #ifdef TARGET_S390X
10642 switch (arg4) {
10643 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10644 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10645 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10646 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10647 default: break;
10649 #endif
10650 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10651 break;
10652 #endif
10653 #endif /* end of 64-bit ABI fadvise handling */
10655 #ifdef TARGET_NR_madvise
10656 case TARGET_NR_madvise:
10657 /* A straight passthrough may not be safe because qemu sometimes
10658 turns private file-backed mappings into anonymous mappings.
10659 This will break MADV_DONTNEED.
10660 This is a hint, so ignoring and returning success is ok. */
10661 ret = get_errno(0);
10662 break;
10663 #endif
10664 #if TARGET_ABI_BITS == 32
10665 case TARGET_NR_fcntl64:
10667 int cmd;
10668 struct flock64 fl;
10669 from_flock64_fn *copyfrom = copy_from_user_flock64;
10670 to_flock64_fn *copyto = copy_to_user_flock64;
10672 #ifdef TARGET_ARM
10673 if (((CPUARMState *)cpu_env)->eabi) {
10674 copyfrom = copy_from_user_eabi_flock64;
10675 copyto = copy_to_user_eabi_flock64;
10677 #endif
10679 cmd = target_to_host_fcntl_cmd(arg2);
10680 if (cmd == -TARGET_EINVAL) {
10681 ret = cmd;
10682 break;
10685 switch(arg2) {
10686 case TARGET_F_GETLK64:
10687 ret = copyfrom(&fl, arg3);
10688 if (ret) {
10689 break;
10691 ret = get_errno(fcntl(arg1, cmd, &fl));
10692 if (ret == 0) {
10693 ret = copyto(arg3, &fl);
10695 break;
10697 case TARGET_F_SETLK64:
10698 case TARGET_F_SETLKW64:
10699 ret = copyfrom(&fl, arg3);
10700 if (ret) {
10701 break;
10703 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10704 break;
10705 default:
10706 ret = do_fcntl(arg1, arg2, arg3);
10707 break;
10709 break;
10711 #endif
10712 #ifdef TARGET_NR_cacheflush
10713 case TARGET_NR_cacheflush:
10714 /* self-modifying code is handled automatically, so nothing needed */
10715 ret = 0;
10716 break;
10717 #endif
10718 #ifdef TARGET_NR_security
10719 case TARGET_NR_security:
10720 goto unimplemented;
10721 #endif
10722 #ifdef TARGET_NR_getpagesize
10723 case TARGET_NR_getpagesize:
10724 ret = TARGET_PAGE_SIZE;
10725 break;
10726 #endif
10727 case TARGET_NR_gettid:
10728 ret = get_errno(gettid());
10729 break;
10730 #ifdef TARGET_NR_readahead
10731 case TARGET_NR_readahead:
10732 #if TARGET_ABI_BITS == 32
10733 if (regpairs_aligned(cpu_env)) {
10734 arg2 = arg3;
10735 arg3 = arg4;
10736 arg4 = arg5;
10738 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10739 #else
10740 ret = get_errno(readahead(arg1, arg2, arg3));
10741 #endif
10742 break;
10743 #endif
10744 #ifdef CONFIG_ATTR
10745 #ifdef TARGET_NR_setxattr
10746 case TARGET_NR_listxattr:
10747 case TARGET_NR_llistxattr:
10749 void *p, *b = 0;
10750 if (arg2) {
10751 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10752 if (!b) {
10753 ret = -TARGET_EFAULT;
10754 break;
10757 p = lock_user_string(arg1);
10758 if (p) {
10759 if (num == TARGET_NR_listxattr) {
10760 ret = get_errno(listxattr(p, b, arg3));
10761 } else {
10762 ret = get_errno(llistxattr(p, b, arg3));
10764 } else {
10765 ret = -TARGET_EFAULT;
10767 unlock_user(p, arg1, 0);
10768 unlock_user(b, arg2, arg3);
10769 break;
10771 case TARGET_NR_flistxattr:
10773 void *b = 0;
10774 if (arg2) {
10775 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10776 if (!b) {
10777 ret = -TARGET_EFAULT;
10778 break;
10781 ret = get_errno(flistxattr(arg1, b, arg3));
10782 unlock_user(b, arg2, arg3);
10783 break;
10785 case TARGET_NR_setxattr:
10786 case TARGET_NR_lsetxattr:
10788 void *p, *n, *v = 0;
10789 if (arg3) {
10790 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10791 if (!v) {
10792 ret = -TARGET_EFAULT;
10793 break;
10796 p = lock_user_string(arg1);
10797 n = lock_user_string(arg2);
10798 if (p && n) {
10799 if (num == TARGET_NR_setxattr) {
10800 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10801 } else {
10802 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10804 } else {
10805 ret = -TARGET_EFAULT;
10807 unlock_user(p, arg1, 0);
10808 unlock_user(n, arg2, 0);
10809 unlock_user(v, arg3, 0);
10811 break;
10812 case TARGET_NR_fsetxattr:
10814 void *n, *v = 0;
10815 if (arg3) {
10816 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10817 if (!v) {
10818 ret = -TARGET_EFAULT;
10819 break;
10822 n = lock_user_string(arg2);
10823 if (n) {
10824 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10825 } else {
10826 ret = -TARGET_EFAULT;
10828 unlock_user(n, arg2, 0);
10829 unlock_user(v, arg3, 0);
10831 break;
10832 case TARGET_NR_getxattr:
10833 case TARGET_NR_lgetxattr:
10835 void *p, *n, *v = 0;
10836 if (arg3) {
10837 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10838 if (!v) {
10839 ret = -TARGET_EFAULT;
10840 break;
10843 p = lock_user_string(arg1);
10844 n = lock_user_string(arg2);
10845 if (p && n) {
10846 if (num == TARGET_NR_getxattr) {
10847 ret = get_errno(getxattr(p, n, v, arg4));
10848 } else {
10849 ret = get_errno(lgetxattr(p, n, v, arg4));
10851 } else {
10852 ret = -TARGET_EFAULT;
10854 unlock_user(p, arg1, 0);
10855 unlock_user(n, arg2, 0);
10856 unlock_user(v, arg3, arg4);
10858 break;
10859 case TARGET_NR_fgetxattr:
10861 void *n, *v = 0;
10862 if (arg3) {
10863 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10864 if (!v) {
10865 ret = -TARGET_EFAULT;
10866 break;
10869 n = lock_user_string(arg2);
10870 if (n) {
10871 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10872 } else {
10873 ret = -TARGET_EFAULT;
10875 unlock_user(n, arg2, 0);
10876 unlock_user(v, arg3, arg4);
10878 break;
10879 case TARGET_NR_removexattr:
10880 case TARGET_NR_lremovexattr:
10882 void *p, *n;
10883 p = lock_user_string(arg1);
10884 n = lock_user_string(arg2);
10885 if (p && n) {
10886 if (num == TARGET_NR_removexattr) {
10887 ret = get_errno(removexattr(p, n));
10888 } else {
10889 ret = get_errno(lremovexattr(p, n));
10891 } else {
10892 ret = -TARGET_EFAULT;
10894 unlock_user(p, arg1, 0);
10895 unlock_user(n, arg2, 0);
10897 break;
10898 case TARGET_NR_fremovexattr:
10900 void *n;
10901 n = lock_user_string(arg2);
10902 if (n) {
10903 ret = get_errno(fremovexattr(arg1, n));
10904 } else {
10905 ret = -TARGET_EFAULT;
10907 unlock_user(n, arg2, 0);
10909 break;
10910 #endif
10911 #endif /* CONFIG_ATTR */
10912 #ifdef TARGET_NR_set_thread_area
10913 case TARGET_NR_set_thread_area:
10914 #if defined(TARGET_MIPS)
10915 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10916 ret = 0;
10917 break;
10918 #elif defined(TARGET_CRIS)
10919 if (arg1 & 0xff)
10920 ret = -TARGET_EINVAL;
10921 else {
10922 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10923 ret = 0;
10925 break;
10926 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10927 ret = do_set_thread_area(cpu_env, arg1);
10928 break;
10929 #elif defined(TARGET_M68K)
10931 TaskState *ts = cpu->opaque;
10932 ts->tp_value = arg1;
10933 ret = 0;
10934 break;
10936 #else
10937 goto unimplemented_nowarn;
10938 #endif
10939 #endif
10940 #ifdef TARGET_NR_get_thread_area
10941 case TARGET_NR_get_thread_area:
10942 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10943 ret = do_get_thread_area(cpu_env, arg1);
10944 break;
10945 #elif defined(TARGET_M68K)
10947 TaskState *ts = cpu->opaque;
10948 ret = ts->tp_value;
10949 break;
10951 #else
10952 goto unimplemented_nowarn;
10953 #endif
10954 #endif
10955 #ifdef TARGET_NR_getdomainname
10956 case TARGET_NR_getdomainname:
10957 goto unimplemented_nowarn;
10958 #endif
10960 #ifdef TARGET_NR_clock_gettime
10961 case TARGET_NR_clock_gettime:
10963 struct timespec ts;
10964 ret = get_errno(clock_gettime(arg1, &ts));
10965 if (!is_error(ret)) {
10966 host_to_target_timespec(arg2, &ts);
10968 break;
10970 #endif
10971 #ifdef TARGET_NR_clock_getres
10972 case TARGET_NR_clock_getres:
10974 struct timespec ts;
10975 ret = get_errno(clock_getres(arg1, &ts));
10976 if (!is_error(ret)) {
10977 host_to_target_timespec(arg2, &ts);
10979 break;
10981 #endif
10982 #ifdef TARGET_NR_clock_nanosleep
10983 case TARGET_NR_clock_nanosleep:
10985 struct timespec ts;
10986 target_to_host_timespec(&ts, arg3);
10987 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10988 &ts, arg4 ? &ts : NULL));
10989 if (arg4)
10990 host_to_target_timespec(arg4, &ts);
10992 #if defined(TARGET_PPC)
10993 /* clock_nanosleep is odd in that it returns positive errno values.
10994 * On PPC, CR0 bit 3 should be set in such a situation. */
10995 if (ret && ret != -TARGET_ERESTARTSYS) {
10996 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10998 #endif
10999 break;
11001 #endif
11003 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11004 case TARGET_NR_set_tid_address:
11005 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11006 break;
11007 #endif
11009 case TARGET_NR_tkill:
11010 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11011 break;
11013 case TARGET_NR_tgkill:
11014 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11015 target_to_host_signal(arg3)));
11016 break;
11018 #ifdef TARGET_NR_set_robust_list
11019 case TARGET_NR_set_robust_list:
11020 case TARGET_NR_get_robust_list:
11021 /* The ABI for supporting robust futexes has userspace pass
11022 * the kernel a pointer to a linked list which is updated by
11023 * userspace after the syscall; the list is walked by the kernel
11024 * when the thread exits. Since the linked list in QEMU guest
11025 * memory isn't a valid linked list for the host and we have
11026 * no way to reliably intercept the thread-death event, we can't
11027 * support these. Silently return ENOSYS so that guest userspace
11028 * falls back to a non-robust futex implementation (which should
11029 * be OK except in the corner case of the guest crashing while
11030 * holding a mutex that is shared with another process via
11031 * shared memory).
11033 goto unimplemented_nowarn;
11034 #endif
11036 #if defined(TARGET_NR_utimensat)
11037 case TARGET_NR_utimensat:
11039 struct timespec *tsp, ts[2];
11040 if (!arg3) {
11041 tsp = NULL;
11042 } else {
11043 target_to_host_timespec(ts, arg3);
11044 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11045 tsp = ts;
11047 if (!arg2)
11048 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11049 else {
11050 if (!(p = lock_user_string(arg2))) {
11051 ret = -TARGET_EFAULT;
11052 goto fail;
11054 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11055 unlock_user(p, arg2, 0);
11058 break;
11059 #endif
11060 case TARGET_NR_futex:
11061 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11062 break;
11063 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11064 case TARGET_NR_inotify_init:
11065 ret = get_errno(sys_inotify_init());
11066 break;
11067 #endif
11068 #ifdef CONFIG_INOTIFY1
11069 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11070 case TARGET_NR_inotify_init1:
11071 ret = get_errno(sys_inotify_init1(arg1));
11072 break;
11073 #endif
11074 #endif
11075 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11076 case TARGET_NR_inotify_add_watch:
11077 p = lock_user_string(arg2);
11078 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11079 unlock_user(p, arg2, 0);
11080 break;
11081 #endif
11082 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11083 case TARGET_NR_inotify_rm_watch:
11084 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11085 break;
11086 #endif
11088 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11089 case TARGET_NR_mq_open:
11091 struct mq_attr posix_mq_attr, *attrp;
11093 p = lock_user_string(arg1 - 1);
11094 if (arg4 != 0) {
11095 copy_from_user_mq_attr (&posix_mq_attr, arg4);
11096 attrp = &posix_mq_attr;
11097 } else {
11098 attrp = 0;
11100 ret = get_errno(mq_open(p, arg2, arg3, attrp));
11101 unlock_user (p, arg1, 0);
11103 break;
11105 case TARGET_NR_mq_unlink:
11106 p = lock_user_string(arg1 - 1);
11107 ret = get_errno(mq_unlink(p));
11108 unlock_user (p, arg1, 0);
11109 break;
11111 case TARGET_NR_mq_timedsend:
11113 struct timespec ts;
11115 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11116 if (arg5 != 0) {
11117 target_to_host_timespec(&ts, arg5);
11118 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11119 host_to_target_timespec(arg5, &ts);
11120 } else {
11121 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11123 unlock_user (p, arg2, arg3);
11125 break;
11127 case TARGET_NR_mq_timedreceive:
11129 struct timespec ts;
11130 unsigned int prio;
11132 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11133 if (arg5 != 0) {
11134 target_to_host_timespec(&ts, arg5);
11135 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11136 &prio, &ts));
11137 host_to_target_timespec(arg5, &ts);
11138 } else {
11139 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11140 &prio, NULL));
11142 unlock_user (p, arg2, arg3);
11143 if (arg4 != 0)
11144 put_user_u32(prio, arg4);
11146 break;
11148 /* Not implemented for now... */
11149 /* case TARGET_NR_mq_notify: */
11150 /* break; */
11152 case TARGET_NR_mq_getsetattr:
11154 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11155 ret = 0;
11156 if (arg3 != 0) {
11157 ret = mq_getattr(arg1, &posix_mq_attr_out);
11158 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11160 if (arg2 != 0) {
11161 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11162 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11166 break;
11167 #endif
11169 #ifdef CONFIG_SPLICE
11170 #ifdef TARGET_NR_tee
11171 case TARGET_NR_tee:
11173 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11175 break;
11176 #endif
11177 #ifdef TARGET_NR_splice
11178 case TARGET_NR_splice:
11180 loff_t loff_in, loff_out;
11181 loff_t *ploff_in = NULL, *ploff_out = NULL;
11182 if (arg2) {
11183 if (get_user_u64(loff_in, arg2)) {
11184 goto efault;
11186 ploff_in = &loff_in;
11188 if (arg4) {
11189 if (get_user_u64(loff_out, arg4)) {
11190 goto efault;
11192 ploff_out = &loff_out;
11194 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11195 if (arg2) {
11196 if (put_user_u64(loff_in, arg2)) {
11197 goto efault;
11200 if (arg4) {
11201 if (put_user_u64(loff_out, arg4)) {
11202 goto efault;
11206 break;
11207 #endif
11208 #ifdef TARGET_NR_vmsplice
11209 case TARGET_NR_vmsplice:
11211 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11212 if (vec != NULL) {
11213 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11214 unlock_iovec(vec, arg2, arg3, 0);
11215 } else {
11216 ret = -host_to_target_errno(errno);
11219 break;
11220 #endif
11221 #endif /* CONFIG_SPLICE */
11222 #ifdef CONFIG_EVENTFD
11223 #if defined(TARGET_NR_eventfd)
11224 case TARGET_NR_eventfd:
11225 ret = get_errno(eventfd(arg1, 0));
11226 fd_trans_unregister(ret);
11227 break;
11228 #endif
11229 #if defined(TARGET_NR_eventfd2)
11230 case TARGET_NR_eventfd2:
11232 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11233 if (arg2 & TARGET_O_NONBLOCK) {
11234 host_flags |= O_NONBLOCK;
11236 if (arg2 & TARGET_O_CLOEXEC) {
11237 host_flags |= O_CLOEXEC;
11239 ret = get_errno(eventfd(arg1, host_flags));
11240 fd_trans_unregister(ret);
11241 break;
11243 #endif
11244 #endif /* CONFIG_EVENTFD */
11245 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11246 case TARGET_NR_fallocate:
11247 #if TARGET_ABI_BITS == 32
11248 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11249 target_offset64(arg5, arg6)));
11250 #else
11251 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11252 #endif
11253 break;
11254 #endif
11255 #if defined(CONFIG_SYNC_FILE_RANGE)
11256 #if defined(TARGET_NR_sync_file_range)
11257 case TARGET_NR_sync_file_range:
11258 #if TARGET_ABI_BITS == 32
11259 #if defined(TARGET_MIPS)
11260 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11261 target_offset64(arg5, arg6), arg7));
11262 #else
11263 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11264 target_offset64(arg4, arg5), arg6));
11265 #endif /* !TARGET_MIPS */
11266 #else
11267 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11268 #endif
11269 break;
11270 #endif
11271 #if defined(TARGET_NR_sync_file_range2)
11272 case TARGET_NR_sync_file_range2:
11273 /* This is like sync_file_range but the arguments are reordered */
11274 #if TARGET_ABI_BITS == 32
11275 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11276 target_offset64(arg5, arg6), arg2));
11277 #else
11278 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11279 #endif
11280 break;
11281 #endif
11282 #endif
11283 #if defined(TARGET_NR_signalfd4)
11284 case TARGET_NR_signalfd4:
11285 ret = do_signalfd4(arg1, arg2, arg4);
11286 break;
11287 #endif
11288 #if defined(TARGET_NR_signalfd)
11289 case TARGET_NR_signalfd:
11290 ret = do_signalfd4(arg1, arg2, 0);
11291 break;
11292 #endif
11293 #if defined(CONFIG_EPOLL)
11294 #if defined(TARGET_NR_epoll_create)
11295 case TARGET_NR_epoll_create:
11296 ret = get_errno(epoll_create(arg1));
11297 break;
11298 #endif
11299 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11300 case TARGET_NR_epoll_create1:
11301 ret = get_errno(epoll_create1(arg1));
11302 break;
11303 #endif
11304 #if defined(TARGET_NR_epoll_ctl)
11305 case TARGET_NR_epoll_ctl:
11307 struct epoll_event ep;
11308 struct epoll_event *epp = 0;
11309 if (arg4) {
11310 struct target_epoll_event *target_ep;
11311 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11312 goto efault;
11314 ep.events = tswap32(target_ep->events);
11315 /* The epoll_data_t union is just opaque data to the kernel,
11316 * so we transfer all 64 bits across and need not worry what
11317 * actual data type it is.
11319 ep.data.u64 = tswap64(target_ep->data.u64);
11320 unlock_user_struct(target_ep, arg4, 0);
11321 epp = &ep;
11323 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11324 break;
11326 #endif
11328 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11329 #if defined(TARGET_NR_epoll_wait)
11330 case TARGET_NR_epoll_wait:
11331 #endif
11332 #if defined(TARGET_NR_epoll_pwait)
11333 case TARGET_NR_epoll_pwait:
11334 #endif
11336 struct target_epoll_event *target_ep;
11337 struct epoll_event *ep;
11338 int epfd = arg1;
11339 int maxevents = arg3;
11340 int timeout = arg4;
11342 target_ep = lock_user(VERIFY_WRITE, arg2,
11343 maxevents * sizeof(struct target_epoll_event), 1);
11344 if (!target_ep) {
11345 goto efault;
11348 ep = alloca(maxevents * sizeof(struct epoll_event));
11350 switch (num) {
11351 #if defined(TARGET_NR_epoll_pwait)
11352 case TARGET_NR_epoll_pwait:
11354 target_sigset_t *target_set;
11355 sigset_t _set, *set = &_set;
11357 if (arg5) {
11358 if (arg6 != sizeof(target_sigset_t)) {
11359 ret = -TARGET_EINVAL;
11360 break;
11363 target_set = lock_user(VERIFY_READ, arg5,
11364 sizeof(target_sigset_t), 1);
11365 if (!target_set) {
11366 unlock_user(target_ep, arg2, 0);
11367 goto efault;
11369 target_to_host_sigset(set, target_set);
11370 unlock_user(target_set, arg5, 0);
11371 } else {
11372 set = NULL;
11375 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11376 set, SIGSET_T_SIZE));
11377 break;
11379 #endif
11380 #if defined(TARGET_NR_epoll_wait)
11381 case TARGET_NR_epoll_wait:
11382 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11383 NULL, 0));
11384 break;
11385 #endif
11386 default:
11387 ret = -TARGET_ENOSYS;
11389 if (!is_error(ret)) {
11390 int i;
11391 for (i = 0; i < ret; i++) {
11392 target_ep[i].events = tswap32(ep[i].events);
11393 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11396 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11397 break;
11399 #endif
11400 #endif
11401 #ifdef TARGET_NR_prlimit64
11402 case TARGET_NR_prlimit64:
11404 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11405 struct target_rlimit64 *target_rnew, *target_rold;
11406 struct host_rlimit64 rnew, rold, *rnewp = 0;
11407 int resource = target_to_host_resource(arg2);
11408 if (arg3) {
11409 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11410 goto efault;
11412 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11413 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11414 unlock_user_struct(target_rnew, arg3, 0);
11415 rnewp = &rnew;
11418 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11419 if (!is_error(ret) && arg4) {
11420 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11421 goto efault;
11423 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11424 target_rold->rlim_max = tswap64(rold.rlim_max);
11425 unlock_user_struct(target_rold, arg4, 1);
11427 break;
11429 #endif
11430 #ifdef TARGET_NR_gethostname
11431 case TARGET_NR_gethostname:
11433 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11434 if (name) {
11435 ret = get_errno(gethostname(name, arg2));
11436 unlock_user(name, arg1, arg2);
11437 } else {
11438 ret = -TARGET_EFAULT;
11440 break;
11442 #endif
11443 #ifdef TARGET_NR_atomic_cmpxchg_32
11444 case TARGET_NR_atomic_cmpxchg_32:
11446 /* should use start_exclusive from main.c */
11447 abi_ulong mem_value;
11448 if (get_user_u32(mem_value, arg6)) {
11449 target_siginfo_t info;
11450 info.si_signo = SIGSEGV;
11451 info.si_errno = 0;
11452 info.si_code = TARGET_SEGV_MAPERR;
11453 info._sifields._sigfault._addr = arg6;
11454 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
11455 ret = 0xdeadbeef;
11458 if (mem_value == arg2)
11459 put_user_u32(arg1, arg6);
11460 ret = mem_value;
11461 break;
11463 #endif
11464 #ifdef TARGET_NR_atomic_barrier
11465 case TARGET_NR_atomic_barrier:
11467 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11468 ret = 0;
11469 break;
11471 #endif
11473 #ifdef TARGET_NR_timer_create
11474 case TARGET_NR_timer_create:
11476 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11478 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11480 int clkid = arg1;
11481 int timer_index = next_free_host_timer();
11483 if (timer_index < 0) {
11484 ret = -TARGET_EAGAIN;
11485 } else {
11486 timer_t *phtimer = g_posix_timers + timer_index;
11488 if (arg2) {
11489 phost_sevp = &host_sevp;
11490 ret = target_to_host_sigevent(phost_sevp, arg2);
11491 if (ret != 0) {
11492 break;
11496 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11497 if (ret) {
11498 phtimer = NULL;
11499 } else {
11500 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11501 goto efault;
11505 break;
11507 #endif
11509 #ifdef TARGET_NR_timer_settime
11510 case TARGET_NR_timer_settime:
11512 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11513 * struct itimerspec * old_value */
11514 target_timer_t timerid = get_timer_id(arg1);
11516 if (timerid < 0) {
11517 ret = timerid;
11518 } else if (arg3 == 0) {
11519 ret = -TARGET_EINVAL;
11520 } else {
11521 timer_t htimer = g_posix_timers[timerid];
11522 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11524 target_to_host_itimerspec(&hspec_new, arg3);
11525 ret = get_errno(
11526 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11527 host_to_target_itimerspec(arg2, &hspec_old);
11529 break;
11531 #endif
11533 #ifdef TARGET_NR_timer_gettime
11534 case TARGET_NR_timer_gettime:
11536 /* args: timer_t timerid, struct itimerspec *curr_value */
11537 target_timer_t timerid = get_timer_id(arg1);
11539 if (timerid < 0) {
11540 ret = timerid;
11541 } else if (!arg2) {
11542 ret = -TARGET_EFAULT;
11543 } else {
11544 timer_t htimer = g_posix_timers[timerid];
11545 struct itimerspec hspec;
11546 ret = get_errno(timer_gettime(htimer, &hspec));
11548 if (host_to_target_itimerspec(arg2, &hspec)) {
11549 ret = -TARGET_EFAULT;
11552 break;
11554 #endif
11556 #ifdef TARGET_NR_timer_getoverrun
11557 case TARGET_NR_timer_getoverrun:
11559 /* args: timer_t timerid */
11560 target_timer_t timerid = get_timer_id(arg1);
11562 if (timerid < 0) {
11563 ret = timerid;
11564 } else {
11565 timer_t htimer = g_posix_timers[timerid];
11566 ret = get_errno(timer_getoverrun(htimer));
11568 fd_trans_unregister(ret);
11569 break;
11571 #endif
11573 #ifdef TARGET_NR_timer_delete
11574 case TARGET_NR_timer_delete:
11576 /* args: timer_t timerid */
11577 target_timer_t timerid = get_timer_id(arg1);
11579 if (timerid < 0) {
11580 ret = timerid;
11581 } else {
11582 timer_t htimer = g_posix_timers[timerid];
11583 ret = get_errno(timer_delete(htimer));
11584 g_posix_timers[timerid] = 0;
11586 break;
11588 #endif
11590 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11591 case TARGET_NR_timerfd_create:
11592 ret = get_errno(timerfd_create(arg1,
11593 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11594 break;
11595 #endif
11597 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11598 case TARGET_NR_timerfd_gettime:
11600 struct itimerspec its_curr;
11602 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11604 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11605 goto efault;
11608 break;
11609 #endif
11611 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11612 case TARGET_NR_timerfd_settime:
11614 struct itimerspec its_new, its_old, *p_new;
11616 if (arg3) {
11617 if (target_to_host_itimerspec(&its_new, arg3)) {
11618 goto efault;
11620 p_new = &its_new;
11621 } else {
11622 p_new = NULL;
11625 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11627 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11628 goto efault;
11631 break;
11632 #endif
11634 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11635 case TARGET_NR_ioprio_get:
11636 ret = get_errno(ioprio_get(arg1, arg2));
11637 break;
11638 #endif
11640 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11641 case TARGET_NR_ioprio_set:
11642 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11643 break;
11644 #endif
11646 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11647 case TARGET_NR_setns:
11648 ret = get_errno(setns(arg1, arg2));
11649 break;
11650 #endif
11651 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11652 case TARGET_NR_unshare:
11653 ret = get_errno(unshare(arg1));
11654 break;
11655 #endif
11657 default:
11658 unimplemented:
11659 gemu_log("qemu: Unsupported syscall: %d\n", num);
11660 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11661 unimplemented_nowarn:
11662 #endif
11663 ret = -TARGET_ENOSYS;
11664 break;
11666 fail:
11667 #ifdef DEBUG
11668 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11669 #endif
11670 if(do_strace)
11671 print_syscall_ret(num, ret);
11672 trace_guest_user_syscall_ret(cpu, num, ret);
11673 return ret;
11674 efault:
11675 ret = -TARGET_EFAULT;
11676 goto fail;