Merge remote-tracking branch 'qemu/master'
[qemu/ar7.git] / linux-user / syscall.c
blob6b8c2ca24e3bff4073f1cb887bb6128fbe6e8e41
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #include "qemu/sockets.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #ifdef CONFIG_RTNETLINK
107 #include <linux/rtnetlink.h>
108 #include <linux/if_bridge.h>
109 #endif
110 #include <linux/audit.h>
111 #include "linux_loop.h"
112 #include "uname.h"
114 #include "qemu.h"
/* Subset of clone() flags tied to NPTL TLS/TID handling; presumably
 * consumed by the clone/fork emulation elsewhere in this file. */
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

/* Cloned from linux/msdos_fs.h, which cannot be included here because
 * it conflicts with other headers already pulled in. */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Redefine the _syscallN(type, name, ...) helpers locally: each expands
 * to a static wrapper function that invokes the host syscall() with
 * __NR_<name> and the given arguments. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Alias the host syscall numbers under __NR_sys_* names so the _syscallN
 * wrappers below can define sys_<name>() functions that do not clash with
 * the libc symbols of the same name. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

/* On 64-bit hosts there is no separate _llseek; map it to lseek. */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
/* Direct host syscall wrappers for calls the libc does not expose (or
 * exposes with different semantics).  Each is compiled only when both the
 * target needs it and the host kernel provides it. */
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#if defined(TARGET_NR_getdents) && defined(__NR_getdents)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif
/* Translation table between target and host open()/fcntl() flag bits.
 * Each row is { target_mask, target_bits, host_mask, host_bits }. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
/* Per-fd translation hooks: optional converters applied to data and
 * socket addresses flowing through specific emulated fds.  Registered
 * and looked up via the fd_trans_* helpers below. */
typedef abi_long (*TargetFdDataFunc)(void *, size_t);
typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
typedef struct TargetFdTrans {
    TargetFdDataFunc host_to_target_data;   /* convert data read from host */
    TargetFdDataFunc target_to_host_data;   /* convert data written by guest */
    TargetFdAddrFunc target_to_host_addr;   /* convert guest sockaddr */
} TargetFdTrans;

/* Sparse table of per-fd hooks, grown on demand in fd_trans_register(). */
static TargetFdTrans **target_fd_trans;

/* Number of entries currently allocated in target_fd_trans[]. */
static unsigned int target_fd_max;
305 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
307 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
308 return target_fd_trans[fd]->target_to_host_data;
310 return NULL;
313 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
315 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
316 return target_fd_trans[fd]->host_to_target_data;
318 return NULL;
321 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
323 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
324 return target_fd_trans[fd]->target_to_host_addr;
326 return NULL;
329 static void fd_trans_register(int fd, TargetFdTrans *trans)
331 unsigned int oldmax;
333 if (fd >= target_fd_max) {
334 oldmax = target_fd_max;
335 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
336 target_fd_trans = g_renew(TargetFdTrans *,
337 target_fd_trans, target_fd_max);
338 memset((void *)(target_fd_trans + oldmax), 0,
339 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
341 target_fd_trans[fd] = trans;
344 static void fd_trans_unregister(int fd)
346 if (fd >= 0 && fd < target_fd_max) {
347 target_fd_trans[fd] = NULL;
351 static void fd_trans_dup(int oldfd, int newfd)
353 fd_trans_unregister(newfd);
354 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
355 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper following the kernel syscall convention: on success
 * return the path length including the NUL terminator; on failure
 * return -1 with errno already set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        return -1;              /* getcwd() sets errno */
    }
    return strlen(cwd) + 1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc provides utimensat(); a NULL pathname selects futimens(),
 * matching the kernel syscall's behaviour. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
/* No libc wrapper; call the host syscall directly. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host has neither the wrapper nor the syscall: always fail. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers so the syscall dispatch below can call sys_inotify_*
 * uniformly whether or not the host libc exposes these functions. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1      /* force ENOSYS at runtime on old hosts */
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first unused slot in g_posix_timers[] and return its index,
 * or -1 when all slots are taken. */
static inline int next_free_host_timer(void)
{
    int slot;

    /* FIXME: Does finding the next free slot require a lock? */
    for (slot = 0; slot < ARRAY_SIZE(g_posix_timers); slot++) {
        if (g_posix_timers[slot] != 0) {
            continue;
        }
        g_posix_timers[slot] = (timer_t) 1;  /* mark slot as claimed */
        return slot;
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env)
{
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env)
{
    return 1;
}
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env)
{
    return 1;
}
#else
static inline int regpairs_aligned(void *cpu_env)
{
    return 0;
}
#endif
/* Upper bound on errno values that go through the translation tables. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init().  */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 * A zero entry means "no override": host value passes through unchanged.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN]            = TARGET_EAGAIN,
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
};
599 static inline int host_to_target_errno(int err)
601 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
602 host_to_target_errno_table[err]) {
603 return host_to_target_errno_table[err];
605 return err;
608 static inline int target_to_host_errno(int err)
610 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
611 target_to_host_errno_table[err]) {
612 return target_to_host_errno_table[err];
614 return err;
617 static inline abi_long get_errno(abi_long ret)
619 if (ret == -1)
620 return -host_to_target_errno(errno);
621 else
622 return ret;
625 static inline int is_error(abi_long ret)
627 return (abi_ulong)ret >= (abi_ulong)(-4096);
630 const char *target_strerror(int err)
632 if (err == TARGET_ERESTARTSYS) {
633 return "To be restarted";
635 if (err == TARGET_QEMU_ESIGRETURN) {
636 return "Successful exit from sigreturn";
639 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
640 return NULL;
642 return strerror(target_to_host_errno(err));
/* Generate static safe_<name>() wrappers of 0..6 arguments around
 * safe_syscall() (defined elsewhere in QEMU; presumably provides
 * signal-race-safe restart handling -- not visible in this file chunk). */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Instantiate safe_<name>() wrappers for the syscalls the emulation
 * issues on behalf of the guest. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
795 static inline int host_to_target_sock_type(int host_type)
797 int target_type;
799 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
800 case SOCK_DGRAM:
801 target_type = TARGET_SOCK_DGRAM;
802 break;
803 case SOCK_STREAM:
804 target_type = TARGET_SOCK_STREAM;
805 break;
806 default:
807 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
808 break;
811 #if defined(SOCK_CLOEXEC)
812 if (host_type & SOCK_CLOEXEC) {
813 target_type |= TARGET_SOCK_CLOEXEC;
815 #endif
817 #if defined(SOCK_NONBLOCK)
818 if (host_type & SOCK_NONBLOCK) {
819 target_type |= TARGET_SOCK_NONBLOCK;
821 #endif
823 return target_type;
/* Guest heap state: current break, initial break, and the top of the
 * highest host page already reserved for the heap. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

/* Record the guest's initial program break (called at image load). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

/* Uncomment the first definition to trace do_brk() decisions. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) and brk(below original break) both just report the
     * current break, per kernel behaviour. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
917 static inline abi_long copy_from_user_fdset(fd_set *fds,
918 abi_ulong target_fds_addr,
919 int n)
921 int i, nw, j, k;
922 abi_ulong b, *target_fds;
924 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
925 if (!(target_fds = lock_user(VERIFY_READ,
926 target_fds_addr,
927 sizeof(abi_ulong) * nw,
928 1)))
929 return -TARGET_EFAULT;
931 FD_ZERO(fds);
932 k = 0;
933 for (i = 0; i < nw; i++) {
934 /* grab the abi_ulong */
935 __get_user(b, &target_fds[i]);
936 for (j = 0; j < TARGET_ABI_BITS; j++) {
937 /* check the bit inside the abi_ulong */
938 if ((b >> j) & 1)
939 FD_SET(k, fds);
940 k++;
944 unlock_user(target_fds, target_fds_addr, 0);
946 return 0;
949 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
950 abi_ulong target_fds_addr,
951 int n)
953 if (target_fds_addr) {
954 if (copy_from_user_fdset(fds, target_fds_addr, n))
955 return -TARGET_EFAULT;
956 *fds_ptr = fds;
957 } else {
958 *fds_ptr = NULL;
960 return 0;
963 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
964 const fd_set *fds,
965 int n)
967 int i, nw, j, k;
968 abi_long v;
969 abi_ulong *target_fds;
971 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
972 if (!(target_fds = lock_user(VERIFY_WRITE,
973 target_fds_addr,
974 sizeof(abi_ulong) * nw,
975 0)))
976 return -TARGET_EFAULT;
978 k = 0;
979 for (i = 0; i < nw; i++) {
980 v = 0;
981 for (j = 0; j < TARGET_ABI_BITS; j++) {
982 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
983 k++;
985 __put_user(v, &target_fds[i]);
988 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
990 return 0;
/* Host clock tick rate (clock_t granularity); Alpha kernels use 1024 Hz. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's TARGET_HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1008 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1009 const struct rusage *rusage)
1011 struct target_rusage *target_rusage;
1013 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1014 return -TARGET_EFAULT;
1015 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1016 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1017 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1018 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1019 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1020 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1021 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1022 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1023 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1024 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1025 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1026 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1027 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1028 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1029 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1030 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1031 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1032 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1033 unlock_user_struct(target_rusage, target_addr, 1);
1035 return 0;
1038 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1040 abi_ulong target_rlim_swap;
1041 rlim_t result;
1043 target_rlim_swap = tswapal(target_rlim);
1044 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1045 return RLIM_INFINITY;
1047 result = target_rlim_swap;
1048 if (target_rlim_swap != (rlim_t)result)
1049 return RLIM_INFINITY;
1051 return result;
1054 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1056 abi_ulong target_rlim_swap;
1057 abi_ulong result;
1059 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1060 target_rlim_swap = TARGET_RLIM_INFINITY;
1061 else
1062 target_rlim_swap = rlim;
1063 result = tswapal(target_rlim_swap);
1065 return result;
1068 static inline int target_to_host_resource(int code)
1070 switch (code) {
1071 case TARGET_RLIMIT_AS:
1072 return RLIMIT_AS;
1073 case TARGET_RLIMIT_CORE:
1074 return RLIMIT_CORE;
1075 case TARGET_RLIMIT_CPU:
1076 return RLIMIT_CPU;
1077 case TARGET_RLIMIT_DATA:
1078 return RLIMIT_DATA;
1079 case TARGET_RLIMIT_FSIZE:
1080 return RLIMIT_FSIZE;
1081 case TARGET_RLIMIT_LOCKS:
1082 return RLIMIT_LOCKS;
1083 case TARGET_RLIMIT_MEMLOCK:
1084 return RLIMIT_MEMLOCK;
1085 case TARGET_RLIMIT_MSGQUEUE:
1086 return RLIMIT_MSGQUEUE;
1087 case TARGET_RLIMIT_NICE:
1088 return RLIMIT_NICE;
1089 case TARGET_RLIMIT_NOFILE:
1090 return RLIMIT_NOFILE;
1091 case TARGET_RLIMIT_NPROC:
1092 return RLIMIT_NPROC;
1093 case TARGET_RLIMIT_RSS:
1094 return RLIMIT_RSS;
1095 case TARGET_RLIMIT_RTPRIO:
1096 return RLIMIT_RTPRIO;
1097 case TARGET_RLIMIT_SIGPENDING:
1098 return RLIMIT_SIGPENDING;
1099 case TARGET_RLIMIT_STACK:
1100 return RLIMIT_STACK;
1101 default:
1102 return code;
1106 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1107 abi_ulong target_tv_addr)
1109 struct target_timeval *target_tv;
1111 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1112 return -TARGET_EFAULT;
1114 __get_user(tv->tv_sec, &target_tv->tv_sec);
1115 __get_user(tv->tv_usec, &target_tv->tv_usec);
1117 unlock_user_struct(target_tv, target_tv_addr, 0);
1119 return 0;
1122 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1123 const struct timeval *tv)
1125 struct target_timeval *target_tv;
1127 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1128 return -TARGET_EFAULT;
1130 __put_user(tv->tv_sec, &target_tv->tv_sec);
1131 __put_user(tv->tv_usec, &target_tv->tv_usec);
1133 unlock_user_struct(target_tv, target_tv_addr, 1);
1135 return 0;
1138 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1139 abi_ulong target_tz_addr)
1141 struct target_timezone *target_tz;
1143 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1144 return -TARGET_EFAULT;
1147 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1148 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1150 unlock_user_struct(target_tz, target_tz_addr, 0);
1152 return 0;
1155 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1156 #include <mqueue.h>
1158 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1159 abi_ulong target_mq_attr_addr)
1161 struct target_mq_attr *target_mq_attr;
1163 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1164 target_mq_attr_addr, 1))
1165 return -TARGET_EFAULT;
1167 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1168 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1169 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1170 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1172 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1174 return 0;
1177 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1178 const struct mq_attr *attr)
1180 struct target_mq_attr *target_mq_attr;
1182 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1183 target_mq_attr_addr, 0))
1184 return -TARGET_EFAULT;
1186 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1187 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1188 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1189 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1191 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1193 return 0;
1195 #endif
1197 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1198 /* do_select() must return target values and target errnos. */
1199 static abi_long do_select(int n,
1200 abi_ulong rfd_addr, abi_ulong wfd_addr,
1201 abi_ulong efd_addr, abi_ulong target_tv_addr)
1203 fd_set rfds, wfds, efds;
1204 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1205 struct timeval tv;
1206 struct timespec ts, *ts_ptr;
1207 abi_long ret;
1209 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1210 if (ret) {
1211 return ret;
1213 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1214 if (ret) {
1215 return ret;
1217 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1218 if (ret) {
1219 return ret;
1222 if (target_tv_addr) {
1223 if (copy_from_user_timeval(&tv, target_tv_addr))
1224 return -TARGET_EFAULT;
1225 ts.tv_sec = tv.tv_sec;
1226 ts.tv_nsec = tv.tv_usec * 1000;
1227 ts_ptr = &ts;
1228 } else {
1229 ts_ptr = NULL;
1232 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1233 ts_ptr, NULL));
1235 if (!is_error(ret)) {
1236 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1237 return -TARGET_EFAULT;
1238 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1239 return -TARGET_EFAULT;
1240 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1241 return -TARGET_EFAULT;
1243 if (target_tv_addr) {
1244 tv.tv_sec = ts.tv_sec;
1245 tv.tv_usec = ts.tv_nsec / 1000;
1246 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1247 return -TARGET_EFAULT;
1252 return ret;
1254 #endif
1256 static abi_long do_pipe2(int host_pipe[], int flags)
1258 #ifdef CONFIG_PIPE2
1259 return pipe2(host_pipe, flags);
1260 #else
1261 return -ENOSYS;
1262 #endif
1265 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1266 int flags, int is_pipe2)
1268 int host_pipe[2];
1269 abi_long ret;
1270 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1272 if (is_error(ret))
1273 return get_errno(ret);
1275 /* Several targets have special calling conventions for the original
1276 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1277 if (!is_pipe2) {
1278 #if defined(TARGET_ALPHA)
1279 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1280 return host_pipe[0];
1281 #elif defined(TARGET_MIPS)
1282 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1283 return host_pipe[0];
1284 #elif defined(TARGET_SH4)
1285 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1286 return host_pipe[0];
1287 #elif defined(TARGET_SPARC)
1288 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1289 return host_pipe[0];
1290 #endif
1293 if (put_user_s32(host_pipe[0], pipedes)
1294 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1295 return -TARGET_EFAULT;
1296 return get_errno(ret);
1299 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1300 abi_ulong target_addr,
1301 socklen_t len)
1303 struct target_ip_mreqn *target_smreqn;
1305 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1306 if (!target_smreqn)
1307 return -TARGET_EFAULT;
1308 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1309 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1310 if (len == sizeof(struct target_ip_mreqn))
1311 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1312 unlock_user(target_smreqn, target_addr, 0);
1314 return 0;
1317 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1318 abi_ulong target_addr,
1319 socklen_t len)
1321 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1322 sa_family_t sa_family;
1323 struct target_sockaddr *target_saddr;
1325 if (fd_trans_target_to_host_addr(fd)) {
1326 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1329 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1330 if (!target_saddr)
1331 return -TARGET_EFAULT;
1333 sa_family = tswap16(target_saddr->sa_family);
1335 /* Oops. The caller might send a incomplete sun_path; sun_path
1336 * must be terminated by \0 (see the manual page), but
1337 * unfortunately it is quite common to specify sockaddr_un
1338 * length as "strlen(x->sun_path)" while it should be
1339 * "strlen(...) + 1". We'll fix that here if needed.
1340 * Linux kernel has a similar feature.
1343 if (sa_family == AF_UNIX) {
1344 if (len < unix_maxlen && len > 0) {
1345 char *cp = (char*)target_saddr;
1347 if ( cp[len-1] && !cp[len] )
1348 len++;
1350 if (len > unix_maxlen)
1351 len = unix_maxlen;
1354 memcpy(addr, target_saddr, len);
1355 addr->sa_family = sa_family;
1356 if (sa_family == AF_NETLINK) {
1357 struct sockaddr_nl *nladdr;
1359 nladdr = (struct sockaddr_nl *)addr;
1360 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1361 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1362 } else if (sa_family == AF_PACKET) {
1363 struct target_sockaddr_ll *lladdr;
1365 lladdr = (struct target_sockaddr_ll *)addr;
1366 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1367 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1369 unlock_user(target_saddr, target_addr, 0);
1371 return 0;
1374 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1375 struct sockaddr *addr,
1376 socklen_t len)
1378 struct target_sockaddr *target_saddr;
1380 if (len == 0) {
1381 return 0;
1384 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1385 if (!target_saddr)
1386 return -TARGET_EFAULT;
1387 memcpy(target_saddr, addr, len);
1388 if (len >= offsetof(struct target_sockaddr, sa_family) +
1389 sizeof(target_saddr->sa_family)) {
1390 target_saddr->sa_family = tswap16(addr->sa_family);
1392 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1393 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1394 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1395 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1396 } else if (addr->sa_family == AF_PACKET) {
1397 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1398 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1399 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1401 unlock_user(target_saddr, target_addr, len);
1403 return 0;
1406 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1407 struct target_msghdr *target_msgh)
1409 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1410 abi_long msg_controllen;
1411 abi_ulong target_cmsg_addr;
1412 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1413 socklen_t space = 0;
1415 msg_controllen = tswapal(target_msgh->msg_controllen);
1416 if (msg_controllen < sizeof (struct target_cmsghdr))
1417 goto the_end;
1418 target_cmsg_addr = tswapal(target_msgh->msg_control);
1419 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1420 target_cmsg_start = target_cmsg;
1421 if (!target_cmsg)
1422 return -TARGET_EFAULT;
1424 while (cmsg && target_cmsg) {
1425 void *data = CMSG_DATA(cmsg);
1426 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1428 int len = tswapal(target_cmsg->cmsg_len)
1429 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1431 space += CMSG_SPACE(len);
1432 if (space > msgh->msg_controllen) {
1433 space -= CMSG_SPACE(len);
1434 /* This is a QEMU bug, since we allocated the payload
1435 * area ourselves (unlike overflow in host-to-target
1436 * conversion, which is just the guest giving us a buffer
1437 * that's too small). It can't happen for the payload types
1438 * we currently support; if it becomes an issue in future
1439 * we would need to improve our allocation strategy to
1440 * something more intelligent than "twice the size of the
1441 * target buffer we're reading from".
1443 gemu_log("Host cmsg overflow\n");
1444 break;
1447 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1448 cmsg->cmsg_level = SOL_SOCKET;
1449 } else {
1450 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1452 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1453 cmsg->cmsg_len = CMSG_LEN(len);
1455 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1456 int *fd = (int *)data;
1457 int *target_fd = (int *)target_data;
1458 int i, numfds = len / sizeof(int);
1460 for (i = 0; i < numfds; i++) {
1461 __get_user(fd[i], target_fd + i);
1463 } else if (cmsg->cmsg_level == SOL_SOCKET
1464 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1465 struct ucred *cred = (struct ucred *)data;
1466 struct target_ucred *target_cred =
1467 (struct target_ucred *)target_data;
1469 __get_user(cred->pid, &target_cred->pid);
1470 __get_user(cred->uid, &target_cred->uid);
1471 __get_user(cred->gid, &target_cred->gid);
1472 } else {
1473 gemu_log("Unsupported ancillary data: %d/%d\n",
1474 cmsg->cmsg_level, cmsg->cmsg_type);
1475 memcpy(data, target_data, len);
1478 cmsg = CMSG_NXTHDR(msgh, cmsg);
1479 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1480 target_cmsg_start);
1482 unlock_user(target_cmsg, target_cmsg_addr, 0);
1483 the_end:
1484 msgh->msg_controllen = space;
1485 return 0;
1488 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1489 struct msghdr *msgh)
1491 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1492 abi_long msg_controllen;
1493 abi_ulong target_cmsg_addr;
1494 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1495 socklen_t space = 0;
1497 msg_controllen = tswapal(target_msgh->msg_controllen);
1498 if (msg_controllen < sizeof (struct target_cmsghdr))
1499 goto the_end;
1500 target_cmsg_addr = tswapal(target_msgh->msg_control);
1501 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1502 target_cmsg_start = target_cmsg;
1503 if (!target_cmsg)
1504 return -TARGET_EFAULT;
1506 while (cmsg && target_cmsg) {
1507 void *data = CMSG_DATA(cmsg);
1508 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1510 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1511 int tgt_len, tgt_space;
1513 /* We never copy a half-header but may copy half-data;
1514 * this is Linux's behaviour in put_cmsg(). Note that
1515 * truncation here is a guest problem (which we report
1516 * to the guest via the CTRUNC bit), unlike truncation
1517 * in target_to_host_cmsg, which is a QEMU bug.
1519 if (msg_controllen < sizeof(struct cmsghdr)) {
1520 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1521 break;
1524 if (cmsg->cmsg_level == SOL_SOCKET) {
1525 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1526 } else {
1527 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1529 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1531 tgt_len = TARGET_CMSG_LEN(len);
1533 /* Payload types which need a different size of payload on
1534 * the target must adjust tgt_len here.
1536 switch (cmsg->cmsg_level) {
1537 case SOL_SOCKET:
1538 switch (cmsg->cmsg_type) {
1539 case SO_TIMESTAMP:
1540 tgt_len = sizeof(struct target_timeval);
1541 break;
1542 default:
1543 break;
1545 default:
1546 break;
1549 if (msg_controllen < tgt_len) {
1550 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1551 tgt_len = msg_controllen;
1554 /* We must now copy-and-convert len bytes of payload
1555 * into tgt_len bytes of destination space. Bear in mind
1556 * that in both source and destination we may be dealing
1557 * with a truncated value!
1559 switch (cmsg->cmsg_level) {
1560 case SOL_SOCKET:
1561 switch (cmsg->cmsg_type) {
1562 case SCM_RIGHTS:
1564 int *fd = (int *)data;
1565 int *target_fd = (int *)target_data;
1566 int i, numfds = tgt_len / sizeof(int);
1568 for (i = 0; i < numfds; i++) {
1569 __put_user(fd[i], target_fd + i);
1571 break;
1573 case SO_TIMESTAMP:
1575 struct timeval *tv = (struct timeval *)data;
1576 struct target_timeval *target_tv =
1577 (struct target_timeval *)target_data;
1579 if (len != sizeof(struct timeval) ||
1580 tgt_len != sizeof(struct target_timeval)) {
1581 goto unimplemented;
1584 /* copy struct timeval to target */
1585 __put_user(tv->tv_sec, &target_tv->tv_sec);
1586 __put_user(tv->tv_usec, &target_tv->tv_usec);
1587 break;
1589 case SCM_CREDENTIALS:
1591 struct ucred *cred = (struct ucred *)data;
1592 struct target_ucred *target_cred =
1593 (struct target_ucred *)target_data;
1595 __put_user(cred->pid, &target_cred->pid);
1596 __put_user(cred->uid, &target_cred->uid);
1597 __put_user(cred->gid, &target_cred->gid);
1598 break;
1600 default:
1601 goto unimplemented;
1603 break;
1605 default:
1606 unimplemented:
1607 gemu_log("Unsupported ancillary data: %d/%d\n",
1608 cmsg->cmsg_level, cmsg->cmsg_type);
1609 memcpy(target_data, data, MIN(len, tgt_len));
1610 if (tgt_len > len) {
1611 memset(target_data + len, 0, tgt_len - len);
1615 target_cmsg->cmsg_len = tswapal(tgt_len);
1616 tgt_space = TARGET_CMSG_SPACE(len);
1617 if (msg_controllen < tgt_space) {
1618 tgt_space = msg_controllen;
1620 msg_controllen -= tgt_space;
1621 space += tgt_space;
1622 cmsg = CMSG_NXTHDR(msgh, cmsg);
1623 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1624 target_cmsg_start);
1626 unlock_user(target_cmsg, target_cmsg_addr, space);
1627 the_end:
1628 target_msgh->msg_controllen = tswapal(space);
1629 return 0;
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1641 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1642 size_t len,
1643 abi_long (*host_to_target_nlmsg)
1644 (struct nlmsghdr *))
1646 uint32_t nlmsg_len;
1647 abi_long ret;
1649 while (len > sizeof(struct nlmsghdr)) {
1651 nlmsg_len = nlh->nlmsg_len;
1652 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1653 nlmsg_len > len) {
1654 break;
1657 switch (nlh->nlmsg_type) {
1658 case NLMSG_DONE:
1659 tswap_nlmsghdr(nlh);
1660 return 0;
1661 case NLMSG_NOOP:
1662 break;
1663 case NLMSG_ERROR:
1665 struct nlmsgerr *e = NLMSG_DATA(nlh);
1666 e->error = tswap32(e->error);
1667 tswap_nlmsghdr(&e->msg);
1668 tswap_nlmsghdr(nlh);
1669 return 0;
1671 default:
1672 ret = host_to_target_nlmsg(nlh);
1673 if (ret < 0) {
1674 tswap_nlmsghdr(nlh);
1675 return ret;
1677 break;
1679 tswap_nlmsghdr(nlh);
1680 len -= NLMSG_ALIGN(nlmsg_len);
1681 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
1683 return 0;
1686 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1687 size_t len,
1688 abi_long (*target_to_host_nlmsg)
1689 (struct nlmsghdr *))
1691 int ret;
1693 while (len > sizeof(struct nlmsghdr)) {
1694 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1695 tswap32(nlh->nlmsg_len) > len) {
1696 break;
1698 tswap_nlmsghdr(nlh);
1699 switch (nlh->nlmsg_type) {
1700 case NLMSG_DONE:
1701 return 0;
1702 case NLMSG_NOOP:
1703 break;
1704 case NLMSG_ERROR:
1706 struct nlmsgerr *e = NLMSG_DATA(nlh);
1707 e->error = tswap32(e->error);
1708 tswap_nlmsghdr(&e->msg);
1709 return 0;
1711 default:
1712 ret = target_to_host_nlmsg(nlh);
1713 if (ret < 0) {
1714 return ret;
1717 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1718 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1720 return 0;
1723 #ifdef CONFIG_RTNETLINK
1724 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
1725 size_t len, void *context,
1726 abi_long (*host_to_target_nlattr)
1727 (struct nlattr *,
1728 void *context))
1730 unsigned short nla_len;
1731 abi_long ret;
1733 while (len > sizeof(struct nlattr)) {
1734 nla_len = nlattr->nla_len;
1735 if (nla_len < sizeof(struct nlattr) ||
1736 nla_len > len) {
1737 break;
1739 ret = host_to_target_nlattr(nlattr, context);
1740 nlattr->nla_len = tswap16(nlattr->nla_len);
1741 nlattr->nla_type = tswap16(nlattr->nla_type);
1742 if (ret < 0) {
1743 return ret;
1745 len -= NLA_ALIGN(nla_len);
1746 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
1748 return 0;
1751 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1752 size_t len,
1753 abi_long (*host_to_target_rtattr)
1754 (struct rtattr *))
1756 unsigned short rta_len;
1757 abi_long ret;
1759 while (len > sizeof(struct rtattr)) {
1760 rta_len = rtattr->rta_len;
1761 if (rta_len < sizeof(struct rtattr) ||
1762 rta_len > len) {
1763 break;
1765 ret = host_to_target_rtattr(rtattr);
1766 rtattr->rta_len = tswap16(rtattr->rta_len);
1767 rtattr->rta_type = tswap16(rtattr->rta_type);
1768 if (ret < 0) {
1769 return ret;
1771 len -= RTA_ALIGN(rta_len);
1772 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1774 return 0;
/* Pointer to the payload of a netlink attribute.  Parenthesized so the
 * NLA_HDRLEN offset is applied to the char * (standard C forbids
 * arithmetic on void *; the old form relied on a GCC extension). */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
1779 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
1780 void *context)
1782 uint16_t *u16;
1783 uint32_t *u32;
1784 uint64_t *u64;
1786 switch (nlattr->nla_type) {
1787 /* no data */
1788 case IFLA_BR_FDB_FLUSH:
1789 break;
1790 /* binary */
1791 case IFLA_BR_GROUP_ADDR:
1792 break;
1793 /* uint8_t */
1794 case IFLA_BR_VLAN_FILTERING:
1795 case IFLA_BR_TOPOLOGY_CHANGE:
1796 case IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
1797 case IFLA_BR_MCAST_ROUTER:
1798 case IFLA_BR_MCAST_SNOOPING:
1799 case IFLA_BR_MCAST_QUERY_USE_IFADDR:
1800 case IFLA_BR_MCAST_QUERIER:
1801 case IFLA_BR_NF_CALL_IPTABLES:
1802 case IFLA_BR_NF_CALL_IP6TABLES:
1803 case IFLA_BR_NF_CALL_ARPTABLES:
1804 break;
1805 /* uint16_t */
1806 case IFLA_BR_PRIORITY:
1807 case IFLA_BR_VLAN_PROTOCOL:
1808 case IFLA_BR_GROUP_FWD_MASK:
1809 case IFLA_BR_ROOT_PORT:
1810 case IFLA_BR_VLAN_DEFAULT_PVID:
1811 u16 = NLA_DATA(nlattr);
1812 *u16 = tswap16(*u16);
1813 break;
1814 /* uint32_t */
1815 case IFLA_BR_FORWARD_DELAY:
1816 case IFLA_BR_HELLO_TIME:
1817 case IFLA_BR_MAX_AGE:
1818 case IFLA_BR_AGEING_TIME:
1819 case IFLA_BR_STP_STATE:
1820 case IFLA_BR_ROOT_PATH_COST:
1821 case IFLA_BR_MCAST_HASH_ELASTICITY:
1822 case IFLA_BR_MCAST_HASH_MAX:
1823 case IFLA_BR_MCAST_LAST_MEMBER_CNT:
1824 case IFLA_BR_MCAST_STARTUP_QUERY_CNT:
1825 u32 = NLA_DATA(nlattr);
1826 *u32 = tswap32(*u32);
1827 break;
1828 /* uint64_t */
1829 case IFLA_BR_HELLO_TIMER:
1830 case IFLA_BR_TCN_TIMER:
1831 case IFLA_BR_GC_TIMER:
1832 case IFLA_BR_TOPOLOGY_CHANGE_TIMER:
1833 case IFLA_BR_MCAST_LAST_MEMBER_INTVL:
1834 case IFLA_BR_MCAST_MEMBERSHIP_INTVL:
1835 case IFLA_BR_MCAST_QUERIER_INTVL:
1836 case IFLA_BR_MCAST_QUERY_INTVL:
1837 case IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
1838 case IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
1839 u64 = NLA_DATA(nlattr);
1840 *u64 = tswap64(*u64);
1841 break;
1842 /* ifla_bridge_id: uin8_t[] */
1843 case IFLA_BR_ROOT_ID:
1844 case IFLA_BR_BRIDGE_ID:
1845 break;
1846 default:
1847 gemu_log("Unknown IFLA_BR type %d\n", nlattr->nla_type);
1848 break;
1850 return 0;
1853 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
1854 void *context)
1856 uint16_t *u16;
1857 uint32_t *u32;
1858 uint64_t *u64;
1860 switch (nlattr->nla_type) {
1861 /* uint8_t */
1862 case IFLA_BRPORT_STATE:
1863 case IFLA_BRPORT_MODE:
1864 case IFLA_BRPORT_GUARD:
1865 case IFLA_BRPORT_PROTECT:
1866 case IFLA_BRPORT_FAST_LEAVE:
1867 case IFLA_BRPORT_LEARNING:
1868 case IFLA_BRPORT_UNICAST_FLOOD:
1869 case IFLA_BRPORT_PROXYARP:
1870 case IFLA_BRPORT_LEARNING_SYNC:
1871 case IFLA_BRPORT_PROXYARP_WIFI:
1872 case IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
1873 case IFLA_BRPORT_CONFIG_PENDING:
1874 case IFLA_BRPORT_MULTICAST_ROUTER:
1875 break;
1876 /* uint16_t */
1877 case IFLA_BRPORT_PRIORITY:
1878 case IFLA_BRPORT_DESIGNATED_PORT:
1879 case IFLA_BRPORT_DESIGNATED_COST:
1880 case IFLA_BRPORT_ID:
1881 case IFLA_BRPORT_NO:
1882 u16 = NLA_DATA(nlattr);
1883 *u16 = tswap16(*u16);
1884 break;
1885 /* uin32_t */
1886 case IFLA_BRPORT_COST:
1887 u32 = NLA_DATA(nlattr);
1888 *u32 = tswap32(*u32);
1889 break;
1890 /* uint64_t */
1891 case IFLA_BRPORT_MESSAGE_AGE_TIMER:
1892 case IFLA_BRPORT_FORWARD_DELAY_TIMER:
1893 case IFLA_BRPORT_HOLD_TIMER:
1894 u64 = NLA_DATA(nlattr);
1895 *u64 = tswap64(*u64);
1896 break;
1897 /* ifla_bridge_id: uint8_t[] */
1898 case IFLA_BRPORT_ROOT_ID:
1899 case IFLA_BRPORT_BRIDGE_ID:
1900 break;
1901 default:
1902 gemu_log("Unknown IFLA_BRPORT type %d\n", nlattr->nla_type);
1903 break;
1905 return 0;
/*
 * Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings between
 * the attribute callbacks so nested IFLA_INFO_DATA payloads can be
 * dispatched on the link kind.
 */
struct linkinfo_context {
    int len;            /* length of 'name' in bytes */
    char *name;         /* link kind, e.g. "bridge" */
    int slave_len;      /* length of 'slave_name' in bytes */
    char *slave_name;   /* slave link kind */
};
1915 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
1916 void *context)
1918 struct linkinfo_context *li_context = context;
1920 switch (nlattr->nla_type) {
1921 /* string */
1922 case IFLA_INFO_KIND:
1923 li_context->name = NLA_DATA(nlattr);
1924 li_context->len = nlattr->nla_len - NLA_HDRLEN;
1925 break;
1926 case IFLA_INFO_SLAVE_KIND:
1927 li_context->slave_name = NLA_DATA(nlattr);
1928 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
1929 break;
1930 /* stats */
1931 case IFLA_INFO_XSTATS:
1932 /* FIXME: only used by CAN */
1933 break;
1934 /* nested */
1935 case IFLA_INFO_DATA:
1936 if (strncmp(li_context->name, "bridge",
1937 li_context->len) == 0) {
1938 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
1939 nlattr->nla_len,
1940 NULL,
1941 host_to_target_data_bridge_nlattr);
1942 } else {
1943 gemu_log("Unknown IFLA_INFO_KIND %s\n", li_context->name);
1945 break;
1946 case IFLA_INFO_SLAVE_DATA:
1947 if (strncmp(li_context->slave_name, "bridge",
1948 li_context->slave_len) == 0) {
1949 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
1950 nlattr->nla_len,
1951 NULL,
1952 host_to_target_slave_data_bridge_nlattr);
1953 } else {
1954 gemu_log("Unknown IFLA_INFO_SLAVE_KIND %s\n",
1955 li_context->slave_name);
1957 break;
1958 default:
1959 gemu_log("Unknown host IFLA_INFO type: %d\n", nlattr->nla_type);
1960 break;
1963 return 0;
1966 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
1967 void *context)
1969 uint32_t *u32;
1970 int i;
1972 switch (nlattr->nla_type) {
1973 case IFLA_INET_CONF:
1974 u32 = NLA_DATA(nlattr);
1975 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
1976 i++) {
1977 u32[i] = tswap32(u32[i]);
1979 break;
1980 default:
1981 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
1983 return 0;
1986 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
1987 void *context)
1989 uint32_t *u32;
1990 uint64_t *u64;
1991 struct ifla_cacheinfo *ci;
1992 int i;
1994 switch (nlattr->nla_type) {
1995 /* binaries */
1996 case IFLA_INET6_TOKEN:
1997 break;
1998 /* uint8_t */
1999 case IFLA_INET6_ADDR_GEN_MODE:
2000 break;
2001 /* uint32_t */
2002 case IFLA_INET6_FLAGS:
2003 u32 = NLA_DATA(nlattr);
2004 *u32 = tswap32(*u32);
2005 break;
2006 /* uint32_t[] */
2007 case IFLA_INET6_CONF:
2008 u32 = NLA_DATA(nlattr);
2009 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2010 i++) {
2011 u32[i] = tswap32(u32[i]);
2013 break;
2014 /* ifla_cacheinfo */
2015 case IFLA_INET6_CACHEINFO:
2016 ci = NLA_DATA(nlattr);
2017 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2018 ci->tstamp = tswap32(ci->tstamp);
2019 ci->reachable_time = tswap32(ci->reachable_time);
2020 ci->retrans_time = tswap32(ci->retrans_time);
2021 break;
2022 /* uint64_t[] */
2023 case IFLA_INET6_STATS:
2024 case IFLA_INET6_ICMP6STATS:
2025 u64 = NLA_DATA(nlattr);
2026 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2027 i++) {
2028 u64[i] = tswap64(u64[i]);
2030 break;
2031 default:
2032 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2034 return 0;
2037 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2038 void *context)
2040 switch (nlattr->nla_type) {
2041 case AF_INET:
2042 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2043 NULL,
2044 host_to_target_data_inet_nlattr);
2045 case AF_INET6:
2046 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2047 NULL,
2048 host_to_target_data_inet6_nlattr);
2049 default:
2050 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2051 break;
2053 return 0;
2056 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2058 uint32_t *u32;
2059 struct rtnl_link_stats *st;
2060 struct rtnl_link_stats64 *st64;
2061 struct rtnl_link_ifmap *map;
2062 struct linkinfo_context li_context;
2064 switch (rtattr->rta_type) {
2065 /* binary stream */
2066 case IFLA_ADDRESS:
2067 case IFLA_BROADCAST:
2068 /* string */
2069 case IFLA_IFNAME:
2070 case IFLA_QDISC:
2071 break;
2072 /* uin8_t */
2073 case IFLA_OPERSTATE:
2074 case IFLA_LINKMODE:
2075 case IFLA_CARRIER:
2076 case IFLA_PROTO_DOWN:
2077 break;
2078 /* uint32_t */
2079 case IFLA_MTU:
2080 case IFLA_LINK:
2081 case IFLA_WEIGHT:
2082 case IFLA_TXQLEN:
2083 case IFLA_CARRIER_CHANGES:
2084 case IFLA_NUM_RX_QUEUES:
2085 case IFLA_NUM_TX_QUEUES:
2086 case IFLA_PROMISCUITY:
2087 case IFLA_EXT_MASK:
2088 case IFLA_LINK_NETNSID:
2089 case IFLA_GROUP:
2090 case IFLA_MASTER:
2091 case IFLA_NUM_VF:
2092 u32 = RTA_DATA(rtattr);
2093 *u32 = tswap32(*u32);
2094 break;
2095 /* struct rtnl_link_stats */
2096 case IFLA_STATS:
2097 st = RTA_DATA(rtattr);
2098 st->rx_packets = tswap32(st->rx_packets);
2099 st->tx_packets = tswap32(st->tx_packets);
2100 st->rx_bytes = tswap32(st->rx_bytes);
2101 st->tx_bytes = tswap32(st->tx_bytes);
2102 st->rx_errors = tswap32(st->rx_errors);
2103 st->tx_errors = tswap32(st->tx_errors);
2104 st->rx_dropped = tswap32(st->rx_dropped);
2105 st->tx_dropped = tswap32(st->tx_dropped);
2106 st->multicast = tswap32(st->multicast);
2107 st->collisions = tswap32(st->collisions);
2109 /* detailed rx_errors: */
2110 st->rx_length_errors = tswap32(st->rx_length_errors);
2111 st->rx_over_errors = tswap32(st->rx_over_errors);
2112 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2113 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2114 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2115 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2117 /* detailed tx_errors */
2118 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2119 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2120 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2121 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2122 st->tx_window_errors = tswap32(st->tx_window_errors);
2124 /* for cslip etc */
2125 st->rx_compressed = tswap32(st->rx_compressed);
2126 st->tx_compressed = tswap32(st->tx_compressed);
2127 break;
2128 /* struct rtnl_link_stats64 */
2129 case IFLA_STATS64:
2130 st64 = RTA_DATA(rtattr);
2131 st64->rx_packets = tswap64(st64->rx_packets);
2132 st64->tx_packets = tswap64(st64->tx_packets);
2133 st64->rx_bytes = tswap64(st64->rx_bytes);
2134 st64->tx_bytes = tswap64(st64->tx_bytes);
2135 st64->rx_errors = tswap64(st64->rx_errors);
2136 st64->tx_errors = tswap64(st64->tx_errors);
2137 st64->rx_dropped = tswap64(st64->rx_dropped);
2138 st64->tx_dropped = tswap64(st64->tx_dropped);
2139 st64->multicast = tswap64(st64->multicast);
2140 st64->collisions = tswap64(st64->collisions);
2142 /* detailed rx_errors: */
2143 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2144 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2145 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2146 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2147 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2148 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2150 /* detailed tx_errors */
2151 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2152 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2153 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2154 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2155 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2157 /* for cslip etc */
2158 st64->rx_compressed = tswap64(st64->rx_compressed);
2159 st64->tx_compressed = tswap64(st64->tx_compressed);
2160 break;
2161 /* struct rtnl_link_ifmap */
2162 case IFLA_MAP:
2163 map = RTA_DATA(rtattr);
2164 map->mem_start = tswap64(map->mem_start);
2165 map->mem_end = tswap64(map->mem_end);
2166 map->base_addr = tswap64(map->base_addr);
2167 map->irq = tswap16(map->irq);
2168 break;
2169 /* nested */
2170 case IFLA_LINKINFO:
2171 memset(&li_context, 0, sizeof(li_context));
2172 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2173 &li_context,
2174 host_to_target_data_linkinfo_nlattr);
2175 case IFLA_AF_SPEC:
2176 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2177 NULL,
2178 host_to_target_data_spec_nlattr);
2179 default:
2180 gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
2181 break;
2183 return 0;
2186 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2188 uint32_t *u32;
2189 struct ifa_cacheinfo *ci;
2191 switch (rtattr->rta_type) {
2192 /* binary: depends on family type */
2193 case IFA_ADDRESS:
2194 case IFA_LOCAL:
2195 break;
2196 /* string */
2197 case IFA_LABEL:
2198 break;
2199 /* u32 */
2200 case IFA_FLAGS:
2201 case IFA_BROADCAST:
2202 u32 = RTA_DATA(rtattr);
2203 *u32 = tswap32(*u32);
2204 break;
2205 /* struct ifa_cacheinfo */
2206 case IFA_CACHEINFO:
2207 ci = RTA_DATA(rtattr);
2208 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2209 ci->ifa_valid = tswap32(ci->ifa_valid);
2210 ci->cstamp = tswap32(ci->cstamp);
2211 ci->tstamp = tswap32(ci->tstamp);
2212 break;
2213 default:
2214 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2215 break;
2217 return 0;
2220 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2222 uint32_t *u32;
2223 switch (rtattr->rta_type) {
2224 /* binary: depends on family type */
2225 case RTA_GATEWAY:
2226 case RTA_DST:
2227 case RTA_PREFSRC:
2228 break;
2229 /* u32 */
2230 case RTA_PRIORITY:
2231 case RTA_TABLE:
2232 case RTA_OIF:
2233 u32 = RTA_DATA(rtattr);
2234 *u32 = tswap32(*u32);
2235 break;
2236 default:
2237 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2238 break;
2240 return 0;
2243 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2244 uint32_t rtattr_len)
2246 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2247 host_to_target_data_link_rtattr);
2250 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2251 uint32_t rtattr_len)
2253 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2254 host_to_target_data_addr_rtattr);
2257 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2258 uint32_t rtattr_len)
2260 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2261 host_to_target_data_route_rtattr);
/*
 * Convert the payload of one NETLINK_ROUTE message (link/addr/route) from
 * host to target byte order, including its trailing rtattr chain.
 *
 * NOTE(review): nlh header fields appear to still be in host order here —
 * nlmsg_len is cached before use, presumably because the generic nlmsg
 * walker swaps the header separately; confirm against
 * host_to_target_for_each_nlmsg.
 *
 * Returns 0 on success, -TARGET_EINVAL for unsupported message types.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* Cache the host-order length before any conversion touches it. */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* Only convert if the message really contains an ifinfomsg. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            /* Attributes follow the fixed struct; convert them too. */
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
2312 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2313 size_t len)
2315 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Walk a chain of target-order rtattrs, converting each header to host
 * byte order and invoking the per-attribute payload converter.
 *
 * The length/type fields are validated in target order (via tswap16)
 * BEFORE the in-place swap, then swapped so the callback and the loop
 * advance see host-order values.  Malformed lengths terminate the walk
 * silently (returns 0); a converter error aborts with that error.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                                        (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* Validate the still-target-order length before trusting it. */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        /* Swap the header in place; payload is handled by the callback. */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        /* Advance by the aligned (now host-order) attribute length. */
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2343 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2345 switch (rtattr->rta_type) {
2346 default:
2347 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
2348 break;
2350 return 0;
2353 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2355 switch (rtattr->rta_type) {
2356 /* binary: depends on family type */
2357 case IFA_LOCAL:
2358 case IFA_ADDRESS:
2359 break;
2360 default:
2361 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2362 break;
2364 return 0;
2367 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2369 uint32_t *u32;
2370 switch (rtattr->rta_type) {
2371 /* binary: depends on family type */
2372 case RTA_DST:
2373 case RTA_SRC:
2374 case RTA_GATEWAY:
2375 break;
2376 /* u32 */
2377 case RTA_OIF:
2378 u32 = RTA_DATA(rtattr);
2379 *u32 = tswap32(*u32);
2380 break;
2381 default:
2382 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2383 break;
2385 return 0;
2388 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2389 uint32_t rtattr_len)
2391 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2392 target_to_host_data_link_rtattr);
2395 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2396 uint32_t rtattr_len)
2398 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2399 target_to_host_data_addr_rtattr);
2402 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2403 uint32_t rtattr_len)
2405 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2406 target_to_host_data_route_rtattr);
/*
 * Convert the payload of one outgoing NETLINK_ROUTE message from target
 * to host byte order (fixed struct fields plus the rtattr chain).
 *
 * GET requests have no payload to convert and pass through untouched.
 * NOTE(review): nlh->nlmsg_len is used directly here (unlike the
 * host_to_target direction), so the header is presumably already in host
 * order at this point — confirm against target_to_host_for_each_nlmsg.
 *
 * Returns 0 on success, -TARGET_EOPNOTSUPP for unsupported message types.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        /* Only convert if the message really contains an ifinfomsg. */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2457 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2459 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2461 #endif /* CONFIG_RTNETLINK */
2463 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2465 switch (nlh->nlmsg_type) {
2466 default:
2467 gemu_log("Unknown host audit message type %d\n",
2468 nlh->nlmsg_type);
2469 return -TARGET_EINVAL;
2471 return 0;
2474 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2475 size_t len)
2477 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2480 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2482 switch (nlh->nlmsg_type) {
2483 case AUDIT_USER:
2484 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2485 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2486 break;
2487 default:
2488 gemu_log("Unknown target audit message type %d\n",
2489 nlh->nlmsg_type);
2490 return -TARGET_EINVAL;
2493 return 0;
2496 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2498 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2501 /* do_setsockopt() Must return target values and target errnos. */
2502 static abi_long do_setsockopt(int sockfd, int level, int optname,
2503 abi_ulong optval_addr, socklen_t optlen)
2505 abi_long ret;
2506 int val;
2507 struct ip_mreqn *ip_mreq;
2508 struct ip_mreq_source *ip_mreq_source;
2510 switch(level) {
2511 case SOL_TCP:
2512 /* TCP options all take an 'int' value. */
2513 if (optlen < sizeof(uint32_t))
2514 return -TARGET_EINVAL;
2516 if (get_user_u32(val, optval_addr))
2517 return -TARGET_EFAULT;
2518 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2519 break;
2520 case SOL_IP:
2521 switch(optname) {
2522 case IP_TOS:
2523 case IP_TTL:
2524 case IP_HDRINCL:
2525 case IP_ROUTER_ALERT:
2526 case IP_RECVOPTS:
2527 case IP_RETOPTS:
2528 case IP_PKTINFO:
2529 case IP_MTU_DISCOVER:
2530 case IP_RECVERR:
2531 case IP_RECVTOS:
2532 #ifdef IP_FREEBIND
2533 case IP_FREEBIND:
2534 #endif
2535 case IP_MULTICAST_TTL:
2536 case IP_MULTICAST_LOOP:
2537 val = 0;
2538 if (optlen >= sizeof(uint32_t)) {
2539 if (get_user_u32(val, optval_addr))
2540 return -TARGET_EFAULT;
2541 } else if (optlen >= 1) {
2542 if (get_user_u8(val, optval_addr))
2543 return -TARGET_EFAULT;
2545 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2546 break;
2547 case IP_ADD_MEMBERSHIP:
2548 case IP_DROP_MEMBERSHIP:
2549 if (optlen < sizeof (struct target_ip_mreq) ||
2550 optlen > sizeof (struct target_ip_mreqn))
2551 return -TARGET_EINVAL;
2553 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2554 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2555 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2556 break;
2558 case IP_BLOCK_SOURCE:
2559 case IP_UNBLOCK_SOURCE:
2560 case IP_ADD_SOURCE_MEMBERSHIP:
2561 case IP_DROP_SOURCE_MEMBERSHIP:
2562 if (optlen != sizeof (struct target_ip_mreq_source))
2563 return -TARGET_EINVAL;
2565 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2566 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2567 unlock_user (ip_mreq_source, optval_addr, 0);
2568 break;
2570 default:
2571 goto unimplemented;
2573 break;
2574 case SOL_IPV6:
2575 switch (optname) {
2576 case IPV6_MTU_DISCOVER:
2577 case IPV6_MTU:
2578 case IPV6_V6ONLY:
2579 case IPV6_RECVPKTINFO:
2580 val = 0;
2581 if (optlen < sizeof(uint32_t)) {
2582 return -TARGET_EINVAL;
2584 if (get_user_u32(val, optval_addr)) {
2585 return -TARGET_EFAULT;
2587 ret = get_errno(setsockopt(sockfd, level, optname,
2588 &val, sizeof(val)));
2589 break;
2590 default:
2591 goto unimplemented;
2593 break;
2594 case SOL_RAW:
2595 switch (optname) {
2596 case ICMP_FILTER:
2597 /* struct icmp_filter takes an u32 value */
2598 if (optlen < sizeof(uint32_t)) {
2599 return -TARGET_EINVAL;
2602 if (get_user_u32(val, optval_addr)) {
2603 return -TARGET_EFAULT;
2605 ret = get_errno(setsockopt(sockfd, level, optname,
2606 &val, sizeof(val)));
2607 break;
2609 default:
2610 goto unimplemented;
2612 break;
2613 case TARGET_SOL_SOCKET:
2614 switch (optname) {
2615 case TARGET_SO_RCVTIMEO:
2617 struct timeval tv;
2619 optname = SO_RCVTIMEO;
2621 set_timeout:
2622 if (optlen != sizeof(struct target_timeval)) {
2623 return -TARGET_EINVAL;
2626 if (copy_from_user_timeval(&tv, optval_addr)) {
2627 return -TARGET_EFAULT;
2630 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2631 &tv, sizeof(tv)));
2632 return ret;
2634 case TARGET_SO_SNDTIMEO:
2635 optname = SO_SNDTIMEO;
2636 goto set_timeout;
2637 case TARGET_SO_ATTACH_FILTER:
2639 struct target_sock_fprog *tfprog;
2640 struct target_sock_filter *tfilter;
2641 struct sock_fprog fprog;
2642 struct sock_filter *filter;
2643 int i;
2645 if (optlen != sizeof(*tfprog)) {
2646 return -TARGET_EINVAL;
2648 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2649 return -TARGET_EFAULT;
2651 if (!lock_user_struct(VERIFY_READ, tfilter,
2652 tswapal(tfprog->filter), 0)) {
2653 unlock_user_struct(tfprog, optval_addr, 1);
2654 return -TARGET_EFAULT;
2657 fprog.len = tswap16(tfprog->len);
2658 filter = g_try_new(struct sock_filter, fprog.len);
2659 if (filter == NULL) {
2660 unlock_user_struct(tfilter, tfprog->filter, 1);
2661 unlock_user_struct(tfprog, optval_addr, 1);
2662 return -TARGET_ENOMEM;
2664 for (i = 0; i < fprog.len; i++) {
2665 filter[i].code = tswap16(tfilter[i].code);
2666 filter[i].jt = tfilter[i].jt;
2667 filter[i].jf = tfilter[i].jf;
2668 filter[i].k = tswap32(tfilter[i].k);
2670 fprog.filter = filter;
2672 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2673 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2674 g_free(filter);
2676 unlock_user_struct(tfilter, tfprog->filter, 1);
2677 unlock_user_struct(tfprog, optval_addr, 1);
2678 return ret;
2680 case TARGET_SO_BINDTODEVICE:
2682 char *dev_ifname, *addr_ifname;
2684 if (optlen > IFNAMSIZ - 1) {
2685 optlen = IFNAMSIZ - 1;
2687 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2688 if (!dev_ifname) {
2689 return -TARGET_EFAULT;
2691 optname = SO_BINDTODEVICE;
2692 addr_ifname = alloca(IFNAMSIZ);
2693 memcpy(addr_ifname, dev_ifname, optlen);
2694 addr_ifname[optlen] = 0;
2695 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2696 addr_ifname, optlen));
2697 unlock_user (dev_ifname, optval_addr, 0);
2698 return ret;
2700 /* Options with 'int' argument. */
2701 case TARGET_SO_DEBUG:
2702 optname = SO_DEBUG;
2703 break;
2704 case TARGET_SO_REUSEADDR:
2705 optname = SO_REUSEADDR;
2706 break;
2707 case TARGET_SO_TYPE:
2708 optname = SO_TYPE;
2709 break;
2710 case TARGET_SO_ERROR:
2711 optname = SO_ERROR;
2712 break;
2713 case TARGET_SO_DONTROUTE:
2714 optname = SO_DONTROUTE;
2715 break;
2716 case TARGET_SO_BROADCAST:
2717 optname = SO_BROADCAST;
2718 break;
2719 case TARGET_SO_SNDBUF:
2720 optname = SO_SNDBUF;
2721 break;
2722 case TARGET_SO_SNDBUFFORCE:
2723 optname = SO_SNDBUFFORCE;
2724 break;
2725 case TARGET_SO_RCVBUF:
2726 optname = SO_RCVBUF;
2727 break;
2728 case TARGET_SO_RCVBUFFORCE:
2729 optname = SO_RCVBUFFORCE;
2730 break;
2731 case TARGET_SO_KEEPALIVE:
2732 optname = SO_KEEPALIVE;
2733 break;
2734 case TARGET_SO_OOBINLINE:
2735 optname = SO_OOBINLINE;
2736 break;
2737 case TARGET_SO_NO_CHECK:
2738 optname = SO_NO_CHECK;
2739 break;
2740 case TARGET_SO_PRIORITY:
2741 optname = SO_PRIORITY;
2742 break;
2743 #ifdef SO_BSDCOMPAT
2744 case TARGET_SO_BSDCOMPAT:
2745 optname = SO_BSDCOMPAT;
2746 break;
2747 #endif
2748 case TARGET_SO_PASSCRED:
2749 optname = SO_PASSCRED;
2750 break;
2751 case TARGET_SO_PASSSEC:
2752 optname = SO_PASSSEC;
2753 break;
2754 case TARGET_SO_TIMESTAMP:
2755 optname = SO_TIMESTAMP;
2756 break;
2757 case TARGET_SO_RCVLOWAT:
2758 optname = SO_RCVLOWAT;
2759 break;
2760 break;
2761 default:
2762 goto unimplemented;
2764 if (optlen < sizeof(uint32_t))
2765 return -TARGET_EINVAL;
2767 if (get_user_u32(val, optval_addr))
2768 return -TARGET_EFAULT;
2769 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2770 break;
2771 default:
2772 unimplemented:
2773 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2774 ret = -TARGET_ENOPROTOOPT;
2776 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt(2).  For integer-valued options the shared
 * "int_case" tail fetches the guest's buffer length, performs the host
 * call, and writes back either a 4-byte or 1-byte result depending on the
 * guest-supplied length.  Structured options (SO_PEERCRED) are converted
 * field by field; several multi-field options are not implemented.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            /* Fetch the full host ucred, then copy out at most 'len'
             * bytes worth of converted fields. */
            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) is the socklen_t itself, not the 'val'
         * buffer — works because both are 4 bytes here, but sizeof(val)
         * would express the intent; confirm before changing. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* Socket type numbering differs per target. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            /* Guest asked for fewer than 4 bytes: write a single byte. */
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Small byte-sized values can be returned in one byte. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * Build a host iovec array from a guest iovec array at target_addr,
 * locking each guest buffer into host memory.
 *
 * Error reporting is via errno (returns NULL): 0 for an empty vector,
 * EINVAL for a bad count or negative length, EFAULT for an unreadable
 * vector or a bad FIRST buffer.  A bad buffer after the first is NOT an
 * error: the remaining entries get NULL base / zero length so the caller
 * performs a partial transfer, matching kernel writev semantics.
 * The total length is also clamped to max_len.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one buffer is bad, all later entries become empty. */
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked so far (nonzero lengths only). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/*
 * Release a host iovec previously built by lock_iovec(), copying data
 * back to the guest when 'copy' is set (i.e. after a read-style
 * operation).  Mirrors lock_iovec's early-stop: a negative length marks
 * the point where locking stopped, so unlocking stops there too.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         int count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
3078 static inline int target_to_host_sock_type(int *type)
3080 int host_type = 0;
3081 int target_type = *type;
3083 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3084 case TARGET_SOCK_DGRAM:
3085 host_type = SOCK_DGRAM;
3086 break;
3087 case TARGET_SOCK_STREAM:
3088 host_type = SOCK_STREAM;
3089 break;
3090 default:
3091 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3092 break;
3094 if (target_type & TARGET_SOCK_CLOEXEC) {
3095 #if defined(SOCK_CLOEXEC)
3096 host_type |= SOCK_CLOEXEC;
3097 #else
3098 return -TARGET_EINVAL;
3099 #endif
3101 if (target_type & TARGET_SOCK_NONBLOCK) {
3102 #if defined(SOCK_NONBLOCK)
3103 host_type |= SOCK_NONBLOCK;
3104 #elif !defined(O_NONBLOCK)
3105 return -TARGET_EINVAL;
3106 #endif
3108 *type = host_type;
3109 return 0;
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl instead.
 * Returns the fd on success; on failure the fd is closed and
 * -TARGET_EINVAL is returned.  Fix: the F_GETFL result is now checked —
 * previously a failing F_GETFL (-1) was OR-ed into F_SETFL, setting
 * every status flag.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3127 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3128 abi_ulong target_addr,
3129 socklen_t len)
3131 struct sockaddr *addr = host_addr;
3132 struct target_sockaddr *target_saddr;
3134 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3135 if (!target_saddr) {
3136 return -TARGET_EFAULT;
3139 memcpy(addr, target_saddr, len);
3140 addr->sa_family = tswap16(target_saddr->sa_family);
3141 /* spkt_protocol is big-endian */
3143 unlock_user(target_saddr, target_addr, 0);
3144 return 0;
/* fd translator for SOCK_PACKET sockets: only the sockaddr needs fixing. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3151 #ifdef CONFIG_RTNETLINK
3152 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3154 abi_long ret;
3156 ret = target_to_host_nlmsg_route(buf, len);
3157 if (ret < 0) {
3158 return ret;
3161 return len;
3164 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3166 abi_long ret;
3168 ret = host_to_target_nlmsg_route(buf, len);
3169 if (ret < 0) {
3170 return ret;
3173 return len;
/* fd translator for NETLINK_ROUTE sockets: byteswap data both ways. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
3180 #endif /* CONFIG_RTNETLINK */
3182 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3184 abi_long ret;
3186 ret = target_to_host_nlmsg_audit(buf, len);
3187 if (ret < 0) {
3188 return ret;
3191 return len;
3194 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3196 abi_long ret;
3198 ret = host_to_target_nlmsg_audit(buf, len);
3199 if (ret < 0) {
3200 return ret;
3203 return len;
/* fd translator for NETLINK_AUDIT sockets: byteswap data both ways. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
3211 /* do_socket() Must return target values and target errnos. */
3212 static abi_long do_socket(int domain, int type, int protocol)
3214 int target_type = type;
3215 int ret;
3217 ret = target_to_host_sock_type(&type);
3218 if (ret) {
3219 return ret;
3222 if (domain == PF_NETLINK && !(
3223 #ifdef CONFIG_RTNETLINK
3224 protocol == NETLINK_ROUTE ||
3225 #endif
3226 protocol == NETLINK_KOBJECT_UEVENT ||
3227 protocol == NETLINK_AUDIT)) {
3228 return -EPFNOSUPPORT;
3231 if (domain == AF_PACKET ||
3232 (domain == AF_INET && type == SOCK_PACKET)) {
3233 protocol = tswap16(protocol);
3236 ret = get_errno(socket(domain, type, protocol));
3237 if (ret >= 0) {
3238 ret = sock_flags_fixup(ret, target_type);
3239 if (type == SOCK_PACKET) {
3240 /* Manage an obsolete case :
3241 * if socket type is SOCK_PACKET, bind by name
3243 fd_trans_register(ret, &target_packet_trans);
3244 } else if (domain == PF_NETLINK) {
3245 switch (protocol) {
3246 #ifdef CONFIG_RTNETLINK
3247 case NETLINK_ROUTE:
3248 fd_trans_register(ret, &target_netlink_route_trans);
3249 break;
3250 #endif
3251 case NETLINK_KOBJECT_UEVENT:
3252 /* nothing to do: messages are strings */
3253 break;
3254 case NETLINK_AUDIT:
3255 fd_trans_register(ret, &target_netlink_audit_trans);
3256 break;
3257 default:
3258 g_assert_not_reached();
3262 return ret;
3265 /* do_bind() Must return target values and target errnos. */
3266 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3267 socklen_t addrlen)
3269 void *addr;
3270 abi_long ret;
3272 if ((int)addrlen < 0) {
3273 return -TARGET_EINVAL;
3276 addr = alloca(addrlen+1);
3278 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3279 if (ret)
3280 return ret;
3282 return get_errno(bind(sockfd, addr, addrlen));
3285 /* do_connect() Must return target values and target errnos. */
3286 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3287 socklen_t addrlen)
3289 void *addr;
3290 abi_long ret;
3292 if ((int)addrlen < 0) {
3293 return -TARGET_EINVAL;
3296 addr = alloca(addrlen+1);
3298 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3299 if (ret)
3300 return ret;
3302 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * target_msghdr.  Builds a host msghdr (name, control, iovec), then either
 * sends (optionally routing the first iov through the fd's data
 * translator) or receives (converting control data and the peer address
 * back to target form).  Returns the byte count or a target errno.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* 2x: host cmsg encoding may need more room than the target's. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        /* lock_iovec reports failures through errno. */
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* Translate the (first) buffer into a scratch copy so the
             * guest's memory is left untouched. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count across the conversion calls. */
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    /* !send: copy received data back to the guest buffers. */
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
/* sendmsg/recvmsg entry point: lock the guest msghdr and delegate to
 * do_sendrecvmsg_locked().  Returns target values / target errnos. */
3393 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3394 int flags, int send)
3396 abi_long ret;
3397 struct target_msghdr *msgp;
/* For send we only read the header (copy in on lock); for recv the
 * header is updated, so lock for write and copy back on unlock. */
3399 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3400 msgp,
3401 target_msg,
3402 send ? 1 : 0)) {
3403 return -TARGET_EFAULT;
3405 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3406 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3407 return ret;
3410 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3411 * so it might not have this *mmsg-specific flag either.
3413 #ifndef MSG_WAITFORONE
3414 #define MSG_WAITFORONE 0x10000
3415 #endif
/* sendmmsg/recvmmsg: process a guest mmsghdr vector one entry at a time
 * via do_sendrecvmsg_locked().  Returns the number of messages handled
 * if any succeeded, otherwise the first error (target errno). */
3417 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3418 unsigned int vlen, unsigned int flags,
3419 int send)
3421 struct target_mmsghdr *mmsgp;
3422 abi_long ret = 0;
3423 int i;
/* Silently clamp the vector length, matching the kernel's behavior. */
3425 if (vlen > UIO_MAXIOV) {
3426 vlen = UIO_MAXIOV;
3429 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3430 if (!mmsgp) {
3431 return -TARGET_EFAULT;
3434 for (i = 0; i < vlen; i++) {
3435 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3436 if (is_error(ret)) {
3437 break;
3439 mmsgp[i].msg_len = tswap32(ret);
3440 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3441 if (flags & MSG_WAITFORONE) {
3442 flags |= MSG_DONTWAIT;
/* Copy back only the entries we actually processed (i of them). */
3446 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3448 /* Return number of datagrams sent if we sent any at all;
3449 * otherwise return the error.
3451 if (i) {
3452 return i;
3454 return ret;
3457 /* do_accept4() Must return target values and target errnos. */
/* accept()/accept4(): flags are translated through fcntl_flags_tbl
 * (SOCK_NONBLOCK/SOCK_CLOEXEC share fcntl flag encodings). */
3458 static abi_long do_accept4(int fd, abi_ulong target_addr,
3459 abi_ulong target_addrlen_addr, int flags)
3461 socklen_t addrlen;
3462 void *addr;
3463 abi_long ret;
3464 int host_flags;
3466 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
/* Caller doesn't want the peer address: skip all addrlen handling. */
3468 if (target_addr == 0) {
3469 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3472 /* linux returns EINVAL if addrlen pointer is invalid */
3473 if (get_user_u32(addrlen, target_addrlen_addr))
3474 return -TARGET_EINVAL;
3476 if ((int)addrlen < 0) {
3477 return -TARGET_EINVAL;
3480 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3481 return -TARGET_EINVAL;
3483 addr = alloca(addrlen);
3485 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3486 if (!is_error(ret)) {
/* Write back the peer address and the (possibly updated) length. */
3487 host_to_target_sockaddr(target_addr, addr, addrlen);
3488 if (put_user_u32(addrlen, target_addrlen_addr))
3489 ret = -TARGET_EFAULT;
3491 return ret;
3494 /* do_getpeername() Must return target values and target errnos. */
3495 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3496 abi_ulong target_addrlen_addr)
3498 socklen_t addrlen;
3499 void *addr;
3500 abi_long ret;
3502 if (get_user_u32(addrlen, target_addrlen_addr))
3503 return -TARGET_EFAULT;
3505 if ((int)addrlen < 0) {
3506 return -TARGET_EINVAL;
3509 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3510 return -TARGET_EFAULT;
3512 addr = alloca(addrlen);
3514 ret = get_errno(getpeername(fd, addr, &addrlen));
3515 if (!is_error(ret)) {
3516 host_to_target_sockaddr(target_addr, addr, addrlen);
3517 if (put_user_u32(addrlen, target_addrlen_addr))
3518 ret = -TARGET_EFAULT;
3520 return ret;
3523 /* do_getsockname() Must return target values and target errnos. */
3524 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3525 abi_ulong target_addrlen_addr)
3527 socklen_t addrlen;
3528 void *addr;
3529 abi_long ret;
3531 if (get_user_u32(addrlen, target_addrlen_addr))
3532 return -TARGET_EFAULT;
3534 if ((int)addrlen < 0) {
3535 return -TARGET_EINVAL;
3538 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3539 return -TARGET_EFAULT;
3541 addr = alloca(addrlen);
3543 ret = get_errno(getsockname(fd, addr, &addrlen));
3544 if (!is_error(ret)) {
3545 host_to_target_sockaddr(target_addr, addr, addrlen);
3546 if (put_user_u32(addrlen, target_addrlen_addr))
3547 ret = -TARGET_EFAULT;
3549 return ret;
3552 /* do_socketpair() Must return target values and target errnos. */
3553 static abi_long do_socketpair(int domain, int type, int protocol,
3554 abi_ulong target_tab_addr)
3556 int tab[2];
3557 abi_long ret;
3559 target_to_host_sock_type(&type);
3561 ret = get_errno(socketpair(domain, type, protocol, tab));
3562 if (!is_error(ret)) {
3563 if (put_user_s32(tab[0], target_tab_addr)
3564 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3565 ret = -TARGET_EFAULT;
3567 return ret;
3570 /* do_sendto() Must return target values and target errnos. */
/* send()/sendto(): target_addr == 0 means plain send() (no address). */
3571 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3572 abi_ulong target_addr, socklen_t addrlen)
3574 void *addr;
3575 void *host_msg;
/* Non-NULL iff we substituted a translated copy for host_msg; used to
 * restore the original locked pointer before unlocking. */
3576 void *copy_msg = NULL;
3577 abi_long ret;
3579 if ((int)addrlen < 0) {
3580 return -TARGET_EINVAL;
3583 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3584 if (!host_msg)
3585 return -TARGET_EFAULT;
/* fd with a registered data translator: send a translated private
 * copy instead of the guest buffer. */
3586 if (fd_trans_target_to_host_data(fd)) {
3587 copy_msg = host_msg;
3588 host_msg = g_malloc(len);
3589 memcpy(host_msg, copy_msg, len);
3590 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3591 if (ret < 0) {
3592 goto fail;
3595 if (target_addr) {
/* +1 byte of slack for sockaddr conversion (e.g. NUL-terminating
 * an AF_UNIX path) -- TODO confirm against target_to_host_sockaddr */
3596 addr = alloca(addrlen+1);
3597 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3598 if (ret) {
3599 goto fail;
3601 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3602 } else {
3603 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3605 fail:
/* Free the translated copy (if any) and restore the locked pointer
 * so unlock_user() releases the right buffer. */
3606 if (copy_msg) {
3607 g_free(host_msg);
3608 host_msg = copy_msg;
3610 unlock_user(host_msg, msg, 0);
3611 return ret;
3614 /* do_recvfrom() Must return target values and target errnos. */
/* recv()/recvfrom(): target_addr == 0 means plain recv() (caller does
 * not want the source address). */
3615 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3616 abi_ulong target_addr,
3617 abi_ulong target_addrlen)
3619 socklen_t addrlen;
3620 void *addr;
3621 void *host_msg;
3622 abi_long ret;
3624 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3625 if (!host_msg)
3626 return -TARGET_EFAULT;
3627 if (target_addr) {
3628 if (get_user_u32(addrlen, target_addrlen)) {
3629 ret = -TARGET_EFAULT;
3630 goto fail;
3632 if ((int)addrlen < 0) {
3633 ret = -TARGET_EINVAL;
3634 goto fail;
3636 addr = alloca(addrlen);
3637 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3638 addr, &addrlen));
3639 } else {
3640 addr = NULL; /* To keep compiler quiet. */
3641 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3643 if (!is_error(ret)) {
/* fd with a registered data translator: convert the payload in place
 * before it becomes visible to the guest. */
3644 if (fd_trans_host_to_target_data(fd)) {
3645 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3647 if (target_addr) {
3648 host_to_target_sockaddr(target_addr, addr, addrlen);
3649 if (put_user_u32(addrlen, target_addrlen)) {
3650 ret = -TARGET_EFAULT;
3651 goto fail;
/* Success: unlock with 'len' so the received data is copied back
 * to guest memory. */
3654 unlock_user(host_msg, msg, len);
3655 } else {
/* Error paths: unlock with 0 so nothing is copied back. */
3656 fail:
3657 unlock_user(host_msg, msg, 0);
3659 return ret;
3662 #ifdef TARGET_NR_socketcall
3663 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplexer for the legacy socketcall(2) syscall: reads the
 * per-operation argument vector from guest memory at vptr, then
 * dispatches to the corresponding do_* helper. */
3664 static abi_long do_socketcall(int num, abi_ulong vptr)
3666 static const unsigned ac[] = { /* number of arguments per call */
3667 [SOCKOP_socket] = 3, /* domain, type, protocol */
3668 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
3669 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
3670 [SOCKOP_listen] = 2, /* sockfd, backlog */
3671 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
3672 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
3673 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
3674 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
3675 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
3676 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
3677 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
3678 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3679 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3680 [SOCKOP_shutdown] = 2, /* sockfd, how */
3681 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
3682 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
3683 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3684 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3685 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3686 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3688 abi_long a[6]; /* max 6 args */
3690 /* first, collect the arguments in a[] according to ac[] */
/* An out-of-range num skips argument collection and lands in the
 * switch default below, returning -TARGET_ENOSYS. */
3691 if (num >= 0 && num < ARRAY_SIZE(ac)) {
3692 unsigned i;
3693 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
3694 for (i = 0; i < ac[num]; ++i) {
3695 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3696 return -TARGET_EFAULT;
3701 /* now when we have the args, actually handle the call */
3702 switch (num) {
3703 case SOCKOP_socket: /* domain, type, protocol */
3704 return do_socket(a[0], a[1], a[2]);
3705 case SOCKOP_bind: /* sockfd, addr, addrlen */
3706 return do_bind(a[0], a[1], a[2]);
3707 case SOCKOP_connect: /* sockfd, addr, addrlen */
3708 return do_connect(a[0], a[1], a[2]);
3709 case SOCKOP_listen: /* sockfd, backlog */
3710 return get_errno(listen(a[0], a[1]));
3711 case SOCKOP_accept: /* sockfd, addr, addrlen */
3712 return do_accept4(a[0], a[1], a[2], 0);
3713 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
3714 return do_accept4(a[0], a[1], a[2], a[3]);
3715 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
3716 return do_getsockname(a[0], a[1], a[2]);
3717 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
3718 return do_getpeername(a[0], a[1], a[2]);
3719 case SOCKOP_socketpair: /* domain, type, protocol, tab */
3720 return do_socketpair(a[0], a[1], a[2], a[3]);
3721 case SOCKOP_send: /* sockfd, msg, len, flags */
3722 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3723 case SOCKOP_recv: /* sockfd, msg, len, flags */
3724 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3725 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
3726 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3727 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
3728 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3729 case SOCKOP_shutdown: /* sockfd, how */
3730 return get_errno(shutdown(a[0], a[1]));
3731 case SOCKOP_sendmsg: /* sockfd, msg, flags */
3732 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3733 case SOCKOP_recvmsg: /* sockfd, msg, flags */
3734 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3735 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
3736 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3737 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
3738 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3739 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
3740 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3741 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
3742 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3743 default:
3744 gemu_log("Unsupported socketcall: %d\n", num);
3745 return -TARGET_ENOSYS;
3748 #endif
/* Bookkeeping for guest shmat() attachments: do_shmdt() needs the
 * segment size for a given attach address to clear page flags.
 * Fixed-size table: at most N_SHM_REGIONS concurrent attachments. */
3750 #define N_SHM_REGIONS 32
3752 static struct shm_region {
3753 abi_ulong start; /* guest address the segment is attached at */
3754 abi_ulong size; /* segment size in bytes (shm_segsz) */
3755 bool in_use; /* slot occupied? */
3756 } shm_regions[N_SHM_REGIONS];
3758 #ifndef TARGET_SEMID64_DS
3759 /* asm-generic version of this struct */
/* Guest-ABI layout of struct semid64_ds; used by fallback targets that
 * do not define their own TARGET_SEMID64_DS.  Field order and the
 * 32-bit padding words mirror the kernel's asm-generic sembuf.h --
 * do not reorder. */
3760 struct target_semid64_ds
3762 struct target_ipc_perm sem_perm;
3763 abi_ulong sem_otime;
3764 #if TARGET_ABI_BITS == 32
3765 abi_ulong __unused1;
3766 #endif
3767 abi_ulong sem_ctime;
3768 #if TARGET_ABI_BITS == 32
3769 abi_ulong __unused2;
3770 #endif
3771 abi_ulong sem_nsems;
3772 abi_ulong __unused3;
3773 abi_ulong __unused4;
3775 #endif
/* Convert the ipc_perm embedded in a guest semid64_ds at target_addr
 * into *host_ip.  Returns 0 or -TARGET_EFAULT. */
3777 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3778 abi_ulong target_addr)
3780 struct target_ipc_perm *target_ip;
3781 struct target_semid64_ds *target_sd;
3783 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3784 return -TARGET_EFAULT;
3785 target_ip = &(target_sd->sem_perm);
3786 host_ip->__key = tswap32(target_ip->__key);
3787 host_ip->uid = tswap32(target_ip->uid);
3788 host_ip->gid = tswap32(target_ip->gid);
3789 host_ip->cuid = tswap32(target_ip->cuid);
3790 host_ip->cgid = tswap32(target_ip->cgid);
/* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
3791 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3792 host_ip->mode = tswap32(target_ip->mode);
3793 #else
3794 host_ip->mode = tswap16(target_ip->mode);
3795 #endif
3796 #if defined(TARGET_PPC)
3797 host_ip->__seq = tswap32(target_ip->__seq);
3798 #else
3799 host_ip->__seq = tswap16(target_ip->__seq);
3800 #endif
3801 unlock_user_struct(target_sd, target_addr, 0);
3802 return 0;
/* Convert *host_ip into the ipc_perm embedded in the guest semid64_ds
 * at target_addr.  Returns 0 or -TARGET_EFAULT. */
3805 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3806 struct ipc_perm *host_ip)
3808 struct target_ipc_perm *target_ip;
3809 struct target_semid64_ds *target_sd;
3811 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3812 return -TARGET_EFAULT;
3813 target_ip = &(target_sd->sem_perm);
3814 target_ip->__key = tswap32(host_ip->__key);
3815 target_ip->uid = tswap32(host_ip->uid);
3816 target_ip->gid = tswap32(host_ip->gid);
3817 target_ip->cuid = tswap32(host_ip->cuid);
3818 target_ip->cgid = tswap32(host_ip->cgid);
/* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
3819 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3820 target_ip->mode = tswap32(host_ip->mode);
3821 #else
3822 target_ip->mode = tswap16(host_ip->mode);
3823 #endif
3824 #if defined(TARGET_PPC)
3825 target_ip->__seq = tswap32(host_ip->__seq);
3826 #else
3827 target_ip->__seq = tswap16(host_ip->__seq);
3828 #endif
3829 unlock_user_struct(target_sd, target_addr, 1);
3830 return 0;
3833 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3834 abi_ulong target_addr)
3836 struct target_semid64_ds *target_sd;
3838 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3839 return -TARGET_EFAULT;
3840 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3841 return -TARGET_EFAULT;
3842 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3843 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3844 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3845 unlock_user_struct(target_sd, target_addr, 0);
3846 return 0;
3849 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3850 struct semid_ds *host_sd)
3852 struct target_semid64_ds *target_sd;
3854 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3855 return -TARGET_EFAULT;
3856 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3857 return -TARGET_EFAULT;
3858 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3859 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3860 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3861 unlock_user_struct(target_sd, target_addr, 1);
3862 return 0;
/* Guest-ABI layout of struct seminfo (IPC_INFO/SEM_INFO results). */
3865 struct target_seminfo {
3866 int semmap;
3867 int semmni;
3868 int semmns;
3869 int semmnu;
3870 int semmsl;
3871 int semopm;
3872 int semume;
3873 int semusz;
3874 int semvmx;
3875 int semaem;
/* Copy a host struct seminfo to the guest seminfo at target_addr,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
3878 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3879 struct seminfo *host_seminfo)
3881 struct target_seminfo *target_seminfo;
3882 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3883 return -TARGET_EFAULT;
3884 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3885 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3886 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3887 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3888 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3889 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3890 __put_user(host_seminfo->semume, &target_seminfo->semume);
3891 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3892 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3893 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3894 unlock_user_struct(target_seminfo, target_addr, 1);
3895 return 0;
/* Host-side semctl() argument union (glibc does not export one). */
3898 union semun {
3899 int val;
3900 struct semid_ds *buf;
3901 unsigned short *array;
3902 struct seminfo *__buf;
/* Guest-side counterpart: the pointer members are guest addresses. */
3905 union target_semun {
3906 int val;
3907 abi_ulong buf;
3908 abi_ulong array;
3909 abi_ulong __buf;
/* For SETALL: read the guest's array of semaphore values into a freshly
 * allocated host array (*host_array).  The element count is obtained by
 * querying the semaphore set itself with IPC_STAT.  On success the
 * caller owns *host_array; host_to_target_semarray() frees it. */
3912 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3913 abi_ulong target_addr)
3915 int nsems;
3916 unsigned short *array;
3917 union semun semun;
3918 struct semid_ds semid_ds;
3919 int i, ret;
3921 semun.buf = &semid_ds;
3923 ret = semctl(semid, 0, IPC_STAT, semun);
3924 if (ret == -1)
3925 return get_errno(ret);
3927 nsems = semid_ds.sem_nsems;
3929 *host_array = g_try_new(unsigned short, nsems);
3930 if (!*host_array) {
3931 return -TARGET_ENOMEM;
3933 array = lock_user(VERIFY_READ, target_addr,
3934 nsems*sizeof(unsigned short), 1);
3935 if (!array) {
3936 g_free(*host_array);
3937 return -TARGET_EFAULT;
3940 for(i=0; i<nsems; i++) {
3941 __get_user((*host_array)[i], &array[i]);
3943 unlock_user(array, target_addr, 0);
3945 return 0;
3948 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3949 unsigned short **host_array)
3951 int nsems;
3952 unsigned short *array;
3953 union semun semun;
3954 struct semid_ds semid_ds;
3955 int i, ret;
3957 semun.buf = &semid_ds;
3959 ret = semctl(semid, 0, IPC_STAT, semun);
3960 if (ret == -1)
3961 return get_errno(ret);
3963 nsems = semid_ds.sem_nsems;
3965 array = lock_user(VERIFY_WRITE, target_addr,
3966 nsems*sizeof(unsigned short), 0);
3967 if (!array)
3968 return -TARGET_EFAULT;
3970 for(i=0; i<nsems; i++) {
3971 __put_user((*host_array)[i], &array[i]);
3973 g_free(*host_array);
3974 unlock_user(array, target_addr, 1);
3976 return 0;
/* semctl(2) emulation.  target_arg is the raw guest semun value; which
 * member is meaningful depends on cmd.  Returns target errnos. */
3979 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3980 abi_ulong target_arg)
3982 union target_semun target_su = { .buf = target_arg };
3983 union semun arg;
3984 struct semid_ds dsarg;
3985 unsigned short *array = NULL;
3986 struct seminfo seminfo;
3987 abi_long ret = -TARGET_EINVAL;
3988 abi_long err;
/* Strip command modifier bits (e.g. IPC_64) before dispatching. */
3989 cmd &= 0xff;
3991 switch( cmd ) {
3992 case GETVAL:
3993 case SETVAL:
3994 /* In 64 bit cross-endian situations, we will erroneously pick up
3995 * the wrong half of the union for the "val" element. To rectify
3996 * this, the entire 8-byte structure is byteswapped, followed by
3997 * a swap of the 4 byte val field. In other cases, the data is
3998 * already in proper host byte order. */
3999 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4000 target_su.buf = tswapal(target_su.buf);
4001 arg.val = tswap32(target_su.val);
4002 } else {
4003 arg.val = target_su.val;
4005 ret = get_errno(semctl(semid, semnum, cmd, arg));
4006 break;
4007 case GETALL:
4008 case SETALL:
/* Round-trip the guest's value array through a host buffer; the
 * second call also frees it. */
4009 err = target_to_host_semarray(semid, &array, target_su.array);
4010 if (err)
4011 return err;
4012 arg.array = array;
4013 ret = get_errno(semctl(semid, semnum, cmd, arg));
4014 err = host_to_target_semarray(semid, target_su.array, &array);
4015 if (err)
4016 return err;
4017 break;
4018 case IPC_STAT:
4019 case IPC_SET:
4020 case SEM_STAT:
4021 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4022 if (err)
4023 return err;
4024 arg.buf = &dsarg;
4025 ret = get_errno(semctl(semid, semnum, cmd, arg));
4026 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4027 if (err)
4028 return err;
4029 break;
4030 case IPC_INFO:
4031 case SEM_INFO:
4032 arg.__buf = &seminfo;
4033 ret = get_errno(semctl(semid, semnum, cmd, arg));
4034 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4035 if (err)
4036 return err;
4037 break;
4038 case IPC_RMID:
4039 case GETPID:
4040 case GETNCNT:
4041 case GETZCNT:
/* Commands that take no semun argument. */
4042 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4043 break;
4046 return ret;
/* Guest-ABI layout of struct sembuf (semop operation descriptor). */
4049 struct target_sembuf {
4050 unsigned short sem_num;
4051 short sem_op;
4052 short sem_flg;
/* Copy nsops guest sembuf entries at target_addr into host_sembuf,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
4055 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4056 abi_ulong target_addr,
4057 unsigned nsops)
4059 struct target_sembuf *target_sembuf;
4060 int i;
4062 target_sembuf = lock_user(VERIFY_READ, target_addr,
4063 nsops*sizeof(struct target_sembuf), 1);
4064 if (!target_sembuf)
4065 return -TARGET_EFAULT;
4067 for(i=0; i<nsops; i++) {
4068 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4069 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4070 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4073 unlock_user(target_sembuf, target_addr, 0);
4075 return 0;
/* semop(2) emulation: convert the guest sembuf array and issue
 * semtimedop with no timeout. */
4078 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
/* NOTE(review): VLA sized by guest-controlled nsops -- a huge value
 * could overflow the host stack before the kernel gets a chance to
 * reject it (the kernel caps nsops at SEMOPM); consider validating. */
4080 struct sembuf sops[nsops];
4082 if (target_to_host_sembuf(sops, ptr, nsops))
4083 return -TARGET_EFAULT;
4085 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-ABI layout of struct msqid64_ds.  Field order and the 32-bit
 * padding words mirror the kernel's asm-generic layout -- do not
 * reorder. */
4088 struct target_msqid_ds
4090 struct target_ipc_perm msg_perm;
4091 abi_ulong msg_stime;
4092 #if TARGET_ABI_BITS == 32
4093 abi_ulong __unused1;
4094 #endif
4095 abi_ulong msg_rtime;
4096 #if TARGET_ABI_BITS == 32
4097 abi_ulong __unused2;
4098 #endif
4099 abi_ulong msg_ctime;
4100 #if TARGET_ABI_BITS == 32
4101 abi_ulong __unused3;
4102 #endif
4103 abi_ulong __msg_cbytes;
4104 abi_ulong msg_qnum;
4105 abi_ulong msg_qbytes;
4106 abi_ulong msg_lspid;
4107 abi_ulong msg_lrpid;
4108 abi_ulong __unused4;
4109 abi_ulong __unused5;
4112 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4113 abi_ulong target_addr)
4115 struct target_msqid_ds *target_md;
4117 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4118 return -TARGET_EFAULT;
4119 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4120 return -TARGET_EFAULT;
4121 host_md->msg_stime = tswapal(target_md->msg_stime);
4122 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4123 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4124 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4125 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4126 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4127 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4128 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4129 unlock_user_struct(target_md, target_addr, 0);
4130 return 0;
4133 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4134 struct msqid_ds *host_md)
4136 struct target_msqid_ds *target_md;
4138 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4139 return -TARGET_EFAULT;
4140 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4141 return -TARGET_EFAULT;
4142 target_md->msg_stime = tswapal(host_md->msg_stime);
4143 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4144 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4145 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4146 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4147 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4148 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4149 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4150 unlock_user_struct(target_md, target_addr, 1);
4151 return 0;
/* Guest-ABI layout of struct msginfo (IPC_INFO/MSG_INFO results). */
4154 struct target_msginfo {
4155 int msgpool;
4156 int msgmap;
4157 int msgmax;
4158 int msgmnb;
4159 int msgmni;
4160 int msgssz;
4161 int msgtql;
4162 unsigned short int msgseg;
/* Copy a host struct msginfo to the guest msginfo at target_addr,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
4165 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4166 struct msginfo *host_msginfo)
4168 struct target_msginfo *target_msginfo;
4169 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4170 return -TARGET_EFAULT;
4171 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4172 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4173 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4174 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4175 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4176 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4177 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4178 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4179 unlock_user_struct(target_msginfo, target_addr, 1);
4180 return 0;
/* msgctl(2) emulation.  ptr is the guest address of the cmd-specific
 * buffer.  Returns target errnos. */
4183 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4185 struct msqid_ds dsarg;
4186 struct msginfo msginfo;
4187 abi_long ret = -TARGET_EINVAL;
/* Strip command modifier bits (e.g. IPC_64) before dispatching. */
4189 cmd &= 0xff;
4191 switch (cmd) {
4192 case IPC_STAT:
4193 case IPC_SET:
4194 case MSG_STAT:
4195 if (target_to_host_msqid_ds(&dsarg,ptr))
4196 return -TARGET_EFAULT;
4197 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4198 if (host_to_target_msqid_ds(ptr,&dsarg))
4199 return -TARGET_EFAULT;
4200 break;
4201 case IPC_RMID:
4202 ret = get_errno(msgctl(msgid, cmd, NULL));
4203 break;
4204 case IPC_INFO:
4205 case MSG_INFO:
/* The kernel's msgctl ABI takes a msqid_ds pointer even for INFO
 * commands, hence the cast. */
4206 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4207 if (host_to_target_msginfo(ptr, &msginfo))
4208 return -TARGET_EFAULT;
4209 break;
4212 return ret;
/* Guest-ABI msgbuf header: abi_long mtype followed by the payload. */
4215 struct target_msgbuf {
4216 abi_long mtype;
4217 char mtext[1];
/* msgsnd(2) emulation: build a host msgbuf (host 'long' mtype, hence
 * the sizeof(long) below) from the guest message and send it. */
4220 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4221 ssize_t msgsz, int msgflg)
4223 struct target_msgbuf *target_mb;
4224 struct msgbuf *host_mb;
4225 abi_long ret = 0;
4227 if (msgsz < 0) {
4228 return -TARGET_EINVAL;
4231 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4232 return -TARGET_EFAULT;
/* Host buffer = host mtype (long) + msgsz payload bytes. */
4233 host_mb = g_try_malloc(msgsz + sizeof(long));
4234 if (!host_mb) {
4235 unlock_user_struct(target_mb, msgp, 0);
4236 return -TARGET_ENOMEM;
4238 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4239 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4240 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4241 g_free(host_mb);
4242 unlock_user_struct(target_mb, msgp, 0);
4244 return ret;
/* msgrcv(2) emulation: receive into a host msgbuf, then copy mtype
 * (byteswapped) and the payload back to the guest msgbuf at msgp. */
4247 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4248 ssize_t msgsz, abi_long msgtyp,
4249 int msgflg)
4251 struct target_msgbuf *target_mb;
4252 char *target_mtext;
4253 struct msgbuf *host_mb;
4254 abi_long ret = 0;
4256 if (msgsz < 0) {
4257 return -TARGET_EINVAL;
4260 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4261 return -TARGET_EFAULT;
/* Host buffer = host mtype (long) + msgsz payload bytes. */
4263 host_mb = g_try_malloc(msgsz + sizeof(long));
4264 if (!host_mb) {
4265 ret = -TARGET_ENOMEM;
4266 goto end;
4268 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4270 if (ret > 0) {
/* mtext starts right after the guest mtype (an abi_long/abi_ulong). */
4271 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
/* Re-lock just the payload region so exactly 'ret' bytes are
 * validated and copied back. */
4272 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4273 if (!target_mtext) {
4274 ret = -TARGET_EFAULT;
4275 goto end;
4277 memcpy(target_mb->mtext, host_mb->mtext, ret);
4278 unlock_user(target_mtext, target_mtext_addr, ret);
4281 target_mb->mtype = tswapal(host_mb->mtype);
4283 end:
4284 if (target_mb)
4285 unlock_user_struct(target_mb, msgp, 1);
/* g_free(NULL) is a no-op, so this covers the ENOMEM path too. */
4286 g_free(host_mb);
4287 return ret;
4290 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4291 abi_ulong target_addr)
4293 struct target_shmid_ds *target_sd;
4295 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4296 return -TARGET_EFAULT;
4297 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4298 return -TARGET_EFAULT;
4299 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4300 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4301 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4302 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4303 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4304 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4305 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4306 unlock_user_struct(target_sd, target_addr, 0);
4307 return 0;
4310 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4311 struct shmid_ds *host_sd)
4313 struct target_shmid_ds *target_sd;
4315 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4316 return -TARGET_EFAULT;
4317 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4318 return -TARGET_EFAULT;
4319 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4320 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4321 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4322 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4323 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4324 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4325 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4326 unlock_user_struct(target_sd, target_addr, 1);
4327 return 0;
/* Guest-ABI layout of struct shminfo (IPC_INFO result). */
4330 struct target_shminfo {
4331 abi_ulong shmmax;
4332 abi_ulong shmmin;
4333 abi_ulong shmmni;
4334 abi_ulong shmseg;
4335 abi_ulong shmall;
/* Copy a host struct shminfo to the guest shminfo at target_addr,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
4338 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4339 struct shminfo *host_shminfo)
4341 struct target_shminfo *target_shminfo;
4342 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4343 return -TARGET_EFAULT;
4344 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4345 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4346 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4347 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4348 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4349 unlock_user_struct(target_shminfo, target_addr, 1);
4350 return 0;
/* Guest-ABI layout of struct shm_info (SHM_INFO result). */
4353 struct target_shm_info {
4354 int used_ids;
4355 abi_ulong shm_tot;
4356 abi_ulong shm_rss;
4357 abi_ulong shm_swp;
4358 abi_ulong swap_attempts;
4359 abi_ulong swap_successes;
/* Copy a host struct shm_info to the guest shm_info at target_addr,
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
4362 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4363 struct shm_info *host_shm_info)
4365 struct target_shm_info *target_shm_info;
4366 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4367 return -TARGET_EFAULT;
4368 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4369 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4370 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4371 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4372 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4373 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4374 unlock_user_struct(target_shm_info, target_addr, 1);
4375 return 0;
/* shmctl(2) emulation.  buf is the guest address of the cmd-specific
 * buffer.  Returns target errnos. */
4378 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4380 struct shmid_ds dsarg;
4381 struct shminfo shminfo;
4382 struct shm_info shm_info;
4383 abi_long ret = -TARGET_EINVAL;
/* Strip command modifier bits (e.g. IPC_64) before dispatching. */
4385 cmd &= 0xff;
4387 switch(cmd) {
4388 case IPC_STAT:
4389 case IPC_SET:
4390 case SHM_STAT:
4391 if (target_to_host_shmid_ds(&dsarg, buf))
4392 return -TARGET_EFAULT;
4393 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4394 if (host_to_target_shmid_ds(buf, &dsarg))
4395 return -TARGET_EFAULT;
4396 break;
4397 case IPC_INFO:
/* The kernel's shmctl ABI takes a shmid_ds pointer even for the
 * INFO commands, hence the casts. */
4398 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4399 if (host_to_target_shminfo(buf, &shminfo))
4400 return -TARGET_EFAULT;
4401 break;
4402 case SHM_INFO:
4403 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4404 if (host_to_target_shm_info(buf, &shm_info))
4405 return -TARGET_EFAULT;
4406 break;
4407 case IPC_RMID:
4408 case SHM_LOCK:
4409 case SHM_UNLOCK:
4410 ret = get_errno(shmctl(shmid, cmd, NULL));
4411 break;
4414 return ret;
/* shmat(2) emulation: attach the segment into the guest address space,
 * update guest page flags, and record the mapping in shm_regions[] so
 * do_shmdt() can undo it.  Returns the guest attach address or a
 * negative errno. */
4417 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
4419 abi_long raddr;
4420 void *host_raddr;
4421 struct shmid_ds shm_info;
4422 int i,ret;
4424 /* find out the length of the shared memory segment */
4425 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4426 if (is_error(ret)) {
4427 /* can't get length, bail out */
4428 return ret;
/* Hold the mmap lock across vma search + attach + page flag update
 * so the guest address space stays consistent. */
4431 mmap_lock();
4433 if (shmaddr)
4434 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4435 else {
4436 abi_ulong mmap_start;
/* Guest gave no address: find a free guest vma ourselves. */
4438 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4440 if (mmap_start == -1) {
4441 errno = ENOMEM;
4442 host_raddr = (void *)-1;
4443 } else
/* SHM_REMAP: the reserved range is already mapped, replace it. */
4444 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4447 if (host_raddr == (void *)-1) {
4448 mmap_unlock();
4449 return get_errno((long)host_raddr);
4451 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached range readable (and writable unless RDONLY). */
4453 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4454 PAGE_VALID | PAGE_READ |
4455 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Record the attachment in the first free slot; if the table is
 * full the attach still succeeds but do_shmdt() won't be able to
 * clear page flags for it. */
4457 for (i = 0; i < N_SHM_REGIONS; i++) {
4458 if (!shm_regions[i].in_use) {
4459 shm_regions[i].in_use = true;
4460 shm_regions[i].start = raddr;
4461 shm_regions[i].size = shm_info.shm_segsz;
4462 break;
4466 mmap_unlock();
4467 return raddr;
4471 static inline abi_long do_shmdt(abi_ulong shmaddr)
4473 int i;
4475 for (i = 0; i < N_SHM_REGIONS; ++i) {
4476 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4477 shm_regions[i].in_use = false;
4478 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4479 break;
4483 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/* Demultiplex the legacy ipc(2) syscall into the individual SysV IPC
 * operations.  The top 16 bits of 'call' carry an interface version
 * (used by msgrcv and shmat); the low 16 bits select the operation. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv: 'ptr' points at a kludge struct that
                 * packs the real message pointer and the message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }

        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* Version 1 (iBCS2 emulator entry) is not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First pass over syscall_types.h: build an enum of STRUCT_<name>
 * identifiers, one per described kernel structure, ending at STRUCT_MAX. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk type-description array struct_<name>_def[]
 * for each regular structure.  STRUCT_SPECIAL entries expand to nothing
 * here because they are converted by hand-written code instead. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Optional per-ioctl handler for requests needing custom marshalling. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry in the ioctl translation table generated from ioctls.h. */
struct IOCTLEntry {
    int target_cmd;            /* ioctl number as seen by the guest */
    unsigned int host_cmd;     /* corresponding host ioctl number */
    const char *name;          /* stringified command name, for logging */
    int access;                /* IOC_R / IOC_W / IOC_RW direction flags */
    do_ioctl_fn *do_ioctl;     /* custom handler, or 0 for the generic path */
    const argtype arg_type[5]; /* thunk description of the argument */
};

/* Data-flow direction of an ioctl argument, from the guest's viewpoint:
 * IOC_R = kernel writes it back, IOC_W = kernel reads it. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the on-stack scratch buffer used for argument conversion. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* FS_IOC_FIEMAP: custom marshalling because the argument has a
 * variable-length tail of struct fiemap_extent records. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size header from guest memory. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4718 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4719 int fd, int cmd, abi_long arg)
4721 const argtype *arg_type = ie->arg_type;
4722 int target_size;
4723 void *argptr;
4724 int ret;
4725 struct ifconf *host_ifconf;
4726 uint32_t outbufsz;
4727 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4728 int target_ifreq_size;
4729 int nb_ifreq;
4730 int free_buf = 0;
4731 int i;
4732 int target_ifc_len;
4733 abi_long target_ifc_buf;
4734 int host_ifc_len;
4735 char *host_ifc_buf;
4737 assert(arg_type[0] == TYPE_PTR);
4738 assert(ie->access == IOC_RW);
4740 arg_type++;
4741 target_size = thunk_type_size(arg_type, 0);
4743 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4744 if (!argptr)
4745 return -TARGET_EFAULT;
4746 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4747 unlock_user(argptr, arg, 0);
4749 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4750 target_ifc_len = host_ifconf->ifc_len;
4751 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4753 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4754 nb_ifreq = target_ifc_len / target_ifreq_size;
4755 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4757 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4758 if (outbufsz > MAX_STRUCT_SIZE) {
4759 /* We can't fit all the extents into the fixed size buffer.
4760 * Allocate one that is large enough and use it instead.
4762 host_ifconf = malloc(outbufsz);
4763 if (!host_ifconf) {
4764 return -TARGET_ENOMEM;
4766 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4767 free_buf = 1;
4769 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4771 host_ifconf->ifc_len = host_ifc_len;
4772 host_ifconf->ifc_buf = host_ifc_buf;
4774 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4775 if (!is_error(ret)) {
4776 /* convert host ifc_len to target ifc_len */
4778 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4779 target_ifc_len = nb_ifreq * target_ifreq_size;
4780 host_ifconf->ifc_len = target_ifc_len;
4782 /* restore target ifc_buf */
4784 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4786 /* copy struct ifconf to target user */
4788 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4789 if (!argptr)
4790 return -TARGET_EFAULT;
4791 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4792 unlock_user(argptr, arg, target_size);
4794 /* copy ifreq[] to target user */
4796 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4797 for (i = 0; i < nb_ifreq ; i++) {
4798 thunk_convert(argptr + i * target_ifreq_size,
4799 host_ifc_buf + i * sizeof(struct ifreq),
4800 ifreq_arg_type, THUNK_TARGET);
4802 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4805 if (free_buf) {
4806 free(host_ifconf);
4809 return ret;
/* DM_* (device-mapper) ioctls: the argument is a struct dm_ioctl header
 * followed, at offset data_start, by command-specific variable-length
 * data; data_size covers the whole thing.  That usually exceeds
 * MAX_STRUCT_SIZE, so the request is staged in a heap buffer. */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed struct dm_ioctl header first. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* NOTE(review): this lock_user() result is not NULL-checked before
     * the cases below dereference it — pre-existing issue, left as-is. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* Payload is a 64-bit sector number followed by the message. */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is target_count struct dm_target_spec records, each
         * followed by a parameter string; 'next' links the records. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            /* Re-pack: host record plus its parameter string. */
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Linked list of struct dm_name_list + name string; convert
             * each node, stopping with DM_BUFFER_FULL_FLAG on overflow. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* target_count struct dm_target_spec records + strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* A 32-bit count followed (at offset 8) by 64-bit dev_t's. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Linked list of struct dm_target_versions + name string. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Write the (possibly flag-updated) header back to the guest. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
5037 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5038 int cmd, abi_long arg)
5040 void *argptr;
5041 int target_size;
5042 const argtype *arg_type = ie->arg_type;
5043 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5044 abi_long ret;
5046 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5047 struct blkpg_partition host_part;
5049 /* Read and convert blkpg */
5050 arg_type++;
5051 target_size = thunk_type_size(arg_type, 0);
5052 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5053 if (!argptr) {
5054 ret = -TARGET_EFAULT;
5055 goto out;
5057 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5058 unlock_user(argptr, arg, 0);
5060 switch (host_blkpg->op) {
5061 case BLKPG_ADD_PARTITION:
5062 case BLKPG_DEL_PARTITION:
5063 /* payload is struct blkpg_partition */
5064 break;
5065 default:
5066 /* Unknown opcode */
5067 ret = -TARGET_EINVAL;
5068 goto out;
5071 /* Read and convert blkpg->data */
5072 arg = (abi_long)(uintptr_t)host_blkpg->data;
5073 target_size = thunk_type_size(part_arg_type, 0);
5074 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5075 if (!argptr) {
5076 ret = -TARGET_EFAULT;
5077 goto out;
5079 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5080 unlock_user(argptr, arg, 0);
5082 /* Swizzle the data pointer to our local copy and call! */
5083 host_blkpg->data = &host_part;
5084 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5086 out:
5087 return ret;
5090 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5091 int fd, int cmd, abi_long arg)
5093 const argtype *arg_type = ie->arg_type;
5094 const StructEntry *se;
5095 const argtype *field_types;
5096 const int *dst_offsets, *src_offsets;
5097 int target_size;
5098 void *argptr;
5099 abi_ulong *target_rt_dev_ptr;
5100 unsigned long *host_rt_dev_ptr;
5101 abi_long ret;
5102 int i;
5104 assert(ie->access == IOC_W);
5105 assert(*arg_type == TYPE_PTR);
5106 arg_type++;
5107 assert(*arg_type == TYPE_STRUCT);
5108 target_size = thunk_type_size(arg_type, 0);
5109 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5110 if (!argptr) {
5111 return -TARGET_EFAULT;
5113 arg_type++;
5114 assert(*arg_type == (int)STRUCT_rtentry);
5115 se = struct_entries + *arg_type++;
5116 assert(se->convert[0] == NULL);
5117 /* convert struct here to be able to catch rt_dev string */
5118 field_types = se->field_types;
5119 dst_offsets = se->field_offsets[THUNK_HOST];
5120 src_offsets = se->field_offsets[THUNK_TARGET];
5121 for (i = 0; i < se->nb_fields; i++) {
5122 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5123 assert(*field_types == TYPE_PTRVOID);
5124 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5125 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5126 if (*target_rt_dev_ptr != 0) {
5127 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5128 tswapal(*target_rt_dev_ptr));
5129 if (!*host_rt_dev_ptr) {
5130 unlock_user(argptr, arg, 0);
5131 return -TARGET_EFAULT;
5133 } else {
5134 *host_rt_dev_ptr = 0;
5136 field_types++;
5137 continue;
5139 field_types = thunk_convert(buf_temp + dst_offsets[i],
5140 argptr + src_offsets[i],
5141 field_types, THUNK_HOST);
5143 unlock_user(argptr, arg, 0);
5145 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5146 if (*host_rt_dev_ptr != 0) {
5147 unlock_user((void *)*host_rt_dev_ptr,
5148 *target_rt_dev_ptr, 0);
5150 return ret;
5153 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5154 int fd, int cmd, abi_long arg)
5156 int sig = target_to_host_signal(arg);
5157 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* Translation table for all supported ioctls, generated from ioctls.h.
 * IOCTL() rows use the generic thunk-based conversion in do_ioctl();
 * IOCTL_SPECIAL() rows name a custom do_ioctl_fn handler.  The table is
 * terminated by an all-zero sentinel entry. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher: look up the guest command in ioctl_entries,
 * delegate to a custom handler if one is registered, otherwise marshal
 * the argument per its thunk description and direction flags. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table is terminated by a zero target_cmd. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Argument passes through unconverted. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel fills the struct: convert host -> target after. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads the struct: convert target -> host before. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Convert in both directions around the host call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag bits, rows of { target_mask, target_bits, host_mask,
 * host_bits } consumed by target_to_host_bitmask() and
 * host_to_target_bitmask() below; zero-terminated. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* termios c_oflag bits: simple flags plus the multi-bit delay fields
 * (NLDLY, CRDLY, TABDLY, ...) where the mask selects the field and the
 * bits select one value within it. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* termios c_cflag bits: baud rates (values within the CBAUD field),
 * character sizes (CSIZE field), and simple control flags. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* termios c_lflag (local mode) bits; same row layout as iflag_tbl. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/* Convert a guest struct target_termios (guest byte order) into the
 * host's struct host_termios: flag words via the bitmask tables above,
 * control characters via an explicit per-index mapping. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Control-character indices differ between target and host, so map
     * each one explicitly; entries with no mapping stay zero. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios(): convert the host's struct
 * host_termios into a guest struct target_termios (guest byte order). */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Explicit per-index control-character mapping (see the target->host
     * direction above); unmapped entries stay zero. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: conversion is handled by the two
 * hand-written functions above rather than a generated field table. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5432 static bitmask_transtbl mmap_flags_tbl[] = {
5433 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5434 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5435 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5436 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5437 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5438 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5439 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5440 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5441 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5442 MAP_NORESERVE },
5443 { 0, 0, 0, 0 }
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated LDT; allocated lazily by write_ldt(). */
static uint8_t *ldt_table;

/* modify_ldt(2) read path: copy the emulated LDT out to guest memory.
 * Returns the number of bytes copied (0 if no LDT was ever allocated),
 * or -TARGET_EFAULT if the guest buffer is not writable. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
5470 /* XXX: add locking support */
5471 static abi_long write_ldt(CPUX86State *env,
5472 abi_ulong ptr, unsigned long bytecount, int oldmode)
5474 struct target_modify_ldt_ldt_s ldt_info;
5475 struct target_modify_ldt_ldt_s *target_ldt_info;
5476 int seg_32bit, contents, read_exec_only, limit_in_pages;
5477 int seg_not_present, useable, lm;
5478 uint32_t *lp, entry_1, entry_2;
5480 if (bytecount != sizeof(ldt_info))
5481 return -TARGET_EINVAL;
5482 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5483 return -TARGET_EFAULT;
5484 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5485 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5486 ldt_info.limit = tswap32(target_ldt_info->limit);
5487 ldt_info.flags = tswap32(target_ldt_info->flags);
5488 unlock_user_struct(target_ldt_info, ptr, 0);
5490 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5491 return -TARGET_EINVAL;
5492 seg_32bit = ldt_info.flags & 1;
5493 contents = (ldt_info.flags >> 1) & 3;
5494 read_exec_only = (ldt_info.flags >> 3) & 1;
5495 limit_in_pages = (ldt_info.flags >> 4) & 1;
5496 seg_not_present = (ldt_info.flags >> 5) & 1;
5497 useable = (ldt_info.flags >> 6) & 1;
5498 #ifdef TARGET_ABI32
5499 lm = 0;
5500 #else
5501 lm = (ldt_info.flags >> 7) & 1;
5502 #endif
5503 if (contents == 3) {
5504 if (oldmode)
5505 return -TARGET_EINVAL;
5506 if (seg_not_present == 0)
5507 return -TARGET_EINVAL;
5509 /* allocate the LDT */
5510 if (!ldt_table) {
5511 env->ldt.base = target_mmap(0,
5512 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5513 PROT_READ|PROT_WRITE,
5514 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5515 if (env->ldt.base == -1)
5516 return -TARGET_ENOMEM;
5517 memset(g2h(env->ldt.base), 0,
5518 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5519 env->ldt.limit = 0xffff;
5520 ldt_table = g2h(env->ldt.base);
5523 /* NOTE: same code as Linux kernel */
5524 /* Allow LDTs to be cleared by the user. */
5525 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5526 if (oldmode ||
5527 (contents == 0 &&
5528 read_exec_only == 1 &&
5529 seg_32bit == 0 &&
5530 limit_in_pages == 0 &&
5531 seg_not_present == 1 &&
5532 useable == 0 )) {
5533 entry_1 = 0;
5534 entry_2 = 0;
5535 goto install;
5539 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5540 (ldt_info.limit & 0x0ffff);
5541 entry_2 = (ldt_info.base_addr & 0xff000000) |
5542 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5543 (ldt_info.limit & 0xf0000) |
5544 ((read_exec_only ^ 1) << 9) |
5545 (contents << 10) |
5546 ((seg_not_present ^ 1) << 15) |
5547 (seg_32bit << 22) |
5548 (limit_in_pages << 23) |
5549 (lm << 21) |
5550 0x7000;
5551 if (!oldmode)
5552 entry_2 |= (useable << 20);
5554 /* Install the new entry ... */
5555 install:
5556 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5557 lp[0] = tswap32(entry_1);
5558 lp[1] = tswap32(entry_2);
5559 return 0;
5562 /* specific and weird i386 syscalls */
5563 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5564 unsigned long bytecount)
5566 abi_long ret;
5568 switch (func) {
5569 case 0:
5570 ret = read_ldt(ptr, bytecount);
5571 break;
5572 case 1:
5573 ret = write_ldt(env, ptr, bytecount, 1);
5574 break;
5575 case 0x11:
5576 ret = write_ldt(env, ptr, bytecount, 0);
5577 break;
5578 default:
5579 ret = -TARGET_ENOSYS;
5580 break;
5582 return ret;
5585 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5586 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5588 uint64_t *gdt_table = g2h(env->gdt.base);
5589 struct target_modify_ldt_ldt_s ldt_info;
5590 struct target_modify_ldt_ldt_s *target_ldt_info;
5591 int seg_32bit, contents, read_exec_only, limit_in_pages;
5592 int seg_not_present, useable, lm;
5593 uint32_t *lp, entry_1, entry_2;
5594 int i;
5596 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5597 if (!target_ldt_info)
5598 return -TARGET_EFAULT;
5599 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5600 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5601 ldt_info.limit = tswap32(target_ldt_info->limit);
5602 ldt_info.flags = tswap32(target_ldt_info->flags);
5603 if (ldt_info.entry_number == -1) {
5604 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5605 if (gdt_table[i] == 0) {
5606 ldt_info.entry_number = i;
5607 target_ldt_info->entry_number = tswap32(i);
5608 break;
5612 unlock_user_struct(target_ldt_info, ptr, 1);
5614 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5615 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5616 return -TARGET_EINVAL;
5617 seg_32bit = ldt_info.flags & 1;
5618 contents = (ldt_info.flags >> 1) & 3;
5619 read_exec_only = (ldt_info.flags >> 3) & 1;
5620 limit_in_pages = (ldt_info.flags >> 4) & 1;
5621 seg_not_present = (ldt_info.flags >> 5) & 1;
5622 useable = (ldt_info.flags >> 6) & 1;
5623 #ifdef TARGET_ABI32
5624 lm = 0;
5625 #else
5626 lm = (ldt_info.flags >> 7) & 1;
5627 #endif
5629 if (contents == 3) {
5630 if (seg_not_present == 0)
5631 return -TARGET_EINVAL;
5634 /* NOTE: same code as Linux kernel */
5635 /* Allow LDTs to be cleared by the user. */
5636 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5637 if ((contents == 0 &&
5638 read_exec_only == 1 &&
5639 seg_32bit == 0 &&
5640 limit_in_pages == 0 &&
5641 seg_not_present == 1 &&
5642 useable == 0 )) {
5643 entry_1 = 0;
5644 entry_2 = 0;
5645 goto install;
5649 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5650 (ldt_info.limit & 0x0ffff);
5651 entry_2 = (ldt_info.base_addr & 0xff000000) |
5652 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5653 (ldt_info.limit & 0xf0000) |
5654 ((read_exec_only ^ 1) << 9) |
5655 (contents << 10) |
5656 ((seg_not_present ^ 1) << 15) |
5657 (seg_32bit << 22) |
5658 (limit_in_pages << 23) |
5659 (useable << 20) |
5660 (lm << 21) |
5661 0x7000;
5663 /* Install the new entry ... */
5664 install:
5665 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5666 lp[0] = tswap32(entry_1);
5667 lp[1] = tswap32(entry_2);
5668 return 0;
5671 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5673 struct target_modify_ldt_ldt_s *target_ldt_info;
5674 uint64_t *gdt_table = g2h(env->gdt.base);
5675 uint32_t base_addr, limit, flags;
5676 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5677 int seg_not_present, useable, lm;
5678 uint32_t *lp, entry_1, entry_2;
5680 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5681 if (!target_ldt_info)
5682 return -TARGET_EFAULT;
5683 idx = tswap32(target_ldt_info->entry_number);
5684 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5685 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5686 unlock_user_struct(target_ldt_info, ptr, 1);
5687 return -TARGET_EINVAL;
5689 lp = (uint32_t *)(gdt_table + idx);
5690 entry_1 = tswap32(lp[0]);
5691 entry_2 = tswap32(lp[1]);
5693 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5694 contents = (entry_2 >> 10) & 3;
5695 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5696 seg_32bit = (entry_2 >> 22) & 1;
5697 limit_in_pages = (entry_2 >> 23) & 1;
5698 useable = (entry_2 >> 20) & 1;
5699 #ifdef TARGET_ABI32
5700 lm = 0;
5701 #else
5702 lm = (entry_2 >> 21) & 1;
5703 #endif
5704 flags = (seg_32bit << 0) | (contents << 1) |
5705 (read_exec_only << 3) | (limit_in_pages << 4) |
5706 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5707 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5708 base_addr = (entry_1 >> 16) |
5709 (entry_2 & 0xff000000) |
5710 ((entry_2 & 0xff) << 16);
5711 target_ldt_info->base_addr = tswapal(base_addr);
5712 target_ldt_info->limit = tswap32(limit);
5713 target_ldt_info->flags = tswap32(flags);
5714 unlock_user_struct(target_ldt_info, ptr, 1);
5715 return 0;
5717 #endif /* TARGET_I386 && TARGET_ABI32 */
5719 #ifndef TARGET_ABI32
5720 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5722 abi_long ret = 0;
5723 abi_ulong val;
5724 int idx;
5726 switch(code) {
5727 case TARGET_ARCH_SET_GS:
5728 case TARGET_ARCH_SET_FS:
5729 if (code == TARGET_ARCH_SET_GS)
5730 idx = R_GS;
5731 else
5732 idx = R_FS;
5733 cpu_x86_load_seg(env, idx, 0);
5734 env->segs[idx].base = addr;
5735 break;
5736 case TARGET_ARCH_GET_GS:
5737 case TARGET_ARCH_GET_FS:
5738 if (code == TARGET_ARCH_GET_GS)
5739 idx = R_GS;
5740 else
5741 idx = R_FS;
5742 val = env->segs[idx].base;
5743 if (put_user(val, addr, abi_ulong))
5744 ret = -TARGET_EFAULT;
5745 break;
5746 default:
5747 ret = -TARGET_EINVAL;
5748 break;
5750 return ret;
5752 #endif
5754 #endif /* defined(TARGET_I386) */
5756 #define NEW_STACK_SIZE 0x40000 /* host stack size for cloned threads */
/* Serializes thread creation: the child blocks on this until the parent
   has finished setting up its state (see clone_func/do_fork). */
5759 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent -> child handshake data for emulated clone(CLONE_VM). */
5760 typedef struct {
5761 CPUArchState *env; /* the child's freshly copied CPU state */
5762 pthread_mutex_t mutex; /* protects cond below */
5763 pthread_cond_t cond; /* child signals this when it is ready */
5764 pthread_t thread;
5765 uint32_t tid; /* child's host TID, filled in by the child */
5766 abi_ulong child_tidptr; /* guest address for CLONE_CHILD_SETTID */
5767 abi_ulong parent_tidptr; /* guest address for CLONE_PARENT_SETTID */
5768 sigset_t sigmask; /* parent's signal mask, restored in the child */
5769 } new_thread_info;
5771 static void * QEMU_NORETURN clone_func(void *arg)
5773 new_thread_info *info = arg;
5774 CPUArchState *env;
5775 CPUState *cpu;
5776 TaskState *ts;
5778 rcu_register_thread();
5779 env = info->env;
5780 cpu = ENV_GET_CPU(env);
5781 thread_cpu = cpu;
5782 ts = (TaskState *)cpu->opaque;
5783 info->tid = gettid();
5784 cpu->host_tid = info->tid;
5785 task_settid(ts);
5786 if (info->child_tidptr)
5787 put_user_u32(info->tid, info->child_tidptr);
5788 if (info->parent_tidptr)
5789 put_user_u32(info->tid, info->parent_tidptr);
5790 /* Enable signals. */
5791 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5792 /* Signal to the parent that we're ready. */
5793 pthread_mutex_lock(&info->mutex);
5794 pthread_cond_broadcast(&info->cond);
5795 pthread_mutex_unlock(&info->mutex);
5796 /* Wait until the parent has finshed initializing the tls state. */
5797 pthread_mutex_lock(&clone_lock);
5798 pthread_mutex_unlock(&clone_lock);
5799 cpu_loop(env);
5800 /* never exits */
5803 /* do_fork() Must return host values and target errnos (unlike most
5804 do_*() functions). */
5805 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5806 abi_ulong parent_tidptr, target_ulong newtls,
5807 abi_ulong child_tidptr)
5809 CPUState *cpu = ENV_GET_CPU(env);
5810 int ret;
5811 TaskState *ts;
5812 CPUState *new_cpu;
5813 CPUArchState *new_env;
5814 unsigned int nptl_flags;
5815 sigset_t sigmask;
5817 /* Emulate vfork() with fork() */
5818 if (flags & CLONE_VFORK)
5819 flags &= ~(CLONE_VFORK | CLONE_VM);
5821 if (flags & CLONE_VM) {
5822 TaskState *parent_ts = (TaskState *)cpu->opaque;
5823 new_thread_info info;
5824 pthread_attr_t attr;
5826 ts = g_new0(TaskState, 1);
5827 init_task_state(ts);
5828 /* we create a new CPU instance. */
5829 new_env = cpu_copy(env);
5830 /* Init regs that differ from the parent. */
5831 cpu_clone_regs(new_env, newsp);
5832 new_cpu = ENV_GET_CPU(new_env);
5833 new_cpu->opaque = ts;
5834 ts->bprm = parent_ts->bprm;
5835 ts->info = parent_ts->info;
5836 ts->signal_mask = parent_ts->signal_mask;
5837 nptl_flags = flags;
5838 flags &= ~CLONE_NPTL_FLAGS2;
5840 if (nptl_flags & CLONE_CHILD_CLEARTID) {
5841 ts->child_tidptr = child_tidptr;
5844 if (nptl_flags & CLONE_SETTLS)
5845 cpu_set_tls (new_env, newtls);
5847 /* Grab a mutex so that thread setup appears atomic. */
5848 pthread_mutex_lock(&clone_lock);
5850 memset(&info, 0, sizeof(info));
5851 pthread_mutex_init(&info.mutex, NULL);
5852 pthread_mutex_lock(&info.mutex);
5853 pthread_cond_init(&info.cond, NULL);
5854 info.env = new_env;
5855 if (nptl_flags & CLONE_CHILD_SETTID)
5856 info.child_tidptr = child_tidptr;
5857 if (nptl_flags & CLONE_PARENT_SETTID)
5858 info.parent_tidptr = parent_tidptr;
5860 ret = pthread_attr_init(&attr);
5861 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5862 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5863 /* It is not safe to deliver signals until the child has finished
5864 initializing, so temporarily block all signals. */
5865 sigfillset(&sigmask);
5866 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5868 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5869 /* TODO: Free new CPU state if thread creation failed. */
5871 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5872 pthread_attr_destroy(&attr);
5873 if (ret == 0) {
5874 /* Wait for the child to initialize. */
5875 pthread_cond_wait(&info.cond, &info.mutex);
5876 ret = info.tid;
5877 if (flags & CLONE_PARENT_SETTID)
5878 put_user_u32(ret, parent_tidptr);
5879 } else {
5880 ret = -1;
5882 pthread_mutex_unlock(&info.mutex);
5883 pthread_cond_destroy(&info.cond);
5884 pthread_mutex_destroy(&info.mutex);
5885 pthread_mutex_unlock(&clone_lock);
5886 } else {
5887 /* if no CLONE_VM, we consider it is a fork */
5888 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
5889 return -TARGET_EINVAL;
5892 if (block_signals()) {
5893 return -TARGET_ERESTARTSYS;
5896 fork_start();
5897 ret = fork();
5898 if (ret == 0) {
5899 /* Child Process. */
5900 rcu_after_fork();
5901 cpu_clone_regs(env, newsp);
5902 fork_end(1);
5903 /* There is a race condition here. The parent process could
5904 theoretically read the TID in the child process before the child
5905 tid is set. This would require using either ptrace
5906 (not implemented) or having *_tidptr to point at a shared memory
5907 mapping. We can't repeat the spinlock hack used above because
5908 the child process gets its own copy of the lock. */
5909 if (flags & CLONE_CHILD_SETTID)
5910 put_user_u32(gettid(), child_tidptr);
5911 if (flags & CLONE_PARENT_SETTID)
5912 put_user_u32(gettid(), parent_tidptr);
5913 ts = (TaskState *)cpu->opaque;
5914 if (flags & CLONE_SETTLS)
5915 cpu_set_tls (env, newtls);
5916 if (flags & CLONE_CHILD_CLEARTID)
5917 ts->child_tidptr = child_tidptr;
5918 } else {
5919 fork_end(0);
5922 return ret;
5925 /* warning : doesn't handle linux specific flags... */
5926 static int target_to_host_fcntl_cmd(int cmd)
5928 switch(cmd) {
5929 case TARGET_F_DUPFD:
5930 case TARGET_F_GETFD:
5931 case TARGET_F_SETFD:
5932 case TARGET_F_GETFL:
5933 case TARGET_F_SETFL:
5934 return cmd;
5935 case TARGET_F_GETLK:
5936 return F_GETLK64;
5937 case TARGET_F_SETLK:
5938 return F_SETLK64;
5939 case TARGET_F_SETLKW:
5940 return F_SETLKW64;
5941 case TARGET_F_GETOWN:
5942 return F_GETOWN;
5943 case TARGET_F_SETOWN:
5944 return F_SETOWN;
5945 case TARGET_F_GETSIG:
5946 return F_GETSIG;
5947 case TARGET_F_SETSIG:
5948 return F_SETSIG;
5949 #if TARGET_ABI_BITS == 32
5950 case TARGET_F_GETLK64:
5951 return F_GETLK64;
5952 case TARGET_F_SETLK64:
5953 return F_SETLK64;
5954 case TARGET_F_SETLKW64:
5955 return F_SETLKW64;
5956 #endif
5957 case TARGET_F_SETLEASE:
5958 return F_SETLEASE;
5959 case TARGET_F_GETLEASE:
5960 return F_GETLEASE;
5961 #ifdef F_DUPFD_CLOEXEC
5962 case TARGET_F_DUPFD_CLOEXEC:
5963 return F_DUPFD_CLOEXEC;
5964 #endif
5965 case TARGET_F_NOTIFY:
5966 return F_NOTIFY;
5967 #ifdef F_GETOWN_EX
5968 case TARGET_F_GETOWN_EX:
5969 return F_GETOWN_EX;
5970 #endif
5971 #ifdef F_SETOWN_EX
5972 case TARGET_F_SETOWN_EX:
5973 return F_SETOWN_EX;
5974 #endif
5975 #ifdef F_SETPIPE_SZ
5976 case TARGET_F_SETPIPE_SZ:
5977 return F_SETPIPE_SZ;
5978 case TARGET_F_GETPIPE_SZ:
5979 return F_GETPIPE_SZ;
5980 #endif
5981 default:
5982 return -TARGET_EINVAL;
5984 return -TARGET_EINVAL;
/* Build a bitmask_transtbl row mapping TARGET_F_xxx <-> F_xxx lock types.
   The -1 masks make target_to_host_bitmask() match on the exact value. */
5987 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
/* flock(2)/fcntl lock-type translation table; zero row terminates. */
5988 static const bitmask_transtbl flock_tbl[] = {
5989 TRANSTBL_CONVERT(F_RDLCK),
5990 TRANSTBL_CONVERT(F_WRLCK),
5991 TRANSTBL_CONVERT(F_UNLCK),
5992 TRANSTBL_CONVERT(F_EXLCK),
5993 TRANSTBL_CONVERT(F_SHLCK),
5994 { 0, 0, 0, 0 }
5997 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5998 abi_ulong target_flock_addr)
6000 struct target_flock *target_fl;
6001 short l_type;
6003 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6004 return -TARGET_EFAULT;
6007 __get_user(l_type, &target_fl->l_type);
6008 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6009 __get_user(fl->l_whence, &target_fl->l_whence);
6010 __get_user(fl->l_start, &target_fl->l_start);
6011 __get_user(fl->l_len, &target_fl->l_len);
6012 __get_user(fl->l_pid, &target_fl->l_pid);
6013 unlock_user_struct(target_fl, target_flock_addr, 0);
6014 return 0;
6017 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6018 const struct flock64 *fl)
6020 struct target_flock *target_fl;
6021 short l_type;
6023 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6024 return -TARGET_EFAULT;
6027 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6028 __put_user(l_type, &target_fl->l_type);
6029 __put_user(fl->l_whence, &target_fl->l_whence);
6030 __put_user(fl->l_start, &target_fl->l_start);
6031 __put_user(fl->l_len, &target_fl->l_len);
6032 __put_user(fl->l_pid, &target_fl->l_pid);
6033 unlock_user_struct(target_fl, target_flock_addr, 1);
6034 return 0;
/* Function types for the flock64 copy helpers, letting the fcntl64 code
   select the ARM-EABI or generic variant at run time. */
6037 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6038 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * ARM-EABI variant of the flock64 converters: the EABI struct layout
 * differs (alignment padding before l_start), hence the dedicated
 * target_eabi_flock64 accessors.
 */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *tfl;
    short lock_type;

    if (!lock_user_struct(VERIFY_READ, tfl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(lock_type, &tfl->l_type);
    fl->l_type = target_to_host_bitmask(lock_type, flock_tbl);
    __get_user(fl->l_whence, &tfl->l_whence);
    __get_user(fl->l_start, &tfl->l_start);
    __get_user(fl->l_len, &tfl->l_len);
    __get_user(fl->l_pid, &tfl->l_pid);
    unlock_user_struct(tfl, target_flock_addr, 0);
    return 0;
}

/* Converse of copy_from_user_eabi_flock64: host flock64 -> guest EABI. */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *tfl;
    short lock_type;

    if (!lock_user_struct(VERIFY_WRITE, tfl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    lock_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(lock_type, &tfl->l_type);
    __put_user(fl->l_whence, &tfl->l_whence);
    __put_user(fl->l_start, &tfl->l_start);
    __put_user(fl->l_len, &tfl->l_len);
    __put_user(fl->l_pid, &tfl->l_pid);
    unlock_user_struct(tfl, target_flock_addr, 1);
    return 0;
}
#endif
6082 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6083 abi_ulong target_flock_addr)
6085 struct target_flock64 *target_fl;
6086 short l_type;
6088 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6089 return -TARGET_EFAULT;
6092 __get_user(l_type, &target_fl->l_type);
6093 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6094 __get_user(fl->l_whence, &target_fl->l_whence);
6095 __get_user(fl->l_start, &target_fl->l_start);
6096 __get_user(fl->l_len, &target_fl->l_len);
6097 __get_user(fl->l_pid, &target_fl->l_pid);
6098 unlock_user_struct(target_fl, target_flock_addr, 0);
6099 return 0;
6102 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6103 const struct flock64 *fl)
6105 struct target_flock64 *target_fl;
6106 short l_type;
6108 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6109 return -TARGET_EFAULT;
6112 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6113 __put_user(l_type, &target_fl->l_type);
6114 __put_user(fl->l_whence, &target_fl->l_whence);
6115 __put_user(fl->l_start, &target_fl->l_start);
6116 __put_user(fl->l_len, &target_fl->l_len);
6117 __put_user(fl->l_pid, &target_fl->l_pid);
6118 unlock_user_struct(target_fl, target_flock_addr, 1);
6119 return 0;
6122 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6124 struct flock64 fl64;
6125 #ifdef F_GETOWN_EX
6126 struct f_owner_ex fox;
6127 struct target_f_owner_ex *target_fox;
6128 #endif
6129 abi_long ret;
6130 int host_cmd = target_to_host_fcntl_cmd(cmd);
6132 if (host_cmd == -TARGET_EINVAL)
6133 return host_cmd;
6135 switch(cmd) {
6136 case TARGET_F_GETLK:
6137 ret = copy_from_user_flock(&fl64, arg);
6138 if (ret) {
6139 return ret;
6141 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6142 if (ret == 0) {
6143 ret = copy_to_user_flock(arg, &fl64);
6145 break;
6147 case TARGET_F_SETLK:
6148 case TARGET_F_SETLKW:
6149 ret = copy_from_user_flock(&fl64, arg);
6150 if (ret) {
6151 return ret;
6153 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6154 break;
6156 case TARGET_F_GETLK64:
6157 ret = copy_from_user_flock64(&fl64, arg);
6158 if (ret) {
6159 return ret;
6161 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6162 if (ret == 0) {
6163 ret = copy_to_user_flock64(arg, &fl64);
6165 break;
6166 case TARGET_F_SETLK64:
6167 case TARGET_F_SETLKW64:
6168 ret = copy_from_user_flock64(&fl64, arg);
6169 if (ret) {
6170 return ret;
6172 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6173 break;
6175 case TARGET_F_GETFL:
6176 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6177 if (ret >= 0) {
6178 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6180 break;
6182 case TARGET_F_SETFL:
6183 ret = get_errno(safe_fcntl(fd, host_cmd,
6184 target_to_host_bitmask(arg,
6185 fcntl_flags_tbl)));
6186 break;
6188 #ifdef F_GETOWN_EX
6189 case TARGET_F_GETOWN_EX:
6190 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6191 if (ret >= 0) {
6192 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6193 return -TARGET_EFAULT;
6194 target_fox->type = tswap32(fox.type);
6195 target_fox->pid = tswap32(fox.pid);
6196 unlock_user_struct(target_fox, arg, 1);
6198 break;
6199 #endif
6201 #ifdef F_SETOWN_EX
6202 case TARGET_F_SETOWN_EX:
6203 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6204 return -TARGET_EFAULT;
6205 fox.type = tswap32(target_fox->type);
6206 fox.pid = tswap32(target_fox->pid);
6207 unlock_user_struct(target_fox, arg, 0);
6208 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6209 break;
6210 #endif
6212 case TARGET_F_SETOWN:
6213 case TARGET_F_GETOWN:
6214 case TARGET_F_SETSIG:
6215 case TARGET_F_GETSIG:
6216 case TARGET_F_SETLEASE:
6217 case TARGET_F_GETLEASE:
6218 case TARGET_F_SETPIPE_SZ:
6219 case TARGET_F_GETPIPE_SZ:
6220 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6221 break;
6223 default:
6224 ret = get_errno(safe_fcntl(fd, cmd, arg));
6225 break;
6227 return ret;
#ifdef USE_UID16

/* The guest ABI uses 16-bit uids/gids.  Clamp 32-bit host ids into the
 * 16-bit range: anything above 65535 is reported as the overflow id
 * 65534, matching the kernel's behaviour for the legacy 16-bit calls. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}

/* Widen a 16-bit id, preserving the special -1 "unchanged" value. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit id ABI: every conversion is the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6296 /* We must do direct syscalls for setting UID/GID, because we want to
6297 * implement the Linux system call semantics of "change only for this thread",
6298 * not the libc/POSIX semantics of "change for all threads in process".
6299 * (See http://ewontfix.com/17/ for more details.)
6300 * We use the 32-bit version of the syscalls if present; if it is not
6301 * then either the host architecture supports 32-bit UIDs natively with
6302 * the standard syscall, or the 16-bit UID is the best we can do.
6304 #ifdef __NR_setuid32
6305 #define __NR_sys_setuid __NR_setuid32
6306 #else
6307 #define __NR_sys_setuid __NR_setuid
6308 #endif
6309 #ifdef __NR_setgid32
6310 #define __NR_sys_setgid __NR_setgid32
6311 #else
6312 #define __NR_sys_setgid __NR_setgid
6313 #endif
6314 #ifdef __NR_setresuid32
6315 #define __NR_sys_setresuid __NR_setresuid32
6316 #else
6317 #define __NR_sys_setresuid __NR_setresuid
6318 #endif
6319 #ifdef __NR_setresgid32
6320 #define __NR_sys_setresgid __NR_setresgid32
6321 #else
6322 #define __NR_sys_setresgid __NR_setresgid
6323 #endif
/* Generate raw, per-thread syscall wrappers (bypassing glibc). */
6325 _syscall1(int, sys_setuid, uid_t, uid)
6326 _syscall1(int, sys_setgid, gid_t, gid)
6327 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6328 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6330 void syscall_init(void)
6332 IOCTLEntry *ie;
6333 const argtype *arg_type;
6334 int size;
6335 int i;
6337 thunk_init(STRUCT_MAX);
6339 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6340 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6341 #include "syscall_types.h"
6342 #undef STRUCT
6343 #undef STRUCT_SPECIAL
6345 /* Build target_to_host_errno_table[] table from
6346 * host_to_target_errno_table[]. */
6347 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6348 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6351 /* we patch the ioctl size if necessary. We rely on the fact that
6352 no ioctl has all the bits at '1' in the size field */
6353 ie = ioctl_entries;
6354 while (ie->target_cmd != 0) {
6355 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6356 TARGET_IOC_SIZEMASK) {
6357 arg_type = ie->arg_type;
6358 if (arg_type[0] != TYPE_PTR) {
6359 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6360 ie->target_cmd);
6361 exit(1);
6363 arg_type++;
6364 size = thunk_type_size(arg_type, 0);
6365 ie->target_cmd = (ie->target_cmd &
6366 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6367 (size << TARGET_IOC_SIZESHIFT);
6370 /* automatic consistency check if same arch */
6371 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6372 (defined(__x86_64__) && defined(TARGET_X86_64))
6373 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6374 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6375 ie->name, ie->target_cmd, ie->host_cmd);
6377 #endif
6378 ie++;
#if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit file offset from the two 32-bit registers a
 * 32-bit guest ABI passes it in; the register order depends on guest
 * endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    /* Big-endian guests pass the high half first. */
    uint64_t off = word0;
    return (off << 32) | word1;
#else
    /* Little-endian guests pass the low half first. */
    uint64_t off = word1;
    return (off << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset whole; the second word is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64: the 64-bit length arrives split across two registers on
 * 32-bit ABIs; some ABIs additionally insert a padding register so the
 * pair is aligned (regpairs_aligned), which we skip here.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        /* Drop the alignment padding register. */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair handling as target_truncate64 above,
 * with a file descriptor instead of a path.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        /* Drop the alignment padding register. */
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6426 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6427 abi_ulong target_addr)
6429 struct target_timespec *target_ts;
6431 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6432 return -TARGET_EFAULT;
6433 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6434 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6435 unlock_user_struct(target_ts, target_addr, 0);
6436 return 0;
6439 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6440 struct timespec *host_ts)
6442 struct target_timespec *target_ts;
6444 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6445 return -TARGET_EFAULT;
6446 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6447 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6448 unlock_user_struct(target_ts, target_addr, 1);
6449 return 0;
6452 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6453 abi_ulong target_addr)
6455 struct target_itimerspec *target_itspec;
6457 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6458 return -TARGET_EFAULT;
6461 host_itspec->it_interval.tv_sec =
6462 tswapal(target_itspec->it_interval.tv_sec);
6463 host_itspec->it_interval.tv_nsec =
6464 tswapal(target_itspec->it_interval.tv_nsec);
6465 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6466 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6468 unlock_user_struct(target_itspec, target_addr, 1);
6469 return 0;
6472 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6473 struct itimerspec *host_its)
6475 struct target_itimerspec *target_itspec;
6477 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6478 return -TARGET_EFAULT;
6481 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6482 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6484 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6485 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6487 unlock_user_struct(target_itspec, target_addr, 0);
6488 return 0;
6491 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6492 abi_ulong target_addr)
6494 struct target_sigevent *target_sevp;
6496 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6497 return -TARGET_EFAULT;
6500 /* This union is awkward on 64 bit systems because it has a 32 bit
6501 * integer and a pointer in it; we follow the conversion approach
6502 * used for handling sigval types in signal.c so the guest should get
6503 * the correct value back even if we did a 64 bit byteswap and it's
6504 * using the 32 bit integer.
6506 host_sevp->sigev_value.sival_ptr =
6507 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6508 host_sevp->sigev_signo =
6509 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6510 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6511 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6513 unlock_user_struct(target_sevp, target_addr, 1);
6514 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate guest MCL_* flag bits for mlockall(2) into host values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
/* Write HOST_ST out to the guest as a stat64 structure at TARGET_ADDR.
 * On 32-bit ARM the layout depends on whether the guest uses the EABI;
 * all other targets use target_stat64 (or plain target_stat when no
 * distinct stat64 layout exists).
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer is unmappable.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        /* EABI guests use the padded target_eabi_stat64 layout. */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets duplicate the inode in an extra __st_ino field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        /* Targets without a distinct stat64 reuse plain target_stat. */
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6595 /* ??? Using host futex calls even when target atomic operations
6596 are not really atomic probably breaks things. However implementing
6597 futexes locally would make futexes shared between multiple processes
6598 tricky. However they're probably useless because guest atomic
6599 operations won't work either. */
6600 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6601 target_ulong uaddr2, int val3)
6603 struct timespec ts, *pts;
6604 int base_op;
6606 /* ??? We assume FUTEX_* constants are the same on both host
6607 and target. */
6608 #ifdef FUTEX_CMD_MASK
6609 base_op = op & FUTEX_CMD_MASK;
6610 #else
6611 base_op = op;
6612 #endif
6613 switch (base_op) {
6614 case FUTEX_WAIT:
6615 case FUTEX_WAIT_BITSET:
6616 if (timeout) {
6617 pts = &ts;
6618 target_to_host_timespec(pts, timeout);
6619 } else {
6620 pts = NULL;
6622 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6623 pts, NULL, val3));
6624 case FUTEX_WAKE:
6625 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6626 case FUTEX_FD:
6627 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6628 case FUTEX_REQUEUE:
6629 case FUTEX_CMP_REQUEUE:
6630 case FUTEX_WAKE_OP:
6631 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6632 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6633 But the prototype takes a `struct timespec *'; insert casts
6634 to satisfy the compiler. We do not need to tswap TIMEOUT
6635 since it's not compared to guest memory. */
6636 pts = (struct timespec *)(uintptr_t) timeout;
6637 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6638 g2h(uaddr2),
6639 (base_op == FUTEX_CMP_REQUEUE
6640 ? tswap32(val3)
6641 : val3)));
6642 default:
6643 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): call the host syscall into a host-side
 * file_handle buffer, then copy the (opaque) handle back to the guest,
 * byte-swapping the two typed header fields.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First word of the guest buffer is handle_bytes: the caller-supplied
     * capacity of the variable-length f_handle payload. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Work on a zeroed host-side copy so the guest buffer is written once. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): clone the guest's file_handle into host
 * memory, fix up the two non-opaque header fields, and open it on the host.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest's file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* The f_handle payload is opaque; only handle_bytes/handle_type need
     * host byte order. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6734 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6736 /* signalfd siginfo conversion */
/* Convert one signalfd_siginfo record from host to target byte order and
 * signal numbering.  TINFO and INFO may alias (the caller converts in
 * place); every field is read from INFO before the matching TINFO store.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Fix: test the *input* record; the old code inspected tinfo, which
     * only worked because the caller passes the same buffer for both. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: source ssi_errno from info, not from the output struct. */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6776 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6778 int i;
6780 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6781 host_to_target_signalfd_siginfo(buf + i, buf + i);
6784 return len;
/* fd_trans hook table for signalfd descriptors: data read through them is
 * a stream of signalfd_siginfo records that must be byte-swapped for the
 * guest (see host_to_target_data_signalfd). */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Emulate signalfd4(2) (and signalfd(2) with flags == 0): convert the
 * guest sigset and flags, create the host signalfd, and register the fd
 * so subsequent reads have their payload converted for the guest.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Success: arrange for reads on this fd to be byte-swapped. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
6818 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    int tsig;

    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits; remap it and keep
           the remaining bits (core-dump flag etc.) untouched. */
        tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15; the 0x7f stop marker stays in
           the low byte. */
        tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    /* Normal exit: no signal number embedded, pass through unchanged. */
    return status;
}
/* Back /proc/self/cmdline for the guest: stream the host's cmdline into
 * FD, but drop the first argv entry, which is the path to the qemu
 * binary rather than the guest program's own name.
 * Returns 0 on success, -1 (with errno set) on read/write failure.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve read()'s errno across the close() call. */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        /* Until the NUL terminating argv[0] is seen, nothing is copied. */
        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
6884 static int open_self_maps(void *cpu_env, int fd)
6886 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6887 TaskState *ts = cpu->opaque;
6888 FILE *fp;
6889 char *line = NULL;
6890 size_t len = 0;
6891 ssize_t read;
6893 fp = fopen("/proc/self/maps", "r");
6894 if (fp == NULL) {
6895 return -1;
6898 while ((read = getline(&line, &len, fp)) != -1) {
6899 int fields, dev_maj, dev_min, inode;
6900 uint64_t min, max, offset;
6901 char flag_r, flag_w, flag_x, flag_p;
6902 char path[512] = "";
6903 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6904 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6905 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6907 if ((fields < 10) || (fields > 11)) {
6908 continue;
6910 if (h2g_valid(min)) {
6911 int flags = page_get_flags(h2g(min));
6912 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
6913 if (page_check_range(h2g(min), max - min, flags) == -1) {
6914 continue;
6916 if (h2g(min) == ts->info->stack_limit) {
6917 pstrcpy(path, sizeof(path), " [stack]");
6919 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
6920 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6921 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6922 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6923 path[0] ? " " : "", path);
6927 free(line);
6928 fclose(fp);
6930 return 0;
6933 static int open_self_stat(void *cpu_env, int fd)
6935 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6936 TaskState *ts = cpu->opaque;
6937 abi_ulong start_stack = ts->info->start_stack;
6938 int i;
6940 for (i = 0; i < 44; i++) {
6941 char buf[128];
6942 int len;
6943 uint64_t val = 0;
6945 if (i == 0) {
6946 /* pid */
6947 val = getpid();
6948 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6949 } else if (i == 1) {
6950 /* app name */
6951 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6952 } else if (i == 27) {
6953 /* stack bottom */
6954 val = start_stack;
6955 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6956 } else {
6957 /* for the rest, there is MasterCard */
6958 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6961 len = strlen(buf);
6962 if (write(fd, buf, len) != len) {
6963 return -1;
6967 return 0;
/* Back /proc/self/auxv for the guest: the auxiliary vector was saved on
 * the guest stack at exec time; copy it wholesale into the temp file and
 * rewind so the caller reads from the start.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                /* Best-effort: a short/failed write is silently dropped. */
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so unlock_user
         * receives the residual length rather than the original one —
         * harmless for a VERIFY_READ lock, but worth confirming. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 when FILENAME names ENTRY inside this process's own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<own-pid>/<entry>";
 * return 0 for every other path.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, sizeof(proc_prefix) - 1) != 0) {
        return 0;
    }
    filename += sizeof(proc_prefix) - 1;

    if (strncmp(filename, self_prefix, sizeof(self_prefix) - 1) == 0) {
        filename += sizeof(self_prefix) - 1;
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric directory: only our own pid counts as "myself". */
        char myself[80];
        size_t mylen;

        snprintf(myself, sizeof(myself), "%d/", getpid());
        mylen = strlen(myself);
        if (strncmp(filename, myself, mylen) != 0) {
            return 0;
        }
        filename += mylen;
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7024 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match helper for fake /proc entries addressed by full path
 * (counterpart to is_proc_myself for non-self entries). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7030 static int open_net_route(void *cpu_env, int fd)
7032 FILE *fp;
7033 char *line = NULL;
7034 size_t len = 0;
7035 ssize_t read;
7037 fp = fopen("/proc/net/route", "r");
7038 if (fp == NULL) {
7039 return -1;
7042 /* read header */
7044 read = getline(&line, &len, fp);
7045 dprintf(fd, "%s", line);
7047 /* read routes */
7049 while ((read = getline(&line, &len, fp)) != -1) {
7050 char iface[16];
7051 uint32_t dest, gw, mask;
7052 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7053 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7054 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7055 &mask, &mtu, &window, &irtt);
7056 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7057 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7058 metric, tswap32(mask), mtu, window, irtt);
7061 free(line);
7062 fclose(fp);
7064 return 0;
7066 #endif
/* openat(2) with QEMU's /proc interception: certain /proc entries must
 * reflect the guest rather than the qemu host process, so they are
 * synthesized into an unlinked temp file whose fd is returned instead.
 * Everything else falls through to the real host openat.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* entry to intercept */
        int (*fill)(void *cpu_env, int fd);         /* writes fake contents */
        int (*cmp)(const char *s1, const char *s2); /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        /* Only intercepted cross-endian: route words need byte-swapping. */
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* /proc/self/exe must resolve to the guest binary, not qemu. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the file vanishes when the fd is closed. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill function's errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7128 #define TIMER_MAGIC 0x0caf0000
7129 #define TIMER_MAGIC_MASK 0xffff0000
7131 /* Convert QEMU provided timer ID back to internal 16bit index format */
7132 static target_timer_t get_timer_id(abi_long arg)
7134 target_timer_t timerid = arg;
7136 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7137 return -TARGET_EINVAL;
7140 timerid &= 0xffff;
7142 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7143 return -TARGET_EINVAL;
7146 return timerid;
7149 /* do_syscall() should always have a single exit point at the end so
7150 that actions, such as logging of syscall results, can be performed.
7151 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7152 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7153 abi_long arg2, abi_long arg3, abi_long arg4,
7154 abi_long arg5, abi_long arg6, abi_long arg7,
7155 abi_long arg8)
7157 CPUState *cpu = ENV_GET_CPU(cpu_env);
7158 abi_long ret;
7159 struct stat st;
7160 struct statfs stfs;
7161 void *p;
7163 #if defined(DEBUG_ERESTARTSYS)
7164 /* Debug-only code for exercising the syscall-restart code paths
7165 * in the per-architecture cpu main loops: restart every syscall
7166 * the guest makes once before letting it through.
7169 static int flag;
7171 flag = !flag;
7172 if (flag) {
7173 return -TARGET_ERESTARTSYS;
7176 #endif
7178 #ifdef DEBUG
7179 gemu_log("syscall %d", num);
7180 #endif
7181 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7182 if(do_strace)
7183 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7185 switch(num) {
7186 case TARGET_NR_exit:
7187 /* In old applications this may be used to implement _exit(2).
7188 However in threaded applictions it is used for thread termination,
7189 and _exit_group is used for application termination.
7190 Do thread termination if we have more then one thread. */
7192 if (block_signals()) {
7193 ret = -TARGET_ERESTARTSYS;
7194 break;
7197 if (CPU_NEXT(first_cpu)) {
7198 TaskState *ts;
7200 cpu_list_lock();
7201 /* Remove the CPU from the list. */
7202 QTAILQ_REMOVE(&cpus, cpu, node);
7203 cpu_list_unlock();
7204 ts = cpu->opaque;
7205 if (ts->child_tidptr) {
7206 put_user_u32(0, ts->child_tidptr);
7207 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7208 NULL, NULL, 0);
7210 thread_cpu = NULL;
7211 object_unref(OBJECT(cpu));
7212 g_free(ts);
7213 rcu_unregister_thread();
7214 pthread_exit(NULL);
7216 #ifdef TARGET_GPROF
7217 _mcleanup();
7218 #endif
7219 gdb_exit(cpu_env, arg1);
7220 _exit(arg1);
7221 ret = 0; /* avoid warning */
7222 break;
7223 case TARGET_NR_read:
7224 if (arg3 == 0)
7225 ret = 0;
7226 else {
7227 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7228 goto efault;
7229 ret = get_errno(safe_read(arg1, p, arg3));
7230 if (ret >= 0 &&
7231 fd_trans_host_to_target_data(arg1)) {
7232 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7234 unlock_user(p, arg2, ret);
7236 break;
7237 case TARGET_NR_write:
7238 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7239 goto efault;
7240 ret = get_errno(safe_write(arg1, p, arg3));
7241 unlock_user(p, arg2, 0);
7242 break;
7243 #ifdef TARGET_NR_open
7244 case TARGET_NR_open:
7245 if (!(p = lock_user_string(arg1)))
7246 goto efault;
7247 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7248 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7249 arg3));
7250 fd_trans_unregister(ret);
7251 unlock_user(p, arg1, 0);
7252 break;
7253 #endif
7254 case TARGET_NR_openat:
7255 if (!(p = lock_user_string(arg2)))
7256 goto efault;
7257 ret = get_errno(do_openat(cpu_env, arg1, p,
7258 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7259 arg4));
7260 fd_trans_unregister(ret);
7261 unlock_user(p, arg2, 0);
7262 break;
7263 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7264 case TARGET_NR_name_to_handle_at:
7265 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7266 break;
7267 #endif
7268 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7269 case TARGET_NR_open_by_handle_at:
7270 ret = do_open_by_handle_at(arg1, arg2, arg3);
7271 fd_trans_unregister(ret);
7272 break;
7273 #endif
7274 case TARGET_NR_close:
7275 fd_trans_unregister(arg1);
7276 ret = get_errno(close(arg1));
7277 break;
7278 case TARGET_NR_brk:
7279 ret = do_brk(arg1);
7280 break;
7281 #ifdef TARGET_NR_fork
7282 case TARGET_NR_fork:
7283 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7284 break;
7285 #endif
7286 #ifdef TARGET_NR_waitpid
7287 case TARGET_NR_waitpid:
7289 int status;
7290 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7291 if (!is_error(ret) && arg2 && ret
7292 && put_user_s32(host_to_target_waitstatus(status), arg2))
7293 goto efault;
7295 break;
7296 #endif
7297 #ifdef TARGET_NR_waitid
7298 case TARGET_NR_waitid:
7300 siginfo_t info;
7301 info.si_pid = 0;
7302 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7303 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7304 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7305 goto efault;
7306 host_to_target_siginfo(p, &info);
7307 unlock_user(p, arg3, sizeof(target_siginfo_t));
7310 break;
7311 #endif
7312 #ifdef TARGET_NR_creat /* not on alpha */
7313 case TARGET_NR_creat:
7314 if (!(p = lock_user_string(arg1)))
7315 goto efault;
7316 ret = get_errno(creat(p, arg2));
7317 fd_trans_unregister(ret);
7318 unlock_user(p, arg1, 0);
7319 break;
7320 #endif
7321 #ifdef TARGET_NR_link
7322 case TARGET_NR_link:
7324 void * p2;
7325 p = lock_user_string(arg1);
7326 p2 = lock_user_string(arg2);
7327 if (!p || !p2)
7328 ret = -TARGET_EFAULT;
7329 else
7330 ret = get_errno(link(p, p2));
7331 unlock_user(p2, arg2, 0);
7332 unlock_user(p, arg1, 0);
7334 break;
7335 #endif
7336 #if defined(TARGET_NR_linkat)
7337 case TARGET_NR_linkat:
7339 void * p2 = NULL;
7340 if (!arg2 || !arg4)
7341 goto efault;
7342 p = lock_user_string(arg2);
7343 p2 = lock_user_string(arg4);
7344 if (!p || !p2)
7345 ret = -TARGET_EFAULT;
7346 else
7347 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7348 unlock_user(p, arg2, 0);
7349 unlock_user(p2, arg4, 0);
7351 break;
7352 #endif
7353 #ifdef TARGET_NR_unlink
7354 case TARGET_NR_unlink:
7355 if (!(p = lock_user_string(arg1)))
7356 goto efault;
7357 ret = get_errno(unlink(p));
7358 unlock_user(p, arg1, 0);
7359 break;
7360 #endif
7361 #if defined(TARGET_NR_unlinkat)
7362 case TARGET_NR_unlinkat:
7363 if (!(p = lock_user_string(arg2)))
7364 goto efault;
7365 ret = get_errno(unlinkat(arg1, p, arg3));
7366 unlock_user(p, arg2, 0);
7367 break;
7368 #endif
7369 case TARGET_NR_execve:
7371 char **argp, **envp;
7372 int argc, envc;
7373 abi_ulong gp;
7374 abi_ulong guest_argp;
7375 abi_ulong guest_envp;
7376 abi_ulong addr;
7377 char **q;
7378 int total_size = 0;
7380 argc = 0;
7381 guest_argp = arg2;
7382 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7383 if (get_user_ual(addr, gp))
7384 goto efault;
7385 if (!addr)
7386 break;
7387 argc++;
7389 envc = 0;
7390 guest_envp = arg3;
7391 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7392 if (get_user_ual(addr, gp))
7393 goto efault;
7394 if (!addr)
7395 break;
7396 envc++;
7399 argp = alloca((argc + 1) * sizeof(void *));
7400 envp = alloca((envc + 1) * sizeof(void *));
7402 for (gp = guest_argp, q = argp; gp;
7403 gp += sizeof(abi_ulong), q++) {
7404 if (get_user_ual(addr, gp))
7405 goto execve_efault;
7406 if (!addr)
7407 break;
7408 if (!(*q = lock_user_string(addr)))
7409 goto execve_efault;
7410 total_size += strlen(*q) + 1;
7412 *q = NULL;
7414 for (gp = guest_envp, q = envp; gp;
7415 gp += sizeof(abi_ulong), q++) {
7416 if (get_user_ual(addr, gp))
7417 goto execve_efault;
7418 if (!addr)
7419 break;
7420 if (!(*q = lock_user_string(addr)))
7421 goto execve_efault;
7422 total_size += strlen(*q) + 1;
7424 *q = NULL;
7426 if (!(p = lock_user_string(arg1)))
7427 goto execve_efault;
7428 /* Although execve() is not an interruptible syscall it is
7429 * a special case where we must use the safe_syscall wrapper:
7430 * if we allow a signal to happen before we make the host
7431 * syscall then we will 'lose' it, because at the point of
7432 * execve the process leaves QEMU's control. So we use the
7433 * safe syscall wrapper to ensure that we either take the
7434 * signal as a guest signal, or else it does not happen
7435 * before the execve completes and makes it the other
7436 * program's problem.
7438 ret = get_errno(safe_execve(p, argp, envp));
7439 unlock_user(p, arg1, 0);
7441 goto execve_end;
7443 execve_efault:
7444 ret = -TARGET_EFAULT;
7446 execve_end:
7447 for (gp = guest_argp, q = argp; *q;
7448 gp += sizeof(abi_ulong), q++) {
7449 if (get_user_ual(addr, gp)
7450 || !addr)
7451 break;
7452 unlock_user(*q, addr, 0);
7454 for (gp = guest_envp, q = envp; *q;
7455 gp += sizeof(abi_ulong), q++) {
7456 if (get_user_ual(addr, gp)
7457 || !addr)
7458 break;
7459 unlock_user(*q, addr, 0);
7462 break;
7463 case TARGET_NR_chdir:
7464 if (!(p = lock_user_string(arg1)))
7465 goto efault;
7466 ret = get_errno(chdir(p));
7467 unlock_user(p, arg1, 0);
7468 break;
7469 #ifdef TARGET_NR_time
7470 case TARGET_NR_time:
7472 time_t host_time;
7473 ret = get_errno(time(&host_time));
7474 if (!is_error(ret)
7475 && arg1
7476 && put_user_sal(host_time, arg1))
7477 goto efault;
7479 break;
7480 #endif
7481 #ifdef TARGET_NR_mknod
7482 case TARGET_NR_mknod:
7483 if (!(p = lock_user_string(arg1)))
7484 goto efault;
7485 ret = get_errno(mknod(p, arg2, arg3));
7486 unlock_user(p, arg1, 0);
7487 break;
7488 #endif
7489 #if defined(TARGET_NR_mknodat)
7490 case TARGET_NR_mknodat:
7491 if (!(p = lock_user_string(arg2)))
7492 goto efault;
7493 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7494 unlock_user(p, arg2, 0);
7495 break;
7496 #endif
7497 #ifdef TARGET_NR_chmod
7498 case TARGET_NR_chmod:
7499 if (!(p = lock_user_string(arg1)))
7500 goto efault;
7501 ret = get_errno(chmod(p, arg2));
7502 unlock_user(p, arg1, 0);
7503 break;
7504 #endif
7505 #ifdef TARGET_NR_break
7506 case TARGET_NR_break:
7507 goto unimplemented;
7508 #endif
7509 #ifdef TARGET_NR_oldstat
7510 case TARGET_NR_oldstat:
7511 goto unimplemented;
7512 #endif
7513 case TARGET_NR_lseek:
7514 ret = get_errno(lseek(arg1, arg2, arg3));
7515 break;
7516 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7517 /* Alpha specific */
7518 case TARGET_NR_getxpid:
7519 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7520 ret = get_errno(getpid());
7521 break;
7522 #endif
7523 #ifdef TARGET_NR_getpid
7524 case TARGET_NR_getpid:
7525 ret = get_errno(getpid());
7526 break;
7527 #endif
7528 case TARGET_NR_mount:
7530 /* need to look at the data field */
7531 void *p2, *p3;
7533 if (arg1) {
7534 p = lock_user_string(arg1);
7535 if (!p) {
7536 goto efault;
7538 } else {
7539 p = NULL;
7542 p2 = lock_user_string(arg2);
7543 if (!p2) {
7544 if (arg1) {
7545 unlock_user(p, arg1, 0);
7547 goto efault;
7550 if (arg3) {
7551 p3 = lock_user_string(arg3);
7552 if (!p3) {
7553 if (arg1) {
7554 unlock_user(p, arg1, 0);
7556 unlock_user(p2, arg2, 0);
7557 goto efault;
7559 } else {
7560 p3 = NULL;
7563 /* FIXME - arg5 should be locked, but it isn't clear how to
7564 * do that since it's not guaranteed to be a NULL-terminated
7565 * string.
7567 if (!arg5) {
7568 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7569 } else {
7570 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7572 ret = get_errno(ret);
7574 if (arg1) {
7575 unlock_user(p, arg1, 0);
7577 unlock_user(p2, arg2, 0);
7578 if (arg3) {
7579 unlock_user(p3, arg3, 0);
7582 break;
7583 #ifdef TARGET_NR_umount
7584 case TARGET_NR_umount:
7585 if (!(p = lock_user_string(arg1)))
7586 goto efault;
7587 ret = get_errno(umount(p));
7588 unlock_user(p, arg1, 0);
7589 break;
7590 #endif
7591 #ifdef TARGET_NR_stime /* not on alpha */
7592 case TARGET_NR_stime:
7594 time_t host_time;
7595 if (get_user_sal(host_time, arg1))
7596 goto efault;
7597 ret = get_errno(stime(&host_time));
7599 break;
7600 #endif
7601 case TARGET_NR_ptrace:
7602 goto unimplemented;
7603 #ifdef TARGET_NR_alarm /* not on alpha */
7604 case TARGET_NR_alarm:
7605 ret = alarm(arg1);
7606 break;
7607 #endif
7608 #ifdef TARGET_NR_oldfstat
7609 case TARGET_NR_oldfstat:
7610 goto unimplemented;
7611 #endif
7612 #ifdef TARGET_NR_pause /* not on alpha */
7613 case TARGET_NR_pause:
7614 if (!block_signals()) {
7615 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7617 ret = -TARGET_EINTR;
7618 break;
7619 #endif
7620 #ifdef TARGET_NR_utime
7621 case TARGET_NR_utime:
7623 struct utimbuf tbuf, *host_tbuf;
7624 struct target_utimbuf *target_tbuf;
7625 if (arg2) {
7626 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7627 goto efault;
7628 tbuf.actime = tswapal(target_tbuf->actime);
7629 tbuf.modtime = tswapal(target_tbuf->modtime);
7630 unlock_user_struct(target_tbuf, arg2, 0);
7631 host_tbuf = &tbuf;
7632 } else {
7633 host_tbuf = NULL;
7635 if (!(p = lock_user_string(arg1)))
7636 goto efault;
7637 ret = get_errno(utime(p, host_tbuf));
7638 unlock_user(p, arg1, 0);
7640 break;
7641 #endif
7642 #ifdef TARGET_NR_utimes
7643 case TARGET_NR_utimes:
7645 struct timeval *tvp, tv[2];
7646 if (arg2) {
7647 if (copy_from_user_timeval(&tv[0], arg2)
7648 || copy_from_user_timeval(&tv[1],
7649 arg2 + sizeof(struct target_timeval)))
7650 goto efault;
7651 tvp = tv;
7652 } else {
7653 tvp = NULL;
7655 if (!(p = lock_user_string(arg1)))
7656 goto efault;
7657 ret = get_errno(utimes(p, tvp));
7658 unlock_user(p, arg1, 0);
7660 break;
7661 #endif
7662 #if defined(TARGET_NR_futimesat)
7663 case TARGET_NR_futimesat:
7665 struct timeval *tvp, tv[2];
7666 if (arg3) {
7667 if (copy_from_user_timeval(&tv[0], arg3)
7668 || copy_from_user_timeval(&tv[1],
7669 arg3 + sizeof(struct target_timeval)))
7670 goto efault;
7671 tvp = tv;
7672 } else {
7673 tvp = NULL;
7675 if (!(p = lock_user_string(arg2)))
7676 goto efault;
7677 ret = get_errno(futimesat(arg1, path(p), tvp));
7678 unlock_user(p, arg2, 0);
7680 break;
7681 #endif
7682 #ifdef TARGET_NR_stty
7683 case TARGET_NR_stty:
7684 goto unimplemented;
7685 #endif
7686 #ifdef TARGET_NR_gtty
7687 case TARGET_NR_gtty:
7688 goto unimplemented;
7689 #endif
7690 #ifdef TARGET_NR_access
7691 case TARGET_NR_access:
7692 if (!(p = lock_user_string(arg1)))
7693 goto efault;
7694 ret = get_errno(access(path(p), arg2));
7695 unlock_user(p, arg1, 0);
7696 break;
7697 #endif
7698 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7699 case TARGET_NR_faccessat:
7700 if (!(p = lock_user_string(arg2)))
7701 goto efault;
7702 ret = get_errno(faccessat(arg1, p, arg3, 0));
7703 unlock_user(p, arg2, 0);
7704 break;
7705 #endif
7706 #ifdef TARGET_NR_nice /* not on alpha */
7707 case TARGET_NR_nice:
7708 ret = get_errno(nice(arg1));
7709 break;
7710 #endif
7711 #ifdef TARGET_NR_ftime
7712 case TARGET_NR_ftime:
7713 goto unimplemented;
7714 #endif
7715 case TARGET_NR_sync:
7716 sync();
7717 ret = 0;
7718 break;
7719 case TARGET_NR_kill:
7720 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7721 break;
7722 #ifdef TARGET_NR_rename
7723 case TARGET_NR_rename:
7725 void *p2;
7726 p = lock_user_string(arg1);
7727 p2 = lock_user_string(arg2);
7728 if (!p || !p2)
7729 ret = -TARGET_EFAULT;
7730 else
7731 ret = get_errno(rename(p, p2));
7732 unlock_user(p2, arg2, 0);
7733 unlock_user(p, arg1, 0);
7735 break;
7736 #endif
7737 #if defined(TARGET_NR_renameat)
7738 case TARGET_NR_renameat:
7740 void *p2;
7741 p = lock_user_string(arg2);
7742 p2 = lock_user_string(arg4);
7743 if (!p || !p2)
7744 ret = -TARGET_EFAULT;
7745 else
7746 ret = get_errno(renameat(arg1, p, arg3, p2));
7747 unlock_user(p2, arg4, 0);
7748 unlock_user(p, arg2, 0);
7750 break;
7751 #endif
7752 #ifdef TARGET_NR_mkdir
7753 case TARGET_NR_mkdir:
7754 if (!(p = lock_user_string(arg1)))
7755 goto efault;
7756 ret = get_errno(mkdir(p, arg2));
7757 unlock_user(p, arg1, 0);
7758 break;
7759 #endif
7760 #if defined(TARGET_NR_mkdirat)
7761 case TARGET_NR_mkdirat:
7762 if (!(p = lock_user_string(arg2)))
7763 goto efault;
7764 ret = get_errno(mkdirat(arg1, p, arg3));
7765 unlock_user(p, arg2, 0);
7766 break;
7767 #endif
7768 #ifdef TARGET_NR_rmdir
7769 case TARGET_NR_rmdir:
7770 if (!(p = lock_user_string(arg1)))
7771 goto efault;
7772 ret = get_errno(rmdir(p));
7773 unlock_user(p, arg1, 0);
7774 break;
7775 #endif
7776 case TARGET_NR_dup:
7777 ret = get_errno(dup(arg1));
7778 if (ret >= 0) {
7779 fd_trans_dup(arg1, ret);
7781 break;
7782 #ifdef TARGET_NR_pipe
7783 case TARGET_NR_pipe:
7784 ret = do_pipe(cpu_env, arg1, 0, 0);
7785 break;
7786 #endif
7787 #ifdef TARGET_NR_pipe2
7788 case TARGET_NR_pipe2:
7789 ret = do_pipe(cpu_env, arg1,
7790 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7791 break;
7792 #endif
7793 case TARGET_NR_times:
7795 struct target_tms *tmsp;
7796 struct tms tms;
7797 ret = get_errno(times(&tms));
7798 if (arg1) {
7799 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7800 if (!tmsp)
7801 goto efault;
7802 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7803 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7804 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7805 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7807 if (!is_error(ret))
7808 ret = host_to_target_clock_t(ret);
7810 break;
7811 #ifdef TARGET_NR_prof
7812 case TARGET_NR_prof:
7813 goto unimplemented;
7814 #endif
7815 #ifdef TARGET_NR_signal
7816 case TARGET_NR_signal:
7817 goto unimplemented;
7818 #endif
7819 case TARGET_NR_acct:
7820 if (arg1 == 0) {
7821 ret = get_errno(acct(NULL));
7822 } else {
7823 if (!(p = lock_user_string(arg1)))
7824 goto efault;
7825 ret = get_errno(acct(path(p)));
7826 unlock_user(p, arg1, 0);
7828 break;
7829 #ifdef TARGET_NR_umount2
7830 case TARGET_NR_umount2:
7831 if (!(p = lock_user_string(arg1)))
7832 goto efault;
7833 ret = get_errno(umount2(p, arg2));
7834 unlock_user(p, arg1, 0);
7835 break;
7836 #endif
7837 #ifdef TARGET_NR_lock
7838 case TARGET_NR_lock:
7839 goto unimplemented;
7840 #endif
7841 case TARGET_NR_ioctl:
7842 ret = do_ioctl(arg1, arg2, arg3);
7843 break;
7844 case TARGET_NR_fcntl:
7845 ret = do_fcntl(arg1, arg2, arg3);
7846 break;
7847 #ifdef TARGET_NR_mpx
7848 case TARGET_NR_mpx:
7849 goto unimplemented;
7850 #endif
7851 case TARGET_NR_setpgid:
7852 ret = get_errno(setpgid(arg1, arg2));
7853 break;
7854 #ifdef TARGET_NR_ulimit
7855 case TARGET_NR_ulimit:
7856 goto unimplemented;
7857 #endif
7858 #ifdef TARGET_NR_oldolduname
7859 case TARGET_NR_oldolduname:
7860 goto unimplemented;
7861 #endif
7862 case TARGET_NR_umask:
7863 ret = get_errno(umask(arg1));
7864 break;
7865 case TARGET_NR_chroot:
7866 if (!(p = lock_user_string(arg1)))
7867 goto efault;
7868 ret = get_errno(chroot(p));
7869 unlock_user(p, arg1, 0);
7870 break;
7871 #ifdef TARGET_NR_ustat
7872 case TARGET_NR_ustat:
7873 goto unimplemented;
7874 #endif
7875 #ifdef TARGET_NR_dup2
7876 case TARGET_NR_dup2:
7877 ret = get_errno(dup2(arg1, arg2));
7878 if (ret >= 0) {
7879 fd_trans_dup(arg1, arg2);
7881 break;
7882 #endif
7883 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7884 case TARGET_NR_dup3:
7885 ret = get_errno(dup3(arg1, arg2, arg3));
7886 if (ret >= 0) {
7887 fd_trans_dup(arg1, arg2);
7889 break;
7890 #endif
7891 #ifdef TARGET_NR_getppid /* not on alpha */
7892 case TARGET_NR_getppid:
7893 ret = get_errno(getppid());
7894 break;
7895 #endif
7896 #ifdef TARGET_NR_getpgrp
7897 case TARGET_NR_getpgrp:
7898 ret = get_errno(getpgrp());
7899 break;
7900 #endif
7901 case TARGET_NR_setsid:
7902 ret = get_errno(setsid());
7903 break;
7904 #ifdef TARGET_NR_sigaction
7905 case TARGET_NR_sigaction:
7907 #if defined(TARGET_ALPHA)
7908 struct target_sigaction act, oact, *pact = 0;
7909 struct target_old_sigaction *old_act;
7910 if (arg2) {
7911 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7912 goto efault;
7913 act._sa_handler = old_act->_sa_handler;
7914 target_siginitset(&act.sa_mask, old_act->sa_mask);
7915 act.sa_flags = old_act->sa_flags;
7916 act.sa_restorer = 0;
7917 unlock_user_struct(old_act, arg2, 0);
7918 pact = &act;
7920 ret = get_errno(do_sigaction(arg1, pact, &oact));
7921 if (!is_error(ret) && arg3) {
7922 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7923 goto efault;
7924 old_act->_sa_handler = oact._sa_handler;
7925 old_act->sa_mask = oact.sa_mask.sig[0];
7926 old_act->sa_flags = oact.sa_flags;
7927 unlock_user_struct(old_act, arg3, 1);
7929 #elif defined(TARGET_MIPS)
7930 struct target_sigaction act, oact, *pact, *old_act;
7932 if (arg2) {
7933 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7934 goto efault;
7935 act._sa_handler = old_act->_sa_handler;
7936 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7937 act.sa_flags = old_act->sa_flags;
7938 unlock_user_struct(old_act, arg2, 0);
7939 pact = &act;
7940 } else {
7941 pact = NULL;
7944 ret = get_errno(do_sigaction(arg1, pact, &oact));
7946 if (!is_error(ret) && arg3) {
7947 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7948 goto efault;
7949 old_act->_sa_handler = oact._sa_handler;
7950 old_act->sa_flags = oact.sa_flags;
7951 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7952 old_act->sa_mask.sig[1] = 0;
7953 old_act->sa_mask.sig[2] = 0;
7954 old_act->sa_mask.sig[3] = 0;
7955 unlock_user_struct(old_act, arg3, 1);
7957 #else
7958 struct target_old_sigaction *old_act;
7959 struct target_sigaction act, oact, *pact;
7960 if (arg2) {
7961 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7962 goto efault;
7963 act._sa_handler = old_act->_sa_handler;
7964 target_siginitset(&act.sa_mask, old_act->sa_mask);
7965 act.sa_flags = old_act->sa_flags;
7966 act.sa_restorer = old_act->sa_restorer;
7967 unlock_user_struct(old_act, arg2, 0);
7968 pact = &act;
7969 } else {
7970 pact = NULL;
7972 ret = get_errno(do_sigaction(arg1, pact, &oact));
7973 if (!is_error(ret) && arg3) {
7974 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7975 goto efault;
7976 old_act->_sa_handler = oact._sa_handler;
7977 old_act->sa_mask = oact.sa_mask.sig[0];
7978 old_act->sa_flags = oact.sa_flags;
7979 old_act->sa_restorer = oact.sa_restorer;
7980 unlock_user_struct(old_act, arg3, 1);
7982 #endif
7984 break;
7985 #endif
7986 case TARGET_NR_rt_sigaction:
7988 #if defined(TARGET_ALPHA)
7989 struct target_sigaction act, oact, *pact = 0;
7990 struct target_rt_sigaction *rt_act;
7992 if (arg4 != sizeof(target_sigset_t)) {
7993 ret = -TARGET_EINVAL;
7994 break;
7996 if (arg2) {
7997 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7998 goto efault;
7999 act._sa_handler = rt_act->_sa_handler;
8000 act.sa_mask = rt_act->sa_mask;
8001 act.sa_flags = rt_act->sa_flags;
8002 act.sa_restorer = arg5;
8003 unlock_user_struct(rt_act, arg2, 0);
8004 pact = &act;
8006 ret = get_errno(do_sigaction(arg1, pact, &oact));
8007 if (!is_error(ret) && arg3) {
8008 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8009 goto efault;
8010 rt_act->_sa_handler = oact._sa_handler;
8011 rt_act->sa_mask = oact.sa_mask;
8012 rt_act->sa_flags = oact.sa_flags;
8013 unlock_user_struct(rt_act, arg3, 1);
8015 #else
8016 struct target_sigaction *act;
8017 struct target_sigaction *oact;
8019 if (arg4 != sizeof(target_sigset_t)) {
8020 ret = -TARGET_EINVAL;
8021 break;
8023 if (arg2) {
8024 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8025 goto efault;
8026 } else
8027 act = NULL;
8028 if (arg3) {
8029 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8030 ret = -TARGET_EFAULT;
8031 goto rt_sigaction_fail;
8033 } else
8034 oact = NULL;
8035 ret = get_errno(do_sigaction(arg1, act, oact));
8036 rt_sigaction_fail:
8037 if (act)
8038 unlock_user_struct(act, arg2, 0);
8039 if (oact)
8040 unlock_user_struct(oact, arg3, 1);
8041 #endif
8043 break;
8044 #ifdef TARGET_NR_sgetmask /* not on alpha */
8045 case TARGET_NR_sgetmask:
8047 sigset_t cur_set;
8048 abi_ulong target_set;
8049 ret = do_sigprocmask(0, NULL, &cur_set);
8050 if (!ret) {
8051 host_to_target_old_sigset(&target_set, &cur_set);
8052 ret = target_set;
8055 break;
8056 #endif
8057 #ifdef TARGET_NR_ssetmask /* not on alpha */
8058 case TARGET_NR_ssetmask:
8060 sigset_t set, oset, cur_set;
8061 abi_ulong target_set = arg1;
8062 /* We only have one word of the new mask so we must read
8063 * the rest of it with do_sigprocmask() and OR in this word.
8064 * We are guaranteed that a do_sigprocmask() that only queries
8065 * the signal mask will not fail.
8067 ret = do_sigprocmask(0, NULL, &cur_set);
8068 assert(!ret);
8069 target_to_host_old_sigset(&set, &target_set);
8070 sigorset(&set, &set, &cur_set);
8071 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8072 if (!ret) {
8073 host_to_target_old_sigset(&target_set, &oset);
8074 ret = target_set;
8077 break;
8078 #endif
8079 #ifdef TARGET_NR_sigprocmask
8080 case TARGET_NR_sigprocmask:
8082 #if defined(TARGET_ALPHA)
8083 sigset_t set, oldset;
8084 abi_ulong mask;
8085 int how;
8087 switch (arg1) {
8088 case TARGET_SIG_BLOCK:
8089 how = SIG_BLOCK;
8090 break;
8091 case TARGET_SIG_UNBLOCK:
8092 how = SIG_UNBLOCK;
8093 break;
8094 case TARGET_SIG_SETMASK:
8095 how = SIG_SETMASK;
8096 break;
8097 default:
8098 ret = -TARGET_EINVAL;
8099 goto fail;
8101 mask = arg2;
8102 target_to_host_old_sigset(&set, &mask);
8104 ret = do_sigprocmask(how, &set, &oldset);
8105 if (!is_error(ret)) {
8106 host_to_target_old_sigset(&mask, &oldset);
8107 ret = mask;
8108 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8110 #else
8111 sigset_t set, oldset, *set_ptr;
8112 int how;
8114 if (arg2) {
8115 switch (arg1) {
8116 case TARGET_SIG_BLOCK:
8117 how = SIG_BLOCK;
8118 break;
8119 case TARGET_SIG_UNBLOCK:
8120 how = SIG_UNBLOCK;
8121 break;
8122 case TARGET_SIG_SETMASK:
8123 how = SIG_SETMASK;
8124 break;
8125 default:
8126 ret = -TARGET_EINVAL;
8127 goto fail;
8129 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8130 goto efault;
8131 target_to_host_old_sigset(&set, p);
8132 unlock_user(p, arg2, 0);
8133 set_ptr = &set;
8134 } else {
8135 how = 0;
8136 set_ptr = NULL;
8138 ret = do_sigprocmask(how, set_ptr, &oldset);
8139 if (!is_error(ret) && arg3) {
8140 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8141 goto efault;
8142 host_to_target_old_sigset(p, &oldset);
8143 unlock_user(p, arg3, sizeof(target_sigset_t));
8145 #endif
8147 break;
8148 #endif
8149 case TARGET_NR_rt_sigprocmask:
8151 int how = arg1;
8152 sigset_t set, oldset, *set_ptr;
8154 if (arg4 != sizeof(target_sigset_t)) {
8155 ret = -TARGET_EINVAL;
8156 break;
8159 if (arg2) {
8160 switch(how) {
8161 case TARGET_SIG_BLOCK:
8162 how = SIG_BLOCK;
8163 break;
8164 case TARGET_SIG_UNBLOCK:
8165 how = SIG_UNBLOCK;
8166 break;
8167 case TARGET_SIG_SETMASK:
8168 how = SIG_SETMASK;
8169 break;
8170 default:
8171 ret = -TARGET_EINVAL;
8172 goto fail;
8174 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8175 goto efault;
8176 target_to_host_sigset(&set, p);
8177 unlock_user(p, arg2, 0);
8178 set_ptr = &set;
8179 } else {
8180 how = 0;
8181 set_ptr = NULL;
8183 ret = do_sigprocmask(how, set_ptr, &oldset);
8184 if (!is_error(ret) && arg3) {
8185 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8186 goto efault;
8187 host_to_target_sigset(p, &oldset);
8188 unlock_user(p, arg3, sizeof(target_sigset_t));
8191 break;
8192 #ifdef TARGET_NR_sigpending
8193 case TARGET_NR_sigpending:
8195 sigset_t set;
8196 ret = get_errno(sigpending(&set));
8197 if (!is_error(ret)) {
8198 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8199 goto efault;
8200 host_to_target_old_sigset(p, &set);
8201 unlock_user(p, arg1, sizeof(target_sigset_t));
8204 break;
8205 #endif
8206 case TARGET_NR_rt_sigpending:
8208 sigset_t set;
8210 /* Yes, this check is >, not != like most. We follow the kernel's
8211 * logic and it does it like this because it implements
8212 * NR_sigpending through the same code path, and in that case
8213 * the old_sigset_t is smaller in size.
8215 if (arg2 > sizeof(target_sigset_t)) {
8216 ret = -TARGET_EINVAL;
8217 break;
8220 ret = get_errno(sigpending(&set));
8221 if (!is_error(ret)) {
8222 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8223 goto efault;
8224 host_to_target_sigset(p, &set);
8225 unlock_user(p, arg1, sizeof(target_sigset_t));
8228 break;
8229 #ifdef TARGET_NR_sigsuspend
8230 case TARGET_NR_sigsuspend:
8232 TaskState *ts = cpu->opaque;
8233 #if defined(TARGET_ALPHA)
8234 abi_ulong mask = arg1;
8235 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8236 #else
8237 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8238 goto efault;
8239 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8240 unlock_user(p, arg1, 0);
8241 #endif
8242 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8243 SIGSET_T_SIZE));
8244 if (ret != -TARGET_ERESTARTSYS) {
8245 ts->in_sigsuspend = 1;
8248 break;
8249 #endif
8250 case TARGET_NR_rt_sigsuspend:
8252 TaskState *ts = cpu->opaque;
8254 if (arg2 != sizeof(target_sigset_t)) {
8255 ret = -TARGET_EINVAL;
8256 break;
8258 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8259 goto efault;
8260 target_to_host_sigset(&ts->sigsuspend_mask, p);
8261 unlock_user(p, arg1, 0);
8262 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8263 SIGSET_T_SIZE));
8264 if (ret != -TARGET_ERESTARTSYS) {
8265 ts->in_sigsuspend = 1;
8268 break;
8269 case TARGET_NR_rt_sigtimedwait:
8271 sigset_t set;
8272 struct timespec uts, *puts;
8273 siginfo_t uinfo;
8275 if (arg4 != sizeof(target_sigset_t)) {
8276 ret = -TARGET_EINVAL;
8277 break;
8280 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8281 goto efault;
8282 target_to_host_sigset(&set, p);
8283 unlock_user(p, arg1, 0);
8284 if (arg3) {
8285 puts = &uts;
8286 target_to_host_timespec(puts, arg3);
8287 } else {
8288 puts = NULL;
8290 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8291 SIGSET_T_SIZE));
8292 if (!is_error(ret)) {
8293 if (arg2) {
8294 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8296 if (!p) {
8297 goto efault;
8299 host_to_target_siginfo(p, &uinfo);
8300 unlock_user(p, arg2, sizeof(target_siginfo_t));
8302 ret = host_to_target_signal(ret);
8305 break;
8306 case TARGET_NR_rt_sigqueueinfo:
8308 siginfo_t uinfo;
8310 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8311 if (!p) {
8312 goto efault;
8314 target_to_host_siginfo(&uinfo, p);
8315 unlock_user(p, arg1, 0);
8316 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8318 break;
8319 #ifdef TARGET_NR_sigreturn
8320 case TARGET_NR_sigreturn:
8321 if (block_signals()) {
8322 ret = -TARGET_ERESTARTSYS;
8323 } else {
8324 ret = do_sigreturn(cpu_env);
8326 break;
8327 #endif
8328 case TARGET_NR_rt_sigreturn:
8329 if (block_signals()) {
8330 ret = -TARGET_ERESTARTSYS;
8331 } else {
8332 ret = do_rt_sigreturn(cpu_env);
8334 break;
8335 case TARGET_NR_sethostname:
8336 if (!(p = lock_user_string(arg1)))
8337 goto efault;
8338 ret = get_errno(sethostname(p, arg2));
8339 unlock_user(p, arg1, 0);
8340 break;
8341 case TARGET_NR_setrlimit:
8343 int resource = target_to_host_resource(arg1);
8344 struct target_rlimit *target_rlim;
8345 struct rlimit rlim;
8346 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8347 goto efault;
8348 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8349 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8350 unlock_user_struct(target_rlim, arg2, 0);
8351 ret = get_errno(setrlimit(resource, &rlim));
8353 break;
8354 case TARGET_NR_getrlimit:
8356 int resource = target_to_host_resource(arg1);
8357 struct target_rlimit *target_rlim;
8358 struct rlimit rlim;
8360 ret = get_errno(getrlimit(resource, &rlim));
8361 if (!is_error(ret)) {
8362 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8363 goto efault;
8364 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8365 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8366 unlock_user_struct(target_rlim, arg2, 1);
8369 break;
8370 case TARGET_NR_getrusage:
8372 struct rusage rusage;
8373 ret = get_errno(getrusage(arg1, &rusage));
8374 if (!is_error(ret)) {
8375 ret = host_to_target_rusage(arg2, &rusage);
8378 break;
8379 case TARGET_NR_gettimeofday:
8381 struct timeval tv;
8382 ret = get_errno(gettimeofday(&tv, NULL));
8383 if (!is_error(ret)) {
8384 if (copy_to_user_timeval(arg1, &tv))
8385 goto efault;
8388 break;
8389 case TARGET_NR_settimeofday:
8391 struct timeval tv, *ptv = NULL;
8392 struct timezone tz, *ptz = NULL;
8394 if (arg1) {
8395 if (copy_from_user_timeval(&tv, arg1)) {
8396 goto efault;
8398 ptv = &tv;
8401 if (arg2) {
8402 if (copy_from_user_timezone(&tz, arg2)) {
8403 goto efault;
8405 ptz = &tz;
8408 ret = get_errno(settimeofday(ptv, ptz));
8410 break;
8411 #if defined(TARGET_NR_select)
8412 case TARGET_NR_select:
8413 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8414 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8415 #else
8417 struct target_sel_arg_struct *sel;
8418 abi_ulong inp, outp, exp, tvp;
8419 long nsel;
8421 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8422 goto efault;
8423 nsel = tswapal(sel->n);
8424 inp = tswapal(sel->inp);
8425 outp = tswapal(sel->outp);
8426 exp = tswapal(sel->exp);
8427 tvp = tswapal(sel->tvp);
8428 unlock_user_struct(sel, arg1, 0);
8429 ret = do_select(nsel, inp, outp, exp, tvp);
8431 #endif
8432 break;
8433 #endif
8434 #ifdef TARGET_NR_pselect6
8435 case TARGET_NR_pselect6:
8437 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8438 fd_set rfds, wfds, efds;
8439 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8440 struct timespec ts, *ts_ptr;
8443 * The 6th arg is actually two args smashed together,
8444 * so we cannot use the C library.
8446 sigset_t set;
8447 struct {
8448 sigset_t *set;
8449 size_t size;
8450 } sig, *sig_ptr;
8452 abi_ulong arg_sigset, arg_sigsize, *arg7;
8453 target_sigset_t *target_sigset;
8455 n = arg1;
8456 rfd_addr = arg2;
8457 wfd_addr = arg3;
8458 efd_addr = arg4;
8459 ts_addr = arg5;
8461 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8462 if (ret) {
8463 goto fail;
8465 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8466 if (ret) {
8467 goto fail;
8469 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8470 if (ret) {
8471 goto fail;
8475 * This takes a timespec, and not a timeval, so we cannot
8476 * use the do_select() helper ...
8478 if (ts_addr) {
8479 if (target_to_host_timespec(&ts, ts_addr)) {
8480 goto efault;
8482 ts_ptr = &ts;
8483 } else {
8484 ts_ptr = NULL;
8487 /* Extract the two packed args for the sigset */
8488 if (arg6) {
8489 sig_ptr = &sig;
8490 sig.size = SIGSET_T_SIZE;
8492 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8493 if (!arg7) {
8494 goto efault;
8496 arg_sigset = tswapal(arg7[0]);
8497 arg_sigsize = tswapal(arg7[1]);
8498 unlock_user(arg7, arg6, 0);
8500 if (arg_sigset) {
8501 sig.set = &set;
8502 if (arg_sigsize != sizeof(*target_sigset)) {
8503 /* Like the kernel, we enforce correct size sigsets */
8504 ret = -TARGET_EINVAL;
8505 goto fail;
8507 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8508 sizeof(*target_sigset), 1);
8509 if (!target_sigset) {
8510 goto efault;
8512 target_to_host_sigset(&set, target_sigset);
8513 unlock_user(target_sigset, arg_sigset, 0);
8514 } else {
8515 sig.set = NULL;
8517 } else {
8518 sig_ptr = NULL;
8521 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8522 ts_ptr, sig_ptr));
8524 if (!is_error(ret)) {
8525 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8526 goto efault;
8527 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8528 goto efault;
8529 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8530 goto efault;
8532 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8533 goto efault;
8536 break;
8537 #endif
8538 #ifdef TARGET_NR_symlink
8539 case TARGET_NR_symlink:
8541 void *p2;
8542 p = lock_user_string(arg1);
8543 p2 = lock_user_string(arg2);
8544 if (!p || !p2)
8545 ret = -TARGET_EFAULT;
8546 else
8547 ret = get_errno(symlink(p, p2));
8548 unlock_user(p2, arg2, 0);
8549 unlock_user(p, arg1, 0);
8551 break;
8552 #endif
8553 #if defined(TARGET_NR_symlinkat)
8554 case TARGET_NR_symlinkat:
8556 void *p2;
8557 p = lock_user_string(arg1);
8558 p2 = lock_user_string(arg3);
8559 if (!p || !p2)
8560 ret = -TARGET_EFAULT;
8561 else
8562 ret = get_errno(symlinkat(p, arg2, p2));
8563 unlock_user(p2, arg3, 0);
8564 unlock_user(p, arg1, 0);
8566 break;
8567 #endif
8568 #ifdef TARGET_NR_oldlstat
8569 case TARGET_NR_oldlstat:
8570 goto unimplemented;
8571 #endif
8572 #ifdef TARGET_NR_readlink
8573 case TARGET_NR_readlink:
8575 void *p2;
8576 p = lock_user_string(arg1);
8577 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8578 if (!p || !p2) {
8579 ret = -TARGET_EFAULT;
8580 } else if (!arg3) {
8581 /* Short circuit this for the magic exe check. */
8582 ret = -TARGET_EINVAL;
8583 } else if (is_proc_myself((const char *)p, "exe")) {
8584 char real[PATH_MAX], *temp;
8585 temp = realpath(exec_path, real);
8586 /* Return value is # of bytes that we wrote to the buffer. */
8587 if (temp == NULL) {
8588 ret = get_errno(-1);
8589 } else {
8590 /* Don't worry about sign mismatch as earlier mapping
8591 * logic would have thrown a bad address error. */
8592 ret = MIN(strlen(real), arg3);
8593 /* We cannot NUL terminate the string. */
8594 memcpy(p2, real, ret);
8596 } else {
8597 ret = get_errno(readlink(path(p), p2, arg3));
8599 unlock_user(p2, arg2, ret);
8600 unlock_user(p, arg1, 0);
8602 break;
8603 #endif
8604 #if defined(TARGET_NR_readlinkat)
8605 case TARGET_NR_readlinkat:
8607 void *p2;
8608 p = lock_user_string(arg2);
8609 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8610 if (!p || !p2) {
8611 ret = -TARGET_EFAULT;
8612 } else if (is_proc_myself((const char *)p, "exe")) {
8613 char real[PATH_MAX], *temp;
8614 temp = realpath(exec_path, real);
8615 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8616 snprintf((char *)p2, arg4, "%s", real);
8617 } else {
8618 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8620 unlock_user(p2, arg3, ret);
8621 unlock_user(p, arg2, 0);
8623 break;
8624 #endif
8625 #ifdef TARGET_NR_uselib
8626 case TARGET_NR_uselib:
8627 goto unimplemented;
8628 #endif
8629 #ifdef TARGET_NR_swapon
8630 case TARGET_NR_swapon:
8631 if (!(p = lock_user_string(arg1)))
8632 goto efault;
8633 ret = get_errno(swapon(p, arg2));
8634 unlock_user(p, arg1, 0);
8635 break;
8636 #endif
8637 case TARGET_NR_reboot:
8638 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8639 /* arg4 must be ignored in all other cases */
8640 p = lock_user_string(arg4);
8641 if (!p) {
8642 goto efault;
8644 ret = get_errno(reboot(arg1, arg2, arg3, p));
8645 unlock_user(p, arg4, 0);
8646 } else {
8647 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8649 break;
8650 #ifdef TARGET_NR_readdir
8651 case TARGET_NR_readdir:
8652 goto unimplemented;
8653 #endif
8654 #ifdef TARGET_NR_mmap
8655 case TARGET_NR_mmap:
8656 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8657 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8658 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8659 || defined(TARGET_S390X)
8661 abi_ulong *v;
8662 abi_ulong v1, v2, v3, v4, v5, v6;
8663 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8664 goto efault;
8665 v1 = tswapal(v[0]);
8666 v2 = tswapal(v[1]);
8667 v3 = tswapal(v[2]);
8668 v4 = tswapal(v[3]);
8669 v5 = tswapal(v[4]);
8670 v6 = tswapal(v[5]);
8671 unlock_user(v, arg1, 0);
8672 ret = get_errno(target_mmap(v1, v2, v3,
8673 target_to_host_bitmask(v4, mmap_flags_tbl),
8674 v5, v6));
8676 #else
8677 ret = get_errno(target_mmap(arg1, arg2, arg3,
8678 target_to_host_bitmask(arg4, mmap_flags_tbl),
8679 arg5,
8680 arg6));
8681 #endif
8682 break;
8683 #endif
8684 #ifdef TARGET_NR_mmap2
8685 case TARGET_NR_mmap2:
8686 #ifndef MMAP_SHIFT
8687 #define MMAP_SHIFT 12
8688 #endif
8689 ret = get_errno(target_mmap(arg1, arg2, arg3,
8690 target_to_host_bitmask(arg4, mmap_flags_tbl),
8691 arg5,
8692 arg6 << MMAP_SHIFT));
8693 break;
8694 #endif
8695 case TARGET_NR_munmap:
8696 ret = get_errno(target_munmap(arg1, arg2));
8697 break;
8698 case TARGET_NR_mprotect:
8700 TaskState *ts = cpu->opaque;
8701 /* Special hack to detect libc making the stack executable. */
8702 if ((arg3 & PROT_GROWSDOWN)
8703 && arg1 >= ts->info->stack_limit
8704 && arg1 <= ts->info->start_stack) {
8705 arg3 &= ~PROT_GROWSDOWN;
8706 arg2 = arg2 + arg1 - ts->info->stack_limit;
8707 arg1 = ts->info->stack_limit;
8710 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8711 break;
8712 #ifdef TARGET_NR_mremap
8713 case TARGET_NR_mremap:
8714 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8715 break;
8716 #endif
8717 /* ??? msync/mlock/munlock are broken for softmmu. */
8718 #ifdef TARGET_NR_msync
8719 case TARGET_NR_msync:
8720 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8721 break;
8722 #endif
8723 #ifdef TARGET_NR_mlock
8724 case TARGET_NR_mlock:
8725 ret = get_errno(mlock(g2h(arg1), arg2));
8726 break;
8727 #endif
8728 #ifdef TARGET_NR_munlock
8729 case TARGET_NR_munlock:
8730 ret = get_errno(munlock(g2h(arg1), arg2));
8731 break;
8732 #endif
8733 #ifdef TARGET_NR_mlockall
8734 case TARGET_NR_mlockall:
8735 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8736 break;
8737 #endif
8738 #ifdef TARGET_NR_munlockall
8739 case TARGET_NR_munlockall:
8740 ret = get_errno(munlockall());
8741 break;
8742 #endif
8743 case TARGET_NR_truncate:
8744 if (!(p = lock_user_string(arg1)))
8745 goto efault;
8746 ret = get_errno(truncate(p, arg2));
8747 unlock_user(p, arg1, 0);
8748 break;
8749 case TARGET_NR_ftruncate:
8750 ret = get_errno(ftruncate(arg1, arg2));
8751 break;
8752 case TARGET_NR_fchmod:
8753 ret = get_errno(fchmod(arg1, arg2));
8754 break;
8755 #if defined(TARGET_NR_fchmodat)
8756 case TARGET_NR_fchmodat:
8757 if (!(p = lock_user_string(arg2)))
8758 goto efault;
8759 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8760 unlock_user(p, arg2, 0);
8761 break;
8762 #endif
8763 case TARGET_NR_getpriority:
8764 /* Note that negative values are valid for getpriority, so we must
8765 differentiate based on errno settings. */
8766 errno = 0;
8767 ret = getpriority(arg1, arg2);
8768 if (ret == -1 && errno != 0) {
8769 ret = -host_to_target_errno(errno);
8770 break;
8772 #ifdef TARGET_ALPHA
8773 /* Return value is the unbiased priority. Signal no error. */
8774 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8775 #else
8776 /* Return value is a biased priority to avoid negative numbers. */
8777 ret = 20 - ret;
8778 #endif
8779 break;
8780 case TARGET_NR_setpriority:
8781 ret = get_errno(setpriority(arg1, arg2, arg3));
8782 break;
8783 #ifdef TARGET_NR_profil
8784 case TARGET_NR_profil:
8785 goto unimplemented;
8786 #endif
8787 case TARGET_NR_statfs:
8788 if (!(p = lock_user_string(arg1)))
8789 goto efault;
8790 ret = get_errno(statfs(path(p), &stfs));
8791 unlock_user(p, arg1, 0);
8792 convert_statfs:
8793 if (!is_error(ret)) {
8794 struct target_statfs *target_stfs;
8796 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8797 goto efault;
8798 __put_user(stfs.f_type, &target_stfs->f_type);
8799 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8800 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8801 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8802 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8803 __put_user(stfs.f_files, &target_stfs->f_files);
8804 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8805 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8806 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8807 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8808 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8809 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8810 unlock_user_struct(target_stfs, arg2, 1);
8812 break;
8813 case TARGET_NR_fstatfs:
8814 ret = get_errno(fstatfs(arg1, &stfs));
8815 goto convert_statfs;
8816 #ifdef TARGET_NR_statfs64
8817 case TARGET_NR_statfs64:
8818 if (!(p = lock_user_string(arg1)))
8819 goto efault;
8820 ret = get_errno(statfs(path(p), &stfs));
8821 unlock_user(p, arg1, 0);
8822 convert_statfs64:
8823 if (!is_error(ret)) {
8824 struct target_statfs64 *target_stfs;
8826 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8827 goto efault;
8828 __put_user(stfs.f_type, &target_stfs->f_type);
8829 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8830 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8831 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8832 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8833 __put_user(stfs.f_files, &target_stfs->f_files);
8834 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8835 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8836 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8837 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8838 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8839 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8840 unlock_user_struct(target_stfs, arg3, 1);
8842 break;
8843 case TARGET_NR_fstatfs64:
8844 ret = get_errno(fstatfs(arg1, &stfs));
8845 goto convert_statfs64;
8846 #endif
8847 #ifdef TARGET_NR_ioperm
8848 case TARGET_NR_ioperm:
8849 goto unimplemented;
8850 #endif
8851 #ifdef TARGET_NR_socketcall
8852 case TARGET_NR_socketcall:
8853 ret = do_socketcall(arg1, arg2);
8854 break;
8855 #endif
8856 #ifdef TARGET_NR_accept
8857 case TARGET_NR_accept:
8858 ret = do_accept4(arg1, arg2, arg3, 0);
8859 break;
8860 #endif
8861 #ifdef TARGET_NR_accept4
8862 case TARGET_NR_accept4:
8863 ret = do_accept4(arg1, arg2, arg3, arg4);
8864 break;
8865 #endif
8866 #ifdef TARGET_NR_bind
8867 case TARGET_NR_bind:
8868 ret = do_bind(arg1, arg2, arg3);
8869 break;
8870 #endif
8871 #ifdef TARGET_NR_connect
8872 case TARGET_NR_connect:
8873 ret = do_connect(arg1, arg2, arg3);
8874 break;
8875 #endif
8876 #ifdef TARGET_NR_getpeername
8877 case TARGET_NR_getpeername:
8878 ret = do_getpeername(arg1, arg2, arg3);
8879 break;
8880 #endif
8881 #ifdef TARGET_NR_getsockname
8882 case TARGET_NR_getsockname:
8883 ret = do_getsockname(arg1, arg2, arg3);
8884 break;
8885 #endif
8886 #ifdef TARGET_NR_getsockopt
8887 case TARGET_NR_getsockopt:
8888 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8889 break;
8890 #endif
8891 #ifdef TARGET_NR_listen
8892 case TARGET_NR_listen:
8893 ret = get_errno(listen(arg1, arg2));
8894 break;
8895 #endif
8896 #ifdef TARGET_NR_recv
8897 case TARGET_NR_recv:
8898 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8899 break;
8900 #endif
8901 #ifdef TARGET_NR_recvfrom
8902 case TARGET_NR_recvfrom:
8903 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8904 break;
8905 #endif
8906 #ifdef TARGET_NR_recvmsg
8907 case TARGET_NR_recvmsg:
8908 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8909 break;
8910 #endif
8911 #ifdef TARGET_NR_send
8912 case TARGET_NR_send:
8913 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8914 break;
8915 #endif
8916 #ifdef TARGET_NR_sendmsg
8917 case TARGET_NR_sendmsg:
8918 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8919 break;
8920 #endif
8921 #ifdef TARGET_NR_sendmmsg
8922 case TARGET_NR_sendmmsg:
8923 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8924 break;
8925 case TARGET_NR_recvmmsg:
8926 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8927 break;
8928 #endif
8929 #ifdef TARGET_NR_sendto
8930 case TARGET_NR_sendto:
8931 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8932 break;
8933 #endif
8934 #ifdef TARGET_NR_shutdown
8935 case TARGET_NR_shutdown:
8936 ret = get_errno(shutdown(arg1, arg2));
8937 break;
8938 #endif
8939 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8940 case TARGET_NR_getrandom:
8941 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8942 if (!p) {
8943 goto efault;
8945 ret = get_errno(getrandom(p, arg2, arg3));
8946 unlock_user(p, arg1, ret);
8947 break;
8948 #endif
8949 #ifdef TARGET_NR_socket
8950 case TARGET_NR_socket:
8951 ret = do_socket(arg1, arg2, arg3);
8952 fd_trans_unregister(ret);
8953 break;
8954 #endif
8955 #ifdef TARGET_NR_socketpair
8956 case TARGET_NR_socketpair:
8957 ret = do_socketpair(arg1, arg2, arg3, arg4);
8958 break;
8959 #endif
8960 #ifdef TARGET_NR_setsockopt
8961 case TARGET_NR_setsockopt:
8962 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8963 break;
8964 #endif
8966 case TARGET_NR_syslog:
8967 if (!(p = lock_user_string(arg2)))
8968 goto efault;
8969 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8970 unlock_user(p, arg2, 0);
8971 break;
8973 case TARGET_NR_setitimer:
8975 struct itimerval value, ovalue, *pvalue;
8977 if (arg2) {
8978 pvalue = &value;
8979 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8980 || copy_from_user_timeval(&pvalue->it_value,
8981 arg2 + sizeof(struct target_timeval)))
8982 goto efault;
8983 } else {
8984 pvalue = NULL;
8986 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8987 if (!is_error(ret) && arg3) {
8988 if (copy_to_user_timeval(arg3,
8989 &ovalue.it_interval)
8990 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8991 &ovalue.it_value))
8992 goto efault;
8995 break;
8996 case TARGET_NR_getitimer:
8998 struct itimerval value;
9000 ret = get_errno(getitimer(arg1, &value));
9001 if (!is_error(ret) && arg2) {
9002 if (copy_to_user_timeval(arg2,
9003 &value.it_interval)
9004 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9005 &value.it_value))
9006 goto efault;
9009 break;
9010 #ifdef TARGET_NR_stat
9011 case TARGET_NR_stat:
9012 if (!(p = lock_user_string(arg1)))
9013 goto efault;
9014 ret = get_errno(stat(path(p), &st));
9015 unlock_user(p, arg1, 0);
9016 goto do_stat;
9017 #endif
9018 #ifdef TARGET_NR_lstat
9019 case TARGET_NR_lstat:
9020 if (!(p = lock_user_string(arg1)))
9021 goto efault;
9022 ret = get_errno(lstat(path(p), &st));
9023 unlock_user(p, arg1, 0);
9024 goto do_stat;
9025 #endif
9026 case TARGET_NR_fstat:
9028 ret = get_errno(fstat(arg1, &st));
9029 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9030 do_stat:
9031 #endif
9032 if (!is_error(ret)) {
9033 struct target_stat *target_st;
9035 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9036 goto efault;
9037 memset(target_st, 0, sizeof(*target_st));
9038 __put_user(st.st_dev, &target_st->st_dev);
9039 __put_user(st.st_ino, &target_st->st_ino);
9040 __put_user(st.st_mode, &target_st->st_mode);
9041 __put_user(st.st_uid, &target_st->st_uid);
9042 __put_user(st.st_gid, &target_st->st_gid);
9043 __put_user(st.st_nlink, &target_st->st_nlink);
9044 __put_user(st.st_rdev, &target_st->st_rdev);
9045 __put_user(st.st_size, &target_st->st_size);
9046 __put_user(st.st_blksize, &target_st->st_blksize);
9047 __put_user(st.st_blocks, &target_st->st_blocks);
9048 __put_user(st.st_atime, &target_st->target_st_atime);
9049 __put_user(st.st_mtime, &target_st->target_st_mtime);
9050 __put_user(st.st_ctime, &target_st->target_st_ctime);
9051 unlock_user_struct(target_st, arg2, 1);
9054 break;
9055 #ifdef TARGET_NR_olduname
9056 case TARGET_NR_olduname:
9057 goto unimplemented;
9058 #endif
9059 #ifdef TARGET_NR_iopl
9060 case TARGET_NR_iopl:
9061 goto unimplemented;
9062 #endif
9063 case TARGET_NR_vhangup:
9064 ret = get_errno(vhangup());
9065 break;
9066 #ifdef TARGET_NR_idle
9067 case TARGET_NR_idle:
9068 goto unimplemented;
9069 #endif
9070 #ifdef TARGET_NR_syscall
9071 case TARGET_NR_syscall:
9072 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9073 arg6, arg7, arg8, 0);
9074 break;
9075 #endif
9076 case TARGET_NR_wait4:
9078 int status;
9079 abi_long status_ptr = arg2;
9080 struct rusage rusage, *rusage_ptr;
9081 abi_ulong target_rusage = arg4;
9082 abi_long rusage_err;
9083 if (target_rusage)
9084 rusage_ptr = &rusage;
9085 else
9086 rusage_ptr = NULL;
9087 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9088 if (!is_error(ret)) {
9089 if (status_ptr && ret) {
9090 status = host_to_target_waitstatus(status);
9091 if (put_user_s32(status, status_ptr))
9092 goto efault;
9094 if (target_rusage) {
9095 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9096 if (rusage_err) {
9097 ret = rusage_err;
9102 break;
9103 #ifdef TARGET_NR_swapoff
9104 case TARGET_NR_swapoff:
9105 if (!(p = lock_user_string(arg1)))
9106 goto efault;
9107 ret = get_errno(swapoff(p));
9108 unlock_user(p, arg1, 0);
9109 break;
9110 #endif
9111 case TARGET_NR_sysinfo:
9113 struct target_sysinfo *target_value;
9114 struct sysinfo value;
9115 ret = get_errno(sysinfo(&value));
9116 if (!is_error(ret) && arg1)
9118 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9119 goto efault;
9120 __put_user(value.uptime, &target_value->uptime);
9121 __put_user(value.loads[0], &target_value->loads[0]);
9122 __put_user(value.loads[1], &target_value->loads[1]);
9123 __put_user(value.loads[2], &target_value->loads[2]);
9124 __put_user(value.totalram, &target_value->totalram);
9125 __put_user(value.freeram, &target_value->freeram);
9126 __put_user(value.sharedram, &target_value->sharedram);
9127 __put_user(value.bufferram, &target_value->bufferram);
9128 __put_user(value.totalswap, &target_value->totalswap);
9129 __put_user(value.freeswap, &target_value->freeswap);
9130 __put_user(value.procs, &target_value->procs);
9131 __put_user(value.totalhigh, &target_value->totalhigh);
9132 __put_user(value.freehigh, &target_value->freehigh);
9133 __put_user(value.mem_unit, &target_value->mem_unit);
9134 unlock_user_struct(target_value, arg1, 1);
9137 break;
9138 #ifdef TARGET_NR_ipc
9139 case TARGET_NR_ipc:
9140 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
9141 break;
9142 #endif
9143 #ifdef TARGET_NR_semget
9144 case TARGET_NR_semget:
9145 ret = get_errno(semget(arg1, arg2, arg3));
9146 break;
9147 #endif
9148 #ifdef TARGET_NR_semop
9149 case TARGET_NR_semop:
9150 ret = do_semop(arg1, arg2, arg3);
9151 break;
9152 #endif
9153 #ifdef TARGET_NR_semctl
9154 case TARGET_NR_semctl:
9155 ret = do_semctl(arg1, arg2, arg3, arg4);
9156 break;
9157 #endif
9158 #ifdef TARGET_NR_msgctl
9159 case TARGET_NR_msgctl:
9160 ret = do_msgctl(arg1, arg2, arg3);
9161 break;
9162 #endif
9163 #ifdef TARGET_NR_msgget
9164 case TARGET_NR_msgget:
9165 ret = get_errno(msgget(arg1, arg2));
9166 break;
9167 #endif
9168 #ifdef TARGET_NR_msgrcv
9169 case TARGET_NR_msgrcv:
9170 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9171 break;
9172 #endif
9173 #ifdef TARGET_NR_msgsnd
9174 case TARGET_NR_msgsnd:
9175 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9176 break;
9177 #endif
9178 #ifdef TARGET_NR_shmget
9179 case TARGET_NR_shmget:
9180 ret = get_errno(shmget(arg1, arg2, arg3));
9181 break;
9182 #endif
9183 #ifdef TARGET_NR_shmctl
9184 case TARGET_NR_shmctl:
9185 ret = do_shmctl(arg1, arg2, arg3);
9186 break;
9187 #endif
9188 #ifdef TARGET_NR_shmat
9189 case TARGET_NR_shmat:
9190 ret = do_shmat(arg1, arg2, arg3);
9191 break;
9192 #endif
9193 #ifdef TARGET_NR_shmdt
9194 case TARGET_NR_shmdt:
9195 ret = do_shmdt(arg1);
9196 break;
9197 #endif
9198 case TARGET_NR_fsync:
9199 ret = get_errno(fsync(arg1));
9200 break;
9201 case TARGET_NR_clone:
9202 /* Linux manages to have three different orderings for its
9203 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9204 * match the kernel's CONFIG_CLONE_* settings.
9205 * Microblaze is further special in that it uses a sixth
9206 * implicit argument to clone for the TLS pointer.
9208 #if defined(TARGET_MICROBLAZE)
9209 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9210 #elif defined(TARGET_CLONE_BACKWARDS)
9211 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9212 #elif defined(TARGET_CLONE_BACKWARDS2)
9213 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9214 #else
9215 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9216 #endif
9217 break;
9218 #ifdef __NR_exit_group
9219 /* new thread calls */
9220 case TARGET_NR_exit_group:
9221 #ifdef TARGET_GPROF
9222 _mcleanup();
9223 #endif
9224 gdb_exit(cpu_env, arg1);
9225 ret = get_errno(exit_group(arg1));
9226 break;
9227 #endif
9228 case TARGET_NR_setdomainname:
9229 if (!(p = lock_user_string(arg1)))
9230 goto efault;
9231 ret = get_errno(setdomainname(p, arg2));
9232 unlock_user(p, arg1, 0);
9233 break;
9234 case TARGET_NR_uname:
9235 /* no need to transcode because we use the linux syscall */
9237 struct new_utsname * buf;
9239 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9240 goto efault;
9241 ret = get_errno(sys_uname(buf));
9242 if (!is_error(ret)) {
9243 /* Overwrite the native machine name with whatever is being
9244 emulated. */
9245 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9246 /* Allow the user to override the reported release. */
9247 if (qemu_uname_release && *qemu_uname_release) {
9248 g_strlcpy(buf->release, qemu_uname_release,
9249 sizeof(buf->release));
9252 unlock_user_struct(buf, arg1, 1);
9254 break;
9255 #ifdef TARGET_I386
9256 case TARGET_NR_modify_ldt:
9257 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9258 break;
9259 #if !defined(TARGET_X86_64)
9260 case TARGET_NR_vm86old:
9261 goto unimplemented;
9262 case TARGET_NR_vm86:
9263 ret = do_vm86(cpu_env, arg1, arg2);
9264 break;
9265 #endif
9266 #endif
9267 case TARGET_NR_adjtimex:
9268 goto unimplemented;
9269 #ifdef TARGET_NR_create_module
9270 case TARGET_NR_create_module:
9271 #endif
9272 case TARGET_NR_init_module:
9273 case TARGET_NR_delete_module:
9274 #ifdef TARGET_NR_get_kernel_syms
9275 case TARGET_NR_get_kernel_syms:
9276 #endif
9277 goto unimplemented;
9278 case TARGET_NR_quotactl:
9279 goto unimplemented;
9280 case TARGET_NR_getpgid:
9281 ret = get_errno(getpgid(arg1));
9282 break;
9283 case TARGET_NR_fchdir:
9284 ret = get_errno(fchdir(arg1));
9285 break;
9286 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9287 case TARGET_NR_bdflush:
9288 goto unimplemented;
9289 #endif
9290 #ifdef TARGET_NR_sysfs
9291 case TARGET_NR_sysfs:
9292 goto unimplemented;
9293 #endif
9294 case TARGET_NR_personality:
9295 ret = get_errno(personality(arg1));
9296 break;
9297 #ifdef TARGET_NR_afs_syscall
9298 case TARGET_NR_afs_syscall:
9299 goto unimplemented;
9300 #endif
9301 #ifdef TARGET_NR__llseek /* Not on alpha */
9302 case TARGET_NR__llseek:
9304 int64_t res;
9305 #if !defined(__NR_llseek)
9306 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
9307 if (res == -1) {
9308 ret = get_errno(res);
9309 } else {
9310 ret = 0;
9312 #else
9313 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9314 #endif
9315 if ((ret == 0) && put_user_s64(res, arg4)) {
9316 goto efault;
9319 break;
9320 #endif
9321 #ifdef TARGET_NR_getdents
9322 case TARGET_NR_getdents:
9323 #ifdef __NR_getdents
9324 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9326 struct target_dirent *target_dirp;
9327 struct linux_dirent *dirp;
9328 abi_long count = arg3;
9330 dirp = g_try_malloc(count);
9331 if (!dirp) {
9332 ret = -TARGET_ENOMEM;
9333 goto fail;
9336 ret = get_errno(sys_getdents(arg1, dirp, count));
9337 if (!is_error(ret)) {
9338 struct linux_dirent *de;
9339 struct target_dirent *tde;
9340 int len = ret;
9341 int reclen, treclen;
9342 int count1, tnamelen;
9344 count1 = 0;
9345 de = dirp;
9346 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9347 goto efault;
9348 tde = target_dirp;
9349 while (len > 0) {
9350 reclen = de->d_reclen;
9351 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9352 assert(tnamelen >= 0);
9353 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9354 assert(count1 + treclen <= count);
9355 tde->d_reclen = tswap16(treclen);
9356 tde->d_ino = tswapal(de->d_ino);
9357 tde->d_off = tswapal(de->d_off);
9358 memcpy(tde->d_name, de->d_name, tnamelen);
9359 de = (struct linux_dirent *)((char *)de + reclen);
9360 len -= reclen;
9361 tde = (struct target_dirent *)((char *)tde + treclen);
9362 count1 += treclen;
9364 ret = count1;
9365 unlock_user(target_dirp, arg2, ret);
9367 g_free(dirp);
9369 #else
9371 struct linux_dirent *dirp;
9372 abi_long count = arg3;
9374 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9375 goto efault;
9376 ret = get_errno(sys_getdents(arg1, dirp, count));
9377 if (!is_error(ret)) {
9378 struct linux_dirent *de;
9379 int len = ret;
9380 int reclen;
9381 de = dirp;
9382 while (len > 0) {
9383 reclen = de->d_reclen;
9384 if (reclen > len)
9385 break;
9386 de->d_reclen = tswap16(reclen);
9387 tswapls(&de->d_ino);
9388 tswapls(&de->d_off);
9389 de = (struct linux_dirent *)((char *)de + reclen);
9390 len -= reclen;
9393 unlock_user(dirp, arg2, ret);
9395 #endif
9396 #else
9397 /* Implement getdents in terms of getdents64 */
9399 struct linux_dirent64 *dirp;
9400 abi_long count = arg3;
9402 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9403 if (!dirp) {
9404 goto efault;
9406 ret = get_errno(sys_getdents64(arg1, dirp, count));
9407 if (!is_error(ret)) {
9408 /* Convert the dirent64 structs to target dirent. We do this
9409 * in-place, since we can guarantee that a target_dirent is no
9410 * larger than a dirent64; however this means we have to be
9411 * careful to read everything before writing in the new format.
9413 struct linux_dirent64 *de;
9414 struct target_dirent *tde;
9415 int len = ret;
9416 int tlen = 0;
9418 de = dirp;
9419 tde = (struct target_dirent *)dirp;
9420 while (len > 0) {
9421 int namelen, treclen;
9422 int reclen = de->d_reclen;
9423 uint64_t ino = de->d_ino;
9424 int64_t off = de->d_off;
9425 uint8_t type = de->d_type;
9427 namelen = strlen(de->d_name);
9428 treclen = offsetof(struct target_dirent, d_name)
9429 + namelen + 2;
9430 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9432 memmove(tde->d_name, de->d_name, namelen + 1);
9433 tde->d_ino = tswapal(ino);
9434 tde->d_off = tswapal(off);
9435 tde->d_reclen = tswap16(treclen);
9436 /* The target_dirent type is in what was formerly a padding
9437 * byte at the end of the structure:
9439 *(((char *)tde) + treclen - 1) = type;
9441 de = (struct linux_dirent64 *)((char *)de + reclen);
9442 tde = (struct target_dirent *)((char *)tde + treclen);
9443 len -= reclen;
9444 tlen += treclen;
9446 ret = tlen;
9448 unlock_user(dirp, arg2, ret);
9450 #endif
9451 break;
9452 #endif /* TARGET_NR_getdents */
9453 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9454 case TARGET_NR_getdents64:
9456 struct linux_dirent64 *dirp;
9457 abi_long count = arg3;
9458 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9459 goto efault;
9460 ret = get_errno(sys_getdents64(arg1, dirp, count));
9461 if (!is_error(ret)) {
9462 struct linux_dirent64 *de;
9463 int len = ret;
9464 int reclen;
9465 de = dirp;
9466 while (len > 0) {
9467 reclen = de->d_reclen;
9468 if (reclen > len)
9469 break;
9470 de->d_reclen = tswap16(reclen);
9471 tswap64s((uint64_t *)&de->d_ino);
9472 tswap64s((uint64_t *)&de->d_off);
9473 de = (struct linux_dirent64 *)((char *)de + reclen);
9474 len -= reclen;
9477 unlock_user(dirp, arg2, ret);
9479 break;
9480 #endif /* TARGET_NR_getdents64 */
9481 #if defined(TARGET_NR__newselect)
9482 case TARGET_NR__newselect:
9483 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9484 break;
9485 #endif
9486 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9487 # ifdef TARGET_NR_poll
9488 case TARGET_NR_poll:
9489 # endif
9490 # ifdef TARGET_NR_ppoll
9491 case TARGET_NR_ppoll:
9492 # endif
9494 struct target_pollfd *target_pfd;
9495 unsigned int nfds = arg2;
9496 struct pollfd *pfd;
9497 unsigned int i;
9499 pfd = NULL;
9500 target_pfd = NULL;
9501 if (nfds) {
9502 target_pfd = lock_user(VERIFY_WRITE, arg1,
9503 sizeof(struct target_pollfd) * nfds, 1);
9504 if (!target_pfd) {
9505 goto efault;
9508 pfd = alloca(sizeof(struct pollfd) * nfds);
9509 for (i = 0; i < nfds; i++) {
9510 pfd[i].fd = tswap32(target_pfd[i].fd);
9511 pfd[i].events = tswap16(target_pfd[i].events);
9515 switch (num) {
9516 # ifdef TARGET_NR_ppoll
9517 case TARGET_NR_ppoll:
9519 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9520 target_sigset_t *target_set;
9521 sigset_t _set, *set = &_set;
9523 if (arg3) {
9524 if (target_to_host_timespec(timeout_ts, arg3)) {
9525 unlock_user(target_pfd, arg1, 0);
9526 goto efault;
9528 } else {
9529 timeout_ts = NULL;
9532 if (arg4) {
9533 if (arg5 != sizeof(target_sigset_t)) {
9534 unlock_user(target_pfd, arg1, 0);
9535 ret = -TARGET_EINVAL;
9536 break;
9539 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9540 if (!target_set) {
9541 unlock_user(target_pfd, arg1, 0);
9542 goto efault;
9544 target_to_host_sigset(set, target_set);
9545 } else {
9546 set = NULL;
9549 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9550 set, SIGSET_T_SIZE));
9552 if (!is_error(ret) && arg3) {
9553 host_to_target_timespec(arg3, timeout_ts);
9555 if (arg4) {
9556 unlock_user(target_set, arg4, 0);
9558 break;
9560 # endif
9561 # ifdef TARGET_NR_poll
9562 case TARGET_NR_poll:
9564 struct timespec ts, *pts;
9566 if (arg3 >= 0) {
9567 /* Convert ms to secs, ns */
9568 ts.tv_sec = arg3 / 1000;
9569 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9570 pts = &ts;
9571 } else {
9572 /* -ve poll() timeout means "infinite" */
9573 pts = NULL;
9575 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9576 break;
9578 # endif
9579 default:
9580 g_assert_not_reached();
9583 if (!is_error(ret)) {
9584 for(i = 0; i < nfds; i++) {
9585 target_pfd[i].revents = tswap16(pfd[i].revents);
9588 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9590 break;
9591 #endif
9592 case TARGET_NR_flock:
9593 /* NOTE: the flock constant seems to be the same for every
9594 Linux platform */
9595 ret = get_errno(safe_flock(arg1, arg2));
9596 break;
9597 case TARGET_NR_readv:
9599 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9600 if (vec != NULL) {
9601 ret = get_errno(safe_readv(arg1, vec, arg3));
9602 unlock_iovec(vec, arg2, arg3, 1);
9603 } else {
9604 ret = -host_to_target_errno(errno);
9607 break;
9608 case TARGET_NR_writev:
9610 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9611 if (vec != NULL) {
9612 ret = get_errno(safe_writev(arg1, vec, arg3));
9613 unlock_iovec(vec, arg2, arg3, 0);
9614 } else {
9615 ret = -host_to_target_errno(errno);
9618 break;
9619 case TARGET_NR_getsid:
9620 ret = get_errno(getsid(arg1));
9621 break;
9622 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9623 case TARGET_NR_fdatasync:
9624 ret = get_errno(fdatasync(arg1));
9625 break;
9626 #endif
9627 #ifdef TARGET_NR__sysctl
9628 case TARGET_NR__sysctl:
9629 /* We don't implement this, but ENOTDIR is always a safe
9630 return value. */
9631 ret = -TARGET_ENOTDIR;
9632 break;
9633 #endif
9634 case TARGET_NR_sched_getaffinity:
9636 unsigned int mask_size;
9637 unsigned long *mask;
9640 * sched_getaffinity needs multiples of ulong, so need to take
9641 * care of mismatches between target ulong and host ulong sizes.
9643 if (arg2 & (sizeof(abi_ulong) - 1)) {
9644 ret = -TARGET_EINVAL;
9645 break;
9647 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9649 mask = alloca(mask_size);
9650 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9652 if (!is_error(ret)) {
9653 if (ret > arg2) {
9654 /* More data returned than the caller's buffer will fit.
9655 * This only happens if sizeof(abi_long) < sizeof(long)
9656 * and the caller passed us a buffer holding an odd number
9657 * of abi_longs. If the host kernel is actually using the
9658 * extra 4 bytes then fail EINVAL; otherwise we can just
9659 * ignore them and only copy the interesting part.
9661 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9662 if (numcpus > arg2 * 8) {
9663 ret = -TARGET_EINVAL;
9664 break;
9666 ret = arg2;
9669 if (copy_to_user(arg3, mask, ret)) {
9670 goto efault;
9674 break;
9675 case TARGET_NR_sched_setaffinity:
9677 unsigned int mask_size;
9678 unsigned long *mask;
9681 * sched_setaffinity needs multiples of ulong, so need to take
9682 * care of mismatches between target ulong and host ulong sizes.
9684 if (arg2 & (sizeof(abi_ulong) - 1)) {
9685 ret = -TARGET_EINVAL;
9686 break;
9688 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9690 mask = alloca(mask_size);
9691 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9692 goto efault;
9694 memcpy(mask, p, arg2);
9695 unlock_user_struct(p, arg2, 0);
9697 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9699 break;
9700 case TARGET_NR_sched_setparam:
9702 struct sched_param *target_schp;
9703 struct sched_param schp;
9705 if (arg2 == 0) {
9706 return -TARGET_EINVAL;
9708 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9709 goto efault;
9710 schp.sched_priority = tswap32(target_schp->sched_priority);
9711 unlock_user_struct(target_schp, arg2, 0);
9712 ret = get_errno(sched_setparam(arg1, &schp));
9714 break;
9715 case TARGET_NR_sched_getparam:
9717 struct sched_param *target_schp;
9718 struct sched_param schp;
9720 if (arg2 == 0) {
9721 return -TARGET_EINVAL;
9723 ret = get_errno(sched_getparam(arg1, &schp));
9724 if (!is_error(ret)) {
9725 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9726 goto efault;
9727 target_schp->sched_priority = tswap32(schp.sched_priority);
9728 unlock_user_struct(target_schp, arg2, 1);
9731 break;
9732 case TARGET_NR_sched_setscheduler:
9734 struct sched_param *target_schp;
9735 struct sched_param schp;
9736 if (arg3 == 0) {
9737 return -TARGET_EINVAL;
9739 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9740 goto efault;
9741 schp.sched_priority = tswap32(target_schp->sched_priority);
9742 unlock_user_struct(target_schp, arg3, 0);
9743 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9745 break;
9746 case TARGET_NR_sched_getscheduler:
9747 ret = get_errno(sched_getscheduler(arg1));
9748 break;
9749 case TARGET_NR_sched_yield:
9750 ret = get_errno(sched_yield());
9751 break;
9752 case TARGET_NR_sched_get_priority_max:
9753 ret = get_errno(sched_get_priority_max(arg1));
9754 break;
9755 case TARGET_NR_sched_get_priority_min:
9756 ret = get_errno(sched_get_priority_min(arg1));
9757 break;
9758 case TARGET_NR_sched_rr_get_interval:
9760 struct timespec ts;
9761 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9762 if (!is_error(ret)) {
9763 ret = host_to_target_timespec(arg2, &ts);
9766 break;
9767 case TARGET_NR_nanosleep:
9769 struct timespec req, rem;
9770 target_to_host_timespec(&req, arg1);
9771 ret = get_errno(safe_nanosleep(&req, &rem));
9772 if (is_error(ret) && arg2) {
9773 host_to_target_timespec(arg2, &rem);
9776 break;
9777 #ifdef TARGET_NR_query_module
9778 case TARGET_NR_query_module:
9779 goto unimplemented;
9780 #endif
9781 #ifdef TARGET_NR_nfsservctl
9782 case TARGET_NR_nfsservctl:
9783 goto unimplemented;
9784 #endif
9785 case TARGET_NR_prctl:
9786 switch (arg1) {
9787 case PR_GET_PDEATHSIG:
9789 int deathsig;
9790 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9791 if (!is_error(ret) && arg2
9792 && put_user_ual(deathsig, arg2)) {
9793 goto efault;
9795 break;
9797 #ifdef PR_GET_NAME
9798 case PR_GET_NAME:
9800 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9801 if (!name) {
9802 goto efault;
9804 ret = get_errno(prctl(arg1, (unsigned long)name,
9805 arg3, arg4, arg5));
9806 unlock_user(name, arg2, 16);
9807 break;
9809 case PR_SET_NAME:
9811 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9812 if (!name) {
9813 goto efault;
9815 ret = get_errno(prctl(arg1, (unsigned long)name,
9816 arg3, arg4, arg5));
9817 unlock_user(name, arg2, 0);
9818 break;
9820 #endif
9821 default:
9822 /* Most prctl options have no pointer arguments */
9823 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9824 break;
9826 break;
9827 #ifdef TARGET_NR_arch_prctl
9828 case TARGET_NR_arch_prctl:
9829 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9830 ret = do_arch_prctl(cpu_env, arg1, arg2);
9831 break;
9832 #else
9833 goto unimplemented;
9834 #endif
9835 #endif
9836 #ifdef TARGET_NR_pread64
9837 case TARGET_NR_pread64:
9838 if (regpairs_aligned(cpu_env)) {
9839 arg4 = arg5;
9840 arg5 = arg6;
9842 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9843 goto efault;
9844 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9845 unlock_user(p, arg2, ret);
9846 break;
9847 case TARGET_NR_pwrite64:
9848 if (regpairs_aligned(cpu_env)) {
9849 arg4 = arg5;
9850 arg5 = arg6;
9852 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9853 goto efault;
9854 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9855 unlock_user(p, arg2, 0);
9856 break;
9857 #endif
9858 case TARGET_NR_getcwd:
9859 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9860 goto efault;
9861 ret = get_errno(sys_getcwd1(p, arg2));
9862 unlock_user(p, arg1, ret);
9863 break;
9864 case TARGET_NR_capget:
9865 case TARGET_NR_capset:
9867 struct target_user_cap_header *target_header;
9868 struct target_user_cap_data *target_data = NULL;
9869 struct __user_cap_header_struct header;
9870 struct __user_cap_data_struct data[2];
9871 struct __user_cap_data_struct *dataptr = NULL;
9872 int i, target_datalen;
9873 int data_items = 1;
9875 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9876 goto efault;
9878 header.version = tswap32(target_header->version);
9879 header.pid = tswap32(target_header->pid);
9881 if (header.version != _LINUX_CAPABILITY_VERSION) {
9882 /* Version 2 and up takes pointer to two user_data structs */
9883 data_items = 2;
9886 target_datalen = sizeof(*target_data) * data_items;
9888 if (arg2) {
9889 if (num == TARGET_NR_capget) {
9890 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9891 } else {
9892 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9894 if (!target_data) {
9895 unlock_user_struct(target_header, arg1, 0);
9896 goto efault;
9899 if (num == TARGET_NR_capset) {
9900 for (i = 0; i < data_items; i++) {
9901 data[i].effective = tswap32(target_data[i].effective);
9902 data[i].permitted = tswap32(target_data[i].permitted);
9903 data[i].inheritable = tswap32(target_data[i].inheritable);
9907 dataptr = data;
9910 if (num == TARGET_NR_capget) {
9911 ret = get_errno(capget(&header, dataptr));
9912 } else {
9913 ret = get_errno(capset(&header, dataptr));
9916 /* The kernel always updates version for both capget and capset */
9917 target_header->version = tswap32(header.version);
9918 unlock_user_struct(target_header, arg1, 1);
9920 if (arg2) {
9921 if (num == TARGET_NR_capget) {
9922 for (i = 0; i < data_items; i++) {
9923 target_data[i].effective = tswap32(data[i].effective);
9924 target_data[i].permitted = tswap32(data[i].permitted);
9925 target_data[i].inheritable = tswap32(data[i].inheritable);
9927 unlock_user(target_data, arg2, target_datalen);
9928 } else {
9929 unlock_user(target_data, arg2, 0);
9932 break;
9934 case TARGET_NR_sigaltstack:
9935 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9936 break;
9938 #ifdef CONFIG_SENDFILE
9939 case TARGET_NR_sendfile:
9941 off_t *offp = NULL;
9942 off_t off;
9943 if (arg3) {
9944 ret = get_user_sal(off, arg3);
9945 if (is_error(ret)) {
9946 break;
9948 offp = &off;
9950 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9951 if (!is_error(ret) && arg3) {
9952 abi_long ret2 = put_user_sal(off, arg3);
9953 if (is_error(ret2)) {
9954 ret = ret2;
9957 break;
9959 #ifdef TARGET_NR_sendfile64
9960 case TARGET_NR_sendfile64:
9962 off_t *offp = NULL;
9963 off_t off;
9964 if (arg3) {
9965 ret = get_user_s64(off, arg3);
9966 if (is_error(ret)) {
9967 break;
9969 offp = &off;
9971 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9972 if (!is_error(ret) && arg3) {
9973 abi_long ret2 = put_user_s64(off, arg3);
9974 if (is_error(ret2)) {
9975 ret = ret2;
9978 break;
9980 #endif
9981 #else
9982 case TARGET_NR_sendfile:
9983 #ifdef TARGET_NR_sendfile64
9984 case TARGET_NR_sendfile64:
9985 #endif
9986 goto unimplemented;
9987 #endif
9989 #ifdef TARGET_NR_getpmsg
9990 case TARGET_NR_getpmsg:
9991 goto unimplemented;
9992 #endif
9993 #ifdef TARGET_NR_putpmsg
9994 case TARGET_NR_putpmsg:
9995 goto unimplemented;
9996 #endif
9997 #ifdef TARGET_NR_vfork
9998 case TARGET_NR_vfork:
9999 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10000 0, 0, 0, 0));
10001 break;
10002 #endif
10003 #ifdef TARGET_NR_ugetrlimit
10004 case TARGET_NR_ugetrlimit:
10006 struct rlimit rlim;
10007 int resource = target_to_host_resource(arg1);
10008 ret = get_errno(getrlimit(resource, &rlim));
10009 if (!is_error(ret)) {
10010 struct target_rlimit *target_rlim;
10011 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10012 goto efault;
10013 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10014 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10015 unlock_user_struct(target_rlim, arg2, 1);
10017 break;
10019 #endif
10020 #ifdef TARGET_NR_truncate64
10021 case TARGET_NR_truncate64:
10022 if (!(p = lock_user_string(arg1)))
10023 goto efault;
10024 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10025 unlock_user(p, arg1, 0);
10026 break;
10027 #endif
10028 #ifdef TARGET_NR_ftruncate64
10029 case TARGET_NR_ftruncate64:
10030 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10031 break;
10032 #endif
10033 #ifdef TARGET_NR_stat64
10034 case TARGET_NR_stat64:
10035 if (!(p = lock_user_string(arg1)))
10036 goto efault;
10037 ret = get_errno(stat(path(p), &st));
10038 unlock_user(p, arg1, 0);
10039 if (!is_error(ret))
10040 ret = host_to_target_stat64(cpu_env, arg2, &st);
10041 break;
10042 #endif
10043 #ifdef TARGET_NR_lstat64
10044 case TARGET_NR_lstat64:
10045 if (!(p = lock_user_string(arg1)))
10046 goto efault;
10047 ret = get_errno(lstat(path(p), &st));
10048 unlock_user(p, arg1, 0);
10049 if (!is_error(ret))
10050 ret = host_to_target_stat64(cpu_env, arg2, &st);
10051 break;
10052 #endif
10053 #ifdef TARGET_NR_fstat64
10054 case TARGET_NR_fstat64:
10055 ret = get_errno(fstat(arg1, &st));
10056 if (!is_error(ret))
10057 ret = host_to_target_stat64(cpu_env, arg2, &st);
10058 break;
10059 #endif
10060 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10061 #ifdef TARGET_NR_fstatat64
10062 case TARGET_NR_fstatat64:
10063 #endif
10064 #ifdef TARGET_NR_newfstatat
10065 case TARGET_NR_newfstatat:
10066 #endif
10067 if (!(p = lock_user_string(arg2)))
10068 goto efault;
10069 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10070 if (!is_error(ret))
10071 ret = host_to_target_stat64(cpu_env, arg3, &st);
10072 break;
10073 #endif
10074 #ifdef TARGET_NR_lchown
10075 case TARGET_NR_lchown:
10076 if (!(p = lock_user_string(arg1)))
10077 goto efault;
10078 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10079 unlock_user(p, arg1, 0);
10080 break;
10081 #endif
10082 #ifdef TARGET_NR_getuid
10083 case TARGET_NR_getuid:
10084 ret = get_errno(high2lowuid(getuid()));
10085 break;
10086 #endif
10087 #ifdef TARGET_NR_getgid
10088 case TARGET_NR_getgid:
10089 ret = get_errno(high2lowgid(getgid()));
10090 break;
10091 #endif
10092 #ifdef TARGET_NR_geteuid
10093 case TARGET_NR_geteuid:
10094 ret = get_errno(high2lowuid(geteuid()));
10095 break;
10096 #endif
10097 #ifdef TARGET_NR_getegid
10098 case TARGET_NR_getegid:
10099 ret = get_errno(high2lowgid(getegid()));
10100 break;
10101 #endif
10102 case TARGET_NR_setreuid:
10103 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10104 break;
10105 case TARGET_NR_setregid:
10106 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10107 break;
10108 case TARGET_NR_getgroups:
10110 int gidsetsize = arg1;
10111 target_id *target_grouplist;
10112 gid_t *grouplist;
10113 int i;
10115 grouplist = alloca(gidsetsize * sizeof(gid_t));
10116 ret = get_errno(getgroups(gidsetsize, grouplist));
10117 if (gidsetsize == 0)
10118 break;
10119 if (!is_error(ret)) {
10120 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10121 if (!target_grouplist)
10122 goto efault;
10123 for(i = 0;i < ret; i++)
10124 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10125 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10128 break;
10129 case TARGET_NR_setgroups:
10131 int gidsetsize = arg1;
10132 target_id *target_grouplist;
10133 gid_t *grouplist = NULL;
10134 int i;
10135 if (gidsetsize) {
10136 grouplist = alloca(gidsetsize * sizeof(gid_t));
10137 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10138 if (!target_grouplist) {
10139 ret = -TARGET_EFAULT;
10140 goto fail;
10142 for (i = 0; i < gidsetsize; i++) {
10143 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10145 unlock_user(target_grouplist, arg2, 0);
10147 ret = get_errno(setgroups(gidsetsize, grouplist));
10149 break;
10150 case TARGET_NR_fchown:
10151 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10152 break;
10153 #if defined(TARGET_NR_fchownat)
10154 case TARGET_NR_fchownat:
10155 if (!(p = lock_user_string(arg2)))
10156 goto efault;
10157 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10158 low2highgid(arg4), arg5));
10159 unlock_user(p, arg2, 0);
10160 break;
10161 #endif
10162 #ifdef TARGET_NR_setresuid
10163 case TARGET_NR_setresuid:
10164 ret = get_errno(sys_setresuid(low2highuid(arg1),
10165 low2highuid(arg2),
10166 low2highuid(arg3)));
10167 break;
10168 #endif
10169 #ifdef TARGET_NR_getresuid
10170 case TARGET_NR_getresuid:
10172 uid_t ruid, euid, suid;
10173 ret = get_errno(getresuid(&ruid, &euid, &suid));
10174 if (!is_error(ret)) {
10175 if (put_user_id(high2lowuid(ruid), arg1)
10176 || put_user_id(high2lowuid(euid), arg2)
10177 || put_user_id(high2lowuid(suid), arg3))
10178 goto efault;
10181 break;
10182 #endif
10183 #ifdef TARGET_NR_getresgid
10184 case TARGET_NR_setresgid:
10185 ret = get_errno(sys_setresgid(low2highgid(arg1),
10186 low2highgid(arg2),
10187 low2highgid(arg3)));
10188 break;
10189 #endif
10190 #ifdef TARGET_NR_getresgid
10191 case TARGET_NR_getresgid:
10193 gid_t rgid, egid, sgid;
10194 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10195 if (!is_error(ret)) {
10196 if (put_user_id(high2lowgid(rgid), arg1)
10197 || put_user_id(high2lowgid(egid), arg2)
10198 || put_user_id(high2lowgid(sgid), arg3))
10199 goto efault;
10202 break;
10203 #endif
10204 #ifdef TARGET_NR_chown
10205 case TARGET_NR_chown:
10206 if (!(p = lock_user_string(arg1)))
10207 goto efault;
10208 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10209 unlock_user(p, arg1, 0);
10210 break;
10211 #endif
10212 case TARGET_NR_setuid:
10213 ret = get_errno(sys_setuid(low2highuid(arg1)));
10214 break;
10215 case TARGET_NR_setgid:
10216 ret = get_errno(sys_setgid(low2highgid(arg1)));
10217 break;
10218 case TARGET_NR_setfsuid:
10219 ret = get_errno(setfsuid(arg1));
10220 break;
10221 case TARGET_NR_setfsgid:
10222 ret = get_errno(setfsgid(arg1));
10223 break;
10225 #ifdef TARGET_NR_lchown32
10226 case TARGET_NR_lchown32:
10227 if (!(p = lock_user_string(arg1)))
10228 goto efault;
10229 ret = get_errno(lchown(p, arg2, arg3));
10230 unlock_user(p, arg1, 0);
10231 break;
10232 #endif
10233 #ifdef TARGET_NR_getuid32
10234 case TARGET_NR_getuid32:
10235 ret = get_errno(getuid());
10236 break;
10237 #endif
10239 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10240 /* Alpha specific */
10241 case TARGET_NR_getxuid:
10243 uid_t euid;
10244 euid=geteuid();
10245 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10247 ret = get_errno(getuid());
10248 break;
10249 #endif
10250 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10251 /* Alpha specific */
10252 case TARGET_NR_getxgid:
10254 uid_t egid;
10255 egid=getegid();
10256 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10258 ret = get_errno(getgid());
10259 break;
10260 #endif
10261 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10262 /* Alpha specific */
10263 case TARGET_NR_osf_getsysinfo:
10264 ret = -TARGET_EOPNOTSUPP;
10265 switch (arg1) {
10266 case TARGET_GSI_IEEE_FP_CONTROL:
10268 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10270 /* Copied from linux ieee_fpcr_to_swcr. */
10271 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10272 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10273 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10274 | SWCR_TRAP_ENABLE_DZE
10275 | SWCR_TRAP_ENABLE_OVF);
10276 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10277 | SWCR_TRAP_ENABLE_INE);
10278 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10279 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10281 if (put_user_u64 (swcr, arg2))
10282 goto efault;
10283 ret = 0;
10285 break;
10287 /* case GSI_IEEE_STATE_AT_SIGNAL:
10288 -- Not implemented in linux kernel.
10289 case GSI_UACPROC:
10290 -- Retrieves current unaligned access state; not much used.
10291 case GSI_PROC_TYPE:
10292 -- Retrieves implver information; surely not used.
10293 case GSI_GET_HWRPB:
10294 -- Grabs a copy of the HWRPB; surely not used.
10297 break;
10298 #endif
10299 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10300 /* Alpha specific */
10301 case TARGET_NR_osf_setsysinfo:
10302 ret = -TARGET_EOPNOTSUPP;
10303 switch (arg1) {
10304 case TARGET_SSI_IEEE_FP_CONTROL:
10306 uint64_t swcr, fpcr, orig_fpcr;
10308 if (get_user_u64 (swcr, arg2)) {
10309 goto efault;
10311 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10312 fpcr = orig_fpcr & FPCR_DYN_MASK;
10314 /* Copied from linux ieee_swcr_to_fpcr. */
10315 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10316 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10317 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10318 | SWCR_TRAP_ENABLE_DZE
10319 | SWCR_TRAP_ENABLE_OVF)) << 48;
10320 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10321 | SWCR_TRAP_ENABLE_INE)) << 57;
10322 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10323 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10325 cpu_alpha_store_fpcr(cpu_env, fpcr);
10326 ret = 0;
10328 break;
10330 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10332 uint64_t exc, fpcr, orig_fpcr;
10333 int si_code;
10335 if (get_user_u64(exc, arg2)) {
10336 goto efault;
10339 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10341 /* We only add to the exception status here. */
10342 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10344 cpu_alpha_store_fpcr(cpu_env, fpcr);
10345 ret = 0;
10347 /* Old exceptions are not signaled. */
10348 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10350 /* If any exceptions set by this call,
10351 and are unmasked, send a signal. */
10352 si_code = 0;
10353 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10354 si_code = TARGET_FPE_FLTRES;
10356 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10357 si_code = TARGET_FPE_FLTUND;
10359 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10360 si_code = TARGET_FPE_FLTOVF;
10362 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10363 si_code = TARGET_FPE_FLTDIV;
10365 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10366 si_code = TARGET_FPE_FLTINV;
10368 if (si_code != 0) {
10369 target_siginfo_t info;
10370 info.si_signo = SIGFPE;
10371 info.si_errno = 0;
10372 info.si_code = si_code;
10373 info._sifields._sigfault._addr
10374 = ((CPUArchState *)cpu_env)->pc;
10375 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10378 break;
10380 /* case SSI_NVPAIRS:
10381 -- Used with SSIN_UACPROC to enable unaligned accesses.
10382 case SSI_IEEE_STATE_AT_SIGNAL:
10383 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10384 -- Not implemented in linux kernel
10387 break;
10388 #endif
10389 #ifdef TARGET_NR_osf_sigprocmask
10390 /* Alpha specific. */
10391 case TARGET_NR_osf_sigprocmask:
10393 abi_ulong mask;
10394 int how;
10395 sigset_t set, oldset;
10397 switch(arg1) {
10398 case TARGET_SIG_BLOCK:
10399 how = SIG_BLOCK;
10400 break;
10401 case TARGET_SIG_UNBLOCK:
10402 how = SIG_UNBLOCK;
10403 break;
10404 case TARGET_SIG_SETMASK:
10405 how = SIG_SETMASK;
10406 break;
10407 default:
10408 ret = -TARGET_EINVAL;
10409 goto fail;
10411 mask = arg2;
10412 target_to_host_old_sigset(&set, &mask);
10413 ret = do_sigprocmask(how, &set, &oldset);
10414 if (!ret) {
10415 host_to_target_old_sigset(&mask, &oldset);
10416 ret = mask;
10419 break;
10420 #endif
10422 #ifdef TARGET_NR_getgid32
10423 case TARGET_NR_getgid32:
10424 ret = get_errno(getgid());
10425 break;
10426 #endif
10427 #ifdef TARGET_NR_geteuid32
10428 case TARGET_NR_geteuid32:
10429 ret = get_errno(geteuid());
10430 break;
10431 #endif
10432 #ifdef TARGET_NR_getegid32
10433 case TARGET_NR_getegid32:
10434 ret = get_errno(getegid());
10435 break;
10436 #endif
10437 #ifdef TARGET_NR_setreuid32
10438 case TARGET_NR_setreuid32:
10439 ret = get_errno(setreuid(arg1, arg2));
10440 break;
10441 #endif
10442 #ifdef TARGET_NR_setregid32
10443 case TARGET_NR_setregid32:
10444 ret = get_errno(setregid(arg1, arg2));
10445 break;
10446 #endif
10447 #ifdef TARGET_NR_getgroups32
10448 case TARGET_NR_getgroups32:
10450 int gidsetsize = arg1;
10451 uint32_t *target_grouplist;
10452 gid_t *grouplist;
10453 int i;
10455 grouplist = alloca(gidsetsize * sizeof(gid_t));
10456 ret = get_errno(getgroups(gidsetsize, grouplist));
10457 if (gidsetsize == 0)
10458 break;
10459 if (!is_error(ret)) {
10460 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10461 if (!target_grouplist) {
10462 ret = -TARGET_EFAULT;
10463 goto fail;
10465 for(i = 0;i < ret; i++)
10466 target_grouplist[i] = tswap32(grouplist[i]);
10467 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10470 break;
10471 #endif
10472 #ifdef TARGET_NR_setgroups32
10473 case TARGET_NR_setgroups32:
10475 int gidsetsize = arg1;
10476 uint32_t *target_grouplist;
10477 gid_t *grouplist;
10478 int i;
10480 grouplist = alloca(gidsetsize * sizeof(gid_t));
10481 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10482 if (!target_grouplist) {
10483 ret = -TARGET_EFAULT;
10484 goto fail;
10486 for(i = 0;i < gidsetsize; i++)
10487 grouplist[i] = tswap32(target_grouplist[i]);
10488 unlock_user(target_grouplist, arg2, 0);
10489 ret = get_errno(setgroups(gidsetsize, grouplist));
10491 break;
10492 #endif
10493 #ifdef TARGET_NR_fchown32
10494 case TARGET_NR_fchown32:
10495 ret = get_errno(fchown(arg1, arg2, arg3));
10496 break;
10497 #endif
10498 #ifdef TARGET_NR_setresuid32
10499 case TARGET_NR_setresuid32:
10500 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10501 break;
10502 #endif
10503 #ifdef TARGET_NR_getresuid32
10504 case TARGET_NR_getresuid32:
10506 uid_t ruid, euid, suid;
10507 ret = get_errno(getresuid(&ruid, &euid, &suid));
10508 if (!is_error(ret)) {
10509 if (put_user_u32(ruid, arg1)
10510 || put_user_u32(euid, arg2)
10511 || put_user_u32(suid, arg3))
10512 goto efault;
10515 break;
10516 #endif
10517 #ifdef TARGET_NR_setresgid32
10518 case TARGET_NR_setresgid32:
10519 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10520 break;
10521 #endif
10522 #ifdef TARGET_NR_getresgid32
10523 case TARGET_NR_getresgid32:
10525 gid_t rgid, egid, sgid;
10526 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10527 if (!is_error(ret)) {
10528 if (put_user_u32(rgid, arg1)
10529 || put_user_u32(egid, arg2)
10530 || put_user_u32(sgid, arg3))
10531 goto efault;
10534 break;
10535 #endif
10536 #ifdef TARGET_NR_chown32
10537 case TARGET_NR_chown32:
10538 if (!(p = lock_user_string(arg1)))
10539 goto efault;
10540 ret = get_errno(chown(p, arg2, arg3));
10541 unlock_user(p, arg1, 0);
10542 break;
10543 #endif
10544 #ifdef TARGET_NR_setuid32
10545 case TARGET_NR_setuid32:
10546 ret = get_errno(sys_setuid(arg1));
10547 break;
10548 #endif
10549 #ifdef TARGET_NR_setgid32
10550 case TARGET_NR_setgid32:
10551 ret = get_errno(sys_setgid(arg1));
10552 break;
10553 #endif
10554 #ifdef TARGET_NR_setfsuid32
10555 case TARGET_NR_setfsuid32:
10556 ret = get_errno(setfsuid(arg1));
10557 break;
10558 #endif
10559 #ifdef TARGET_NR_setfsgid32
10560 case TARGET_NR_setfsgid32:
10561 ret = get_errno(setfsgid(arg1));
10562 break;
10563 #endif
10565 case TARGET_NR_pivot_root:
10566 goto unimplemented;
10567 #ifdef TARGET_NR_mincore
10568 case TARGET_NR_mincore:
10570 void *a;
10571 ret = -TARGET_EFAULT;
10572 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10573 goto efault;
10574 if (!(p = lock_user_string(arg3)))
10575 goto mincore_fail;
10576 ret = get_errno(mincore(a, arg2, p));
10577 unlock_user(p, arg3, ret);
10578 mincore_fail:
10579 unlock_user(a, arg1, 0);
10581 break;
10582 #endif
10583 #ifdef TARGET_NR_arm_fadvise64_64
10584 case TARGET_NR_arm_fadvise64_64:
10585 /* arm_fadvise64_64 looks like fadvise64_64 but
10586 * with different argument order: fd, advice, offset, len
10587 * rather than the usual fd, offset, len, advice.
10588 * Note that offset and len are both 64-bit so appear as
10589 * pairs of 32-bit registers.
10591 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10592 target_offset64(arg5, arg6), arg2);
10593 ret = -host_to_target_errno(ret);
10594 break;
10595 #endif
10597 #if TARGET_ABI_BITS == 32
10599 #ifdef TARGET_NR_fadvise64_64
10600 case TARGET_NR_fadvise64_64:
10601 /* 6 args: fd, offset (high, low), len (high, low), advice */
10602 if (regpairs_aligned(cpu_env)) {
10603 /* offset is in (3,4), len in (5,6) and advice in 7 */
10604 arg2 = arg3;
10605 arg3 = arg4;
10606 arg4 = arg5;
10607 arg5 = arg6;
10608 arg6 = arg7;
10610 ret = -host_to_target_errno(posix_fadvise(arg1,
10611 target_offset64(arg2, arg3),
10612 target_offset64(arg4, arg5),
10613 arg6));
10614 break;
10615 #endif
10617 #ifdef TARGET_NR_fadvise64
10618 case TARGET_NR_fadvise64:
10619 /* 5 args: fd, offset (high, low), len, advice */
10620 if (regpairs_aligned(cpu_env)) {
10621 /* offset is in (3,4), len in 5 and advice in 6 */
10622 arg2 = arg3;
10623 arg3 = arg4;
10624 arg4 = arg5;
10625 arg5 = arg6;
10627 ret = -host_to_target_errno(posix_fadvise(arg1,
10628 target_offset64(arg2, arg3),
10629 arg4, arg5));
10630 break;
10631 #endif
10633 #else /* not a 32-bit ABI */
10634 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10635 #ifdef TARGET_NR_fadvise64_64
10636 case TARGET_NR_fadvise64_64:
10637 #endif
10638 #ifdef TARGET_NR_fadvise64
10639 case TARGET_NR_fadvise64:
10640 #endif
10641 #ifdef TARGET_S390X
10642 switch (arg4) {
10643 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10644 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10645 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10646 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10647 default: break;
10649 #endif
10650 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10651 break;
10652 #endif
10653 #endif /* end of 64-bit ABI fadvise handling */
10655 #ifdef TARGET_NR_madvise
10656 case TARGET_NR_madvise:
10657 /* A straight passthrough may not be safe because qemu sometimes
10658 turns private file-backed mappings into anonymous mappings.
10659 This will break MADV_DONTNEED.
10660 This is a hint, so ignoring and returning success is ok. */
10661 ret = get_errno(0);
10662 break;
10663 #endif
10664 #if TARGET_ABI_BITS == 32
10665 case TARGET_NR_fcntl64:
10667 int cmd;
10668 struct flock64 fl;
10669 from_flock64_fn *copyfrom = copy_from_user_flock64;
10670 to_flock64_fn *copyto = copy_to_user_flock64;
10672 #ifdef TARGET_ARM
10673 if (((CPUARMState *)cpu_env)->eabi) {
10674 copyfrom = copy_from_user_eabi_flock64;
10675 copyto = copy_to_user_eabi_flock64;
10677 #endif
10679 cmd = target_to_host_fcntl_cmd(arg2);
10680 if (cmd == -TARGET_EINVAL) {
10681 ret = cmd;
10682 break;
10685 switch(arg2) {
10686 case TARGET_F_GETLK64:
10687 ret = copyfrom(&fl, arg3);
10688 if (ret) {
10689 break;
10691 ret = get_errno(fcntl(arg1, cmd, &fl));
10692 if (ret == 0) {
10693 ret = copyto(arg3, &fl);
10695 break;
10697 case TARGET_F_SETLK64:
10698 case TARGET_F_SETLKW64:
10699 ret = copyfrom(&fl, arg3);
10700 if (ret) {
10701 break;
10703 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10704 break;
10705 default:
10706 ret = do_fcntl(arg1, arg2, arg3);
10707 break;
10709 break;
10711 #endif
10712 #ifdef TARGET_NR_cacheflush
10713 case TARGET_NR_cacheflush:
10714 /* self-modifying code is handled automatically, so nothing needed */
10715 ret = 0;
10716 break;
10717 #endif
10718 #ifdef TARGET_NR_security
10719 case TARGET_NR_security:
10720 goto unimplemented;
10721 #endif
10722 #ifdef TARGET_NR_getpagesize
10723 case TARGET_NR_getpagesize:
10724 ret = TARGET_PAGE_SIZE;
10725 break;
10726 #endif
10727 case TARGET_NR_gettid:
10728 ret = get_errno(gettid());
10729 break;
10730 #ifdef TARGET_NR_readahead
10731 case TARGET_NR_readahead:
10732 #if TARGET_ABI_BITS == 32
10733 if (regpairs_aligned(cpu_env)) {
10734 arg2 = arg3;
10735 arg3 = arg4;
10736 arg4 = arg5;
10738 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10739 #else
10740 ret = get_errno(readahead(arg1, arg2, arg3));
10741 #endif
10742 break;
10743 #endif
10744 #ifdef CONFIG_ATTR
10745 #ifdef TARGET_NR_setxattr
10746 case TARGET_NR_listxattr:
10747 case TARGET_NR_llistxattr:
10749 void *p, *b = 0;
10750 if (arg2) {
10751 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10752 if (!b) {
10753 ret = -TARGET_EFAULT;
10754 break;
10757 p = lock_user_string(arg1);
10758 if (p) {
10759 if (num == TARGET_NR_listxattr) {
10760 ret = get_errno(listxattr(p, b, arg3));
10761 } else {
10762 ret = get_errno(llistxattr(p, b, arg3));
10764 } else {
10765 ret = -TARGET_EFAULT;
10767 unlock_user(p, arg1, 0);
10768 unlock_user(b, arg2, arg3);
10769 break;
10771 case TARGET_NR_flistxattr:
10773 void *b = 0;
10774 if (arg2) {
10775 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10776 if (!b) {
10777 ret = -TARGET_EFAULT;
10778 break;
10781 ret = get_errno(flistxattr(arg1, b, arg3));
10782 unlock_user(b, arg2, arg3);
10783 break;
10785 case TARGET_NR_setxattr:
10786 case TARGET_NR_lsetxattr:
10788 void *p, *n, *v = 0;
10789 if (arg3) {
10790 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10791 if (!v) {
10792 ret = -TARGET_EFAULT;
10793 break;
10796 p = lock_user_string(arg1);
10797 n = lock_user_string(arg2);
10798 if (p && n) {
10799 if (num == TARGET_NR_setxattr) {
10800 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10801 } else {
10802 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10804 } else {
10805 ret = -TARGET_EFAULT;
10807 unlock_user(p, arg1, 0);
10808 unlock_user(n, arg2, 0);
10809 unlock_user(v, arg3, 0);
10811 break;
10812 case TARGET_NR_fsetxattr:
10814 void *n, *v = 0;
10815 if (arg3) {
10816 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10817 if (!v) {
10818 ret = -TARGET_EFAULT;
10819 break;
10822 n = lock_user_string(arg2);
10823 if (n) {
10824 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10825 } else {
10826 ret = -TARGET_EFAULT;
10828 unlock_user(n, arg2, 0);
10829 unlock_user(v, arg3, 0);
10831 break;
10832 case TARGET_NR_getxattr:
10833 case TARGET_NR_lgetxattr:
10835 void *p, *n, *v = 0;
10836 if (arg3) {
10837 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10838 if (!v) {
10839 ret = -TARGET_EFAULT;
10840 break;
10843 p = lock_user_string(arg1);
10844 n = lock_user_string(arg2);
10845 if (p && n) {
10846 if (num == TARGET_NR_getxattr) {
10847 ret = get_errno(getxattr(p, n, v, arg4));
10848 } else {
10849 ret = get_errno(lgetxattr(p, n, v, arg4));
10851 } else {
10852 ret = -TARGET_EFAULT;
10854 unlock_user(p, arg1, 0);
10855 unlock_user(n, arg2, 0);
10856 unlock_user(v, arg3, arg4);
10858 break;
10859 case TARGET_NR_fgetxattr:
10861 void *n, *v = 0;
10862 if (arg3) {
10863 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10864 if (!v) {
10865 ret = -TARGET_EFAULT;
10866 break;
10869 n = lock_user_string(arg2);
10870 if (n) {
10871 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10872 } else {
10873 ret = -TARGET_EFAULT;
10875 unlock_user(n, arg2, 0);
10876 unlock_user(v, arg3, arg4);
10878 break;
10879 case TARGET_NR_removexattr:
10880 case TARGET_NR_lremovexattr:
10882 void *p, *n;
10883 p = lock_user_string(arg1);
10884 n = lock_user_string(arg2);
10885 if (p && n) {
10886 if (num == TARGET_NR_removexattr) {
10887 ret = get_errno(removexattr(p, n));
10888 } else {
10889 ret = get_errno(lremovexattr(p, n));
10891 } else {
10892 ret = -TARGET_EFAULT;
10894 unlock_user(p, arg1, 0);
10895 unlock_user(n, arg2, 0);
10897 break;
10898 case TARGET_NR_fremovexattr:
10900 void *n;
10901 n = lock_user_string(arg2);
10902 if (n) {
10903 ret = get_errno(fremovexattr(arg1, n));
10904 } else {
10905 ret = -TARGET_EFAULT;
10907 unlock_user(n, arg2, 0);
10909 break;
10910 #endif
10911 #endif /* CONFIG_ATTR */
10912 #ifdef TARGET_NR_set_thread_area
10913 case TARGET_NR_set_thread_area:
10914 #if defined(TARGET_MIPS)
10915 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10916 ret = 0;
10917 break;
10918 #elif defined(TARGET_CRIS)
10919 if (arg1 & 0xff)
10920 ret = -TARGET_EINVAL;
10921 else {
10922 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10923 ret = 0;
10925 break;
10926 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10927 ret = do_set_thread_area(cpu_env, arg1);
10928 break;
10929 #elif defined(TARGET_M68K)
10931 TaskState *ts = cpu->opaque;
10932 ts->tp_value = arg1;
10933 ret = 0;
10934 break;
10936 #else
10937 goto unimplemented_nowarn;
10938 #endif
10939 #endif
10940 #ifdef TARGET_NR_get_thread_area
10941 case TARGET_NR_get_thread_area:
10942 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10943 ret = do_get_thread_area(cpu_env, arg1);
10944 break;
10945 #elif defined(TARGET_M68K)
10947 TaskState *ts = cpu->opaque;
10948 ret = ts->tp_value;
10949 break;
10951 #else
10952 goto unimplemented_nowarn;
10953 #endif
10954 #endif
10955 #ifdef TARGET_NR_getdomainname
10956 case TARGET_NR_getdomainname:
10957 goto unimplemented_nowarn;
10958 #endif
10960 #ifdef TARGET_NR_clock_gettime
10961 case TARGET_NR_clock_gettime:
10963 struct timespec ts;
10964 ret = get_errno(clock_gettime(arg1, &ts));
10965 if (!is_error(ret)) {
10966 host_to_target_timespec(arg2, &ts);
10968 break;
10970 #endif
10971 #ifdef TARGET_NR_clock_getres
10972 case TARGET_NR_clock_getres:
10974 struct timespec ts;
10975 ret = get_errno(clock_getres(arg1, &ts));
10976 if (!is_error(ret)) {
10977 host_to_target_timespec(arg2, &ts);
10979 break;
10981 #endif
10982 #ifdef TARGET_NR_clock_nanosleep
10983 case TARGET_NR_clock_nanosleep:
10985 struct timespec ts;
10986 target_to_host_timespec(&ts, arg3);
10987 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10988 &ts, arg4 ? &ts : NULL));
10989 if (arg4)
10990 host_to_target_timespec(arg4, &ts);
10992 #if defined(TARGET_PPC)
10993 /* clock_nanosleep is odd in that it returns positive errno values.
10994 * On PPC, CR0 bit 3 should be set in such a situation. */
10995 if (ret && ret != -TARGET_ERESTARTSYS) {
10996 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10998 #endif
10999 break;
11001 #endif
11003 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11004 case TARGET_NR_set_tid_address:
11005 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11006 break;
11007 #endif
11009 case TARGET_NR_tkill:
11010 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11011 break;
11013 case TARGET_NR_tgkill:
11014 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11015 target_to_host_signal(arg3)));
11016 break;
11018 #ifdef TARGET_NR_set_robust_list
11019 case TARGET_NR_set_robust_list:
11020 case TARGET_NR_get_robust_list:
11021 /* The ABI for supporting robust futexes has userspace pass
11022 * the kernel a pointer to a linked list which is updated by
11023 * userspace after the syscall; the list is walked by the kernel
11024 * when the thread exits. Since the linked list in QEMU guest
11025 * memory isn't a valid linked list for the host and we have
11026 * no way to reliably intercept the thread-death event, we can't
11027 * support these. Silently return ENOSYS so that guest userspace
11028 * falls back to a non-robust futex implementation (which should
11029 * be OK except in the corner case of the guest crashing while
11030 * holding a mutex that is shared with another process via
11031 * shared memory).
11033 goto unimplemented_nowarn;
11034 #endif
11036 #if defined(TARGET_NR_utimensat)
11037 case TARGET_NR_utimensat:
11039 struct timespec *tsp, ts[2];
11040 if (!arg3) {
11041 tsp = NULL;
11042 } else {
11043 target_to_host_timespec(ts, arg3);
11044 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11045 tsp = ts;
11047 if (!arg2)
11048 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11049 else {
11050 if (!(p = lock_user_string(arg2))) {
11051 ret = -TARGET_EFAULT;
11052 goto fail;
11054 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11055 unlock_user(p, arg2, 0);
11058 break;
11059 #endif
11060 case TARGET_NR_futex:
11061 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11062 break;
11063 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11064 case TARGET_NR_inotify_init:
11065 ret = get_errno(sys_inotify_init());
11066 break;
11067 #endif
11068 #ifdef CONFIG_INOTIFY1
11069 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11070 case TARGET_NR_inotify_init1:
11071 ret = get_errno(sys_inotify_init1(arg1));
11072 break;
11073 #endif
11074 #endif
11075 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11076 case TARGET_NR_inotify_add_watch:
11077 p = lock_user_string(arg2);
11078 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11079 unlock_user(p, arg2, 0);
11080 break;
11081 #endif
11082 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11083 case TARGET_NR_inotify_rm_watch:
11084 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11085 break;
11086 #endif
11088 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11089 case TARGET_NR_mq_open:
11091 struct mq_attr posix_mq_attr, *attrp;
11093 p = lock_user_string(arg1 - 1);
11094 if (arg4 != 0) {
11095 copy_from_user_mq_attr (&posix_mq_attr, arg4);
11096 attrp = &posix_mq_attr;
11097 } else {
11098 attrp = 0;
11100 ret = get_errno(mq_open(p, arg2, arg3, attrp));
11101 unlock_user (p, arg1, 0);
11103 break;
11105 case TARGET_NR_mq_unlink:
11106 p = lock_user_string(arg1 - 1);
11107 ret = get_errno(mq_unlink(p));
11108 unlock_user (p, arg1, 0);
11109 break;
11111 case TARGET_NR_mq_timedsend:
11113 struct timespec ts;
11115 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11116 if (arg5 != 0) {
11117 target_to_host_timespec(&ts, arg5);
11118 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11119 host_to_target_timespec(arg5, &ts);
11120 } else {
11121 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11123 unlock_user (p, arg2, arg3);
11125 break;
11127 case TARGET_NR_mq_timedreceive:
11129 struct timespec ts;
11130 unsigned int prio;
11132 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11133 if (arg5 != 0) {
11134 target_to_host_timespec(&ts, arg5);
11135 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11136 &prio, &ts));
11137 host_to_target_timespec(arg5, &ts);
11138 } else {
11139 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11140 &prio, NULL));
11142 unlock_user (p, arg2, arg3);
11143 if (arg4 != 0)
11144 put_user_u32(prio, arg4);
11146 break;
11148 /* Not implemented for now... */
11149 /* case TARGET_NR_mq_notify: */
11150 /* break; */
11152 case TARGET_NR_mq_getsetattr:
11154 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11155 ret = 0;
11156 if (arg3 != 0) {
11157 ret = mq_getattr(arg1, &posix_mq_attr_out);
11158 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11160 if (arg2 != 0) {
11161 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11162 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11166 break;
11167 #endif
11169 #ifdef CONFIG_SPLICE
11170 #ifdef TARGET_NR_tee
11171 case TARGET_NR_tee:
11173 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11175 break;
11176 #endif
11177 #ifdef TARGET_NR_splice
11178 case TARGET_NR_splice:
11180 loff_t loff_in, loff_out;
11181 loff_t *ploff_in = NULL, *ploff_out = NULL;
11182 if (arg2) {
11183 if (get_user_u64(loff_in, arg2)) {
11184 goto efault;
11186 ploff_in = &loff_in;
11188 if (arg4) {
11189 if (get_user_u64(loff_out, arg4)) {
11190 goto efault;
11192 ploff_out = &loff_out;
11194 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11195 if (arg2) {
11196 if (put_user_u64(loff_in, arg2)) {
11197 goto efault;
11200 if (arg4) {
11201 if (put_user_u64(loff_out, arg4)) {
11202 goto efault;
11206 break;
11207 #endif
11208 #ifdef TARGET_NR_vmsplice
11209 case TARGET_NR_vmsplice:
11211 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11212 if (vec != NULL) {
11213 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11214 unlock_iovec(vec, arg2, arg3, 0);
11215 } else {
11216 ret = -host_to_target_errno(errno);
11219 break;
11220 #endif
11221 #endif /* CONFIG_SPLICE */
11222 #ifdef CONFIG_EVENTFD
11223 #if defined(TARGET_NR_eventfd)
11224 case TARGET_NR_eventfd:
11225 ret = get_errno(eventfd(arg1, 0));
11226 fd_trans_unregister(ret);
11227 break;
11228 #endif
11229 #if defined(TARGET_NR_eventfd2)
11230 case TARGET_NR_eventfd2:
11232 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11233 if (arg2 & TARGET_O_NONBLOCK) {
11234 host_flags |= O_NONBLOCK;
11236 if (arg2 & TARGET_O_CLOEXEC) {
11237 host_flags |= O_CLOEXEC;
11239 ret = get_errno(eventfd(arg1, host_flags));
11240 fd_trans_unregister(ret);
11241 break;
11243 #endif
11244 #endif /* CONFIG_EVENTFD */
11245 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11246 case TARGET_NR_fallocate:
11247 #if TARGET_ABI_BITS == 32
11248 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11249 target_offset64(arg5, arg6)));
11250 #else
11251 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11252 #endif
11253 break;
11254 #endif
11255 #if defined(CONFIG_SYNC_FILE_RANGE)
11256 #if defined(TARGET_NR_sync_file_range)
11257 case TARGET_NR_sync_file_range:
11258 #if TARGET_ABI_BITS == 32
11259 #if defined(TARGET_MIPS)
11260 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11261 target_offset64(arg5, arg6), arg7));
11262 #else
11263 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11264 target_offset64(arg4, arg5), arg6));
11265 #endif /* !TARGET_MIPS */
11266 #else
11267 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11268 #endif
11269 break;
11270 #endif
11271 #if defined(TARGET_NR_sync_file_range2)
11272 case TARGET_NR_sync_file_range2:
11273 /* This is like sync_file_range but the arguments are reordered */
11274 #if TARGET_ABI_BITS == 32
11275 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11276 target_offset64(arg5, arg6), arg2));
11277 #else
11278 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11279 #endif
11280 break;
11281 #endif
11282 #endif
11283 #if defined(TARGET_NR_signalfd4)
11284 case TARGET_NR_signalfd4:
11285 ret = do_signalfd4(arg1, arg2, arg4);
11286 break;
11287 #endif
11288 #if defined(TARGET_NR_signalfd)
11289 case TARGET_NR_signalfd:
11290 ret = do_signalfd4(arg1, arg2, 0);
11291 break;
11292 #endif
11293 #if defined(CONFIG_EPOLL)
11294 #if defined(TARGET_NR_epoll_create)
11295 case TARGET_NR_epoll_create:
11296 ret = get_errno(epoll_create(arg1));
11297 break;
11298 #endif
11299 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11300 case TARGET_NR_epoll_create1:
11301 ret = get_errno(epoll_create1(arg1));
11302 break;
11303 #endif
11304 #if defined(TARGET_NR_epoll_ctl)
11305 case TARGET_NR_epoll_ctl:
11307 struct epoll_event ep;
11308 struct epoll_event *epp = 0;
11309 if (arg4) {
11310 struct target_epoll_event *target_ep;
11311 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11312 goto efault;
11314 ep.events = tswap32(target_ep->events);
11315 /* The epoll_data_t union is just opaque data to the kernel,
11316 * so we transfer all 64 bits across and need not worry what
11317 * actual data type it is.
11319 ep.data.u64 = tswap64(target_ep->data.u64);
11320 unlock_user_struct(target_ep, arg4, 0);
11321 epp = &ep;
11323 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11324 break;
11326 #endif
11328 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11329 #if defined(TARGET_NR_epoll_wait)
11330 case TARGET_NR_epoll_wait:
11331 #endif
11332 #if defined(TARGET_NR_epoll_pwait)
11333 case TARGET_NR_epoll_pwait:
11334 #endif
11336 struct target_epoll_event *target_ep;
11337 struct epoll_event *ep;
11338 int epfd = arg1;
11339 int maxevents = arg3;
11340 int timeout = arg4;
11342 target_ep = lock_user(VERIFY_WRITE, arg2,
11343 maxevents * sizeof(struct target_epoll_event), 1);
11344 if (!target_ep) {
11345 goto efault;
11348 ep = alloca(maxevents * sizeof(struct epoll_event));
11350 switch (num) {
11351 #if defined(TARGET_NR_epoll_pwait)
11352 case TARGET_NR_epoll_pwait:
11354 target_sigset_t *target_set;
11355 sigset_t _set, *set = &_set;
11357 if (arg5) {
11358 if (arg6 != sizeof(target_sigset_t)) {
11359 ret = -TARGET_EINVAL;
11360 break;
11363 target_set = lock_user(VERIFY_READ, arg5,
11364 sizeof(target_sigset_t), 1);
11365 if (!target_set) {
11366 unlock_user(target_ep, arg2, 0);
11367 goto efault;
11369 target_to_host_sigset(set, target_set);
11370 unlock_user(target_set, arg5, 0);
11371 } else {
11372 set = NULL;
11375 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11376 set, SIGSET_T_SIZE));
11377 break;
11379 #endif
11380 #if defined(TARGET_NR_epoll_wait)
11381 case TARGET_NR_epoll_wait:
11382 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11383 NULL, 0));
11384 break;
11385 #endif
11386 default:
11387 ret = -TARGET_ENOSYS;
11389 if (!is_error(ret)) {
11390 int i;
11391 for (i = 0; i < ret; i++) {
11392 target_ep[i].events = tswap32(ep[i].events);
11393 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11396 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11397 break;
11399 #endif
11400 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            /* Copy the guest's new limits into host representation. */
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        /* Only ask the kernel for the old limits if the guest wants them. */
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* args: char *name, size_t len — fill the guest buffer with the
         * host's hostname (linux-user guests share the host's name).
         */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Compare-and-exchange on a guest 32-bit word:
         * if *arg6 == arg2 then *arg6 = arg1; always returns the old value.
         * should use start_exclusive from main.c — as written this is NOT
         * atomic with respect to other guest threads.
         */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Unreadable address: deliver SIGSEGV to the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;
            /* NOTE(review): control falls through below and overwrites ret
             * with an uninitialized mem_value on this fault path — confirm
             * intended behavior against upstream.
             */
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Reserve a slot in the g_posix_timers table; the guest-visible
         * timer id encodes this index together with TIMER_MAGIC.
         */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    /* NOTE(review): the reserved timer_index slot is not
                     * released on this early-exit path — possible slot leak;
                     * verify against upstream.
                     */
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the magic-tagged id back to the guest. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory for timer_settime. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            /* Bug fix: the conversion's failure was previously ignored. */
            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Bug fix: the previous timer value must be copied out through
             * arg4 (old_value), not arg2 (the flags word), and only when
             * the guest supplied a buffer.
             */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* Copy the current setting back to the guest buffer. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* NOTE(review): ret is an overrun count here, not a file
         * descriptor — passing it to fd_trans_unregister() looks wrong
         * (it could drop the fd translator of an unrelated descriptor).
         * Kept as-is; verify against upstream before changing.
         */
        fd_trans_unregister(ret);
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the slot so next_free_host_timer() can reuse it. */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Translate guest open-flags (e.g. TFD_CLOEXEC/TFD_NONBLOCK)
         * to host values before calling the host syscall.
         */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        /* args: int fd, struct itimerspec *curr_value */
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            goto efault;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        /* args: int fd, int flags, const struct itimerspec *new_value,
         * struct itimerspec *old_value */
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                goto efault;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        /* Copy the previous setting out only if the guest asked for it. */
        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            goto efault;
        }
        break;
    }
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* args: int which, int who — pass straight through to the host. */
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* args: int which, int who, int ioprio */
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* args: int fd, int nstype — join the namespace referred to by fd. */
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* args: int flags — disassociate parts of the execution context. */
        ret = get_errno(unshare(arg1));
        break;
#endif
11657 default:
11658 unimplemented:
11659 gemu_log("qemu: Unsupported syscall: %d\n", num);
11660 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11661 unimplemented_nowarn:
11662 #endif
11663 ret = -TARGET_ENOSYS;
11664 break;
11666 fail:
11667 #ifdef DEBUG
11668 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11669 #endif
11670 if(do_strace)
11671 print_syscall_ret(num, ret);
11672 trace_guest_user_syscall_ret(cpu, num, ret);
11673 return ret;
11674 efault:
11675 ret = -TARGET_EFAULT;
11676 goto fail;