linux-user: add fd_trans helper in do_recvfrom()
[qemu/ar7.git] / linux-user / syscall.c
blobf9ce9d8bb071e9f787aa2cdf207ae8f2a834d452
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <linux/netlink.h>
104 #ifdef CONFIG_RTNETLINK
105 #include <linux/rtnetlink.h>
106 #endif
107 #include <linux/audit.h>
108 #include "linux_loop.h"
109 #include "uname.h"
111 #include "qemu.h"
113 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
114 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
116 //#define DEBUG
117 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
118 * once. This exercises the codepaths for restart.
120 //#define DEBUG_ERESTARTSYS
122 //#include <linux/msdos_fs.h>
123 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
124 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
126 #undef _syscall0
127 #undef _syscall1
128 #undef _syscall2
129 #undef _syscall3
130 #undef _syscall4
131 #undef _syscall5
132 #undef _syscall6
134 #define _syscall0(type,name) \
135 static type name (void) \
137 return syscall(__NR_##name); \
140 #define _syscall1(type,name,type1,arg1) \
141 static type name (type1 arg1) \
143 return syscall(__NR_##name, arg1); \
146 #define _syscall2(type,name,type1,arg1,type2,arg2) \
147 static type name (type1 arg1,type2 arg2) \
149 return syscall(__NR_##name, arg1, arg2); \
152 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
153 static type name (type1 arg1,type2 arg2,type3 arg3) \
155 return syscall(__NR_##name, arg1, arg2, arg3); \
158 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
159 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
161 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
164 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 type5,arg5) \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
168 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
172 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
173 type5,arg5,type6,arg6) \
174 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
175 type6 arg6) \
177 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
181 #define __NR_sys_uname __NR_uname
182 #define __NR_sys_getcwd1 __NR_getcwd
183 #define __NR_sys_getdents __NR_getdents
184 #define __NR_sys_getdents64 __NR_getdents64
185 #define __NR_sys_getpriority __NR_getpriority
186 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
187 #define __NR_sys_syslog __NR_syslog
188 #define __NR_sys_futex __NR_futex
189 #define __NR_sys_inotify_init __NR_inotify_init
190 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
191 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
193 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
194 defined(__s390x__)
195 #define __NR__llseek __NR_lseek
196 #endif
198 /* Newer kernel ports have llseek() instead of _llseek() */
199 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
200 #define TARGET_NR__llseek TARGET_NR_llseek
201 #endif
203 #ifdef __NR_gettid
204 _syscall0(int, gettid)
205 #else
206 /* This is a replacement for the host gettid() and must return a host
207 errno. */
/* Replacement for the host gettid() when the syscall number is not
 * available; must return a host errno (negative), not a target one. */
static int gettid(void) {
    return -ENOSYS;
}
211 #endif
212 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
213 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
214 #endif
215 #if !defined(__NR_getdents) || \
216 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
217 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
218 #endif
219 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
220 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
221 loff_t *, res, uint, wh);
222 #endif
223 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
224 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
225 #ifdef __NR_exit_group
226 _syscall1(int,exit_group,int,error_code)
227 #endif
228 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
229 _syscall1(int,set_tid_address,int *,tidptr)
230 #endif
231 #if defined(TARGET_NR_futex) && defined(__NR_futex)
232 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
233 const struct timespec *,timeout,int *,uaddr2,int,val3)
234 #endif
235 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
236 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
237 unsigned long *, user_mask_ptr);
238 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
239 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
240 unsigned long *, user_mask_ptr);
241 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
242 void *, arg);
243 _syscall2(int, capget, struct __user_cap_header_struct *, header,
244 struct __user_cap_data_struct *, data);
245 _syscall2(int, capset, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
248 _syscall2(int, ioprio_get, int, which, int, who)
249 #endif
250 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
251 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
252 #endif
253 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
254 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
255 #endif
257 static bitmask_transtbl fcntl_flags_tbl[] = {
258 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
259 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
260 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
261 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
262 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
263 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
264 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
265 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
266 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
267 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
268 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
269 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
270 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
271 #if defined(O_DIRECT)
272 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
273 #endif
274 #if defined(O_NOATIME)
275 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
276 #endif
277 #if defined(O_CLOEXEC)
278 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
279 #endif
280 #if defined(O_PATH)
281 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
282 #endif
283 /* Don't terminate the list prematurely on 64-bit host+guest. */
284 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
285 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
286 #endif
287 { 0, 0, 0, 0 }
290 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
291 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
292 typedef struct TargetFdTrans {
293 TargetFdDataFunc host_to_target_data;
294 TargetFdDataFunc target_to_host_data;
295 TargetFdAddrFunc target_to_host_addr;
296 } TargetFdTrans;
298 static TargetFdTrans **target_fd_trans;
300 static unsigned int target_fd_max;
302 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
304 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
305 return target_fd_trans[fd]->target_to_host_data;
307 return NULL;
310 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
312 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
313 return target_fd_trans[fd]->host_to_target_data;
315 return NULL;
318 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
320 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
321 return target_fd_trans[fd]->target_to_host_addr;
323 return NULL;
326 static void fd_trans_register(int fd, TargetFdTrans *trans)
328 unsigned int oldmax;
330 if (fd >= target_fd_max) {
331 oldmax = target_fd_max;
332 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
333 target_fd_trans = g_renew(TargetFdTrans *,
334 target_fd_trans, target_fd_max);
335 memset((void *)(target_fd_trans + oldmax), 0,
336 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
338 target_fd_trans[fd] = trans;
341 static void fd_trans_unregister(int fd)
343 if (fd >= 0 && fd < target_fd_max) {
344 target_fd_trans[fd] = NULL;
348 static void fd_trans_dup(int oldfd, int newfd)
350 fd_trans_unregister(newfd);
351 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
352 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper following the getcwd(2) syscall convention:
 * returns the path length including the trailing NUL on success,
 * or -1 on failure (errno is set by getcwd()). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (!getcwd(buf, size)) {
        return -1;  /* errno already set by getcwd() */
    }
    return strlen(buf) + 1;
}
365 #ifdef TARGET_NR_utimensat
366 #ifdef CONFIG_UTIMENSAT
367 static int sys_utimensat(int dirfd, const char *pathname,
368 const struct timespec times[2], int flags)
370 if (pathname == NULL)
371 return futimens(dirfd, times);
372 else
373 return utimensat(dirfd, pathname, times, flags);
375 #elif defined(__NR_utimensat)
376 #define __NR_sys_utimensat __NR_utimensat
377 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
378 const struct timespec *,tsp,int,flags)
379 #else
/* Stub for hosts with neither a glibc utimensat() nor the raw syscall:
 * always fails with ENOSYS, matching the syscall convention. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
386 #endif
387 #endif /* TARGET_NR_utimensat */
389 #ifdef CONFIG_INOTIFY
390 #include <sys/inotify.h>
392 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall table has a uniform sys_* entry point. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
397 #endif
398 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper so the syscall table has a uniform sys_* entry point. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
403 #endif
404 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper so the syscall table has a uniform sys_* entry point. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
409 #endif
410 #ifdef CONFIG_INOTIFY1
411 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper so the syscall table has a uniform sys_* entry point. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
416 #endif
417 #endif
418 #else
419 /* Userspace can usually survive runtime without inotify */
420 #undef TARGET_NR_inotify_init
421 #undef TARGET_NR_inotify_init1
422 #undef TARGET_NR_inotify_add_watch
423 #undef TARGET_NR_inotify_rm_watch
424 #endif /* CONFIG_INOTIFY */
426 #if defined(TARGET_NR_prlimit64)
427 #ifndef __NR_prlimit64
428 # define __NR_prlimit64 -1
429 #endif
430 #define __NR_sys_prlimit64 __NR_prlimit64
431 /* The glibc rlimit structure may not be that used by the underlying syscall */
432 struct host_rlimit64 {
433 uint64_t rlim_cur;
434 uint64_t rlim_max;
436 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
437 const struct host_rlimit64 *, new_limit,
438 struct host_rlimit64 *, old_limit)
439 #endif
442 #if defined(TARGET_NR_timer_create)
443 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
444 static timer_t g_posix_timers[32] = { 0, } ;
446 static inline int next_free_host_timer(void)
448 int k ;
449 /* FIXME: Does finding the next free slot require a lock? */
450 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
451 if (g_posix_timers[k] == 0) {
452 g_posix_timers[k] = (timer_t) 1;
453 return k;
456 return -1;
458 #endif
460 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
461 #ifdef TARGET_ARM
462 static inline int regpairs_aligned(void *cpu_env) {
463 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
465 #elif defined(TARGET_MIPS)
466 static inline int regpairs_aligned(void *cpu_env) { return 1; }
467 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
468 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
469 * of registers which translates to the same as ARM/MIPS, because we start with
470 * r3 as arg1 */
471 static inline int regpairs_aligned(void *cpu_env) { return 1; }
472 #else
473 static inline int regpairs_aligned(void *cpu_env) { return 0; }
474 #endif
476 #define ERRNO_TABLE_SIZE 1200
478 /* target_to_host_errno_table[] is initialized from
479 * host_to_target_errno_table[] in syscall_init(). */
480 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
484 * This list is the union of errno values overridden in asm-<arch>/errno.h
485 * minus the errnos that are not actually generic to all archs.
487 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
488 [EAGAIN] = TARGET_EAGAIN,
489 [EIDRM] = TARGET_EIDRM,
490 [ECHRNG] = TARGET_ECHRNG,
491 [EL2NSYNC] = TARGET_EL2NSYNC,
492 [EL3HLT] = TARGET_EL3HLT,
493 [EL3RST] = TARGET_EL3RST,
494 [ELNRNG] = TARGET_ELNRNG,
495 [EUNATCH] = TARGET_EUNATCH,
496 [ENOCSI] = TARGET_ENOCSI,
497 [EL2HLT] = TARGET_EL2HLT,
498 [EDEADLK] = TARGET_EDEADLK,
499 [ENOLCK] = TARGET_ENOLCK,
500 [EBADE] = TARGET_EBADE,
501 [EBADR] = TARGET_EBADR,
502 [EXFULL] = TARGET_EXFULL,
503 [ENOANO] = TARGET_ENOANO,
504 [EBADRQC] = TARGET_EBADRQC,
505 [EBADSLT] = TARGET_EBADSLT,
506 [EBFONT] = TARGET_EBFONT,
507 [ENOSTR] = TARGET_ENOSTR,
508 [ENODATA] = TARGET_ENODATA,
509 [ETIME] = TARGET_ETIME,
510 [ENOSR] = TARGET_ENOSR,
511 [ENONET] = TARGET_ENONET,
512 [ENOPKG] = TARGET_ENOPKG,
513 [EREMOTE] = TARGET_EREMOTE,
514 [ENOLINK] = TARGET_ENOLINK,
515 [EADV] = TARGET_EADV,
516 [ESRMNT] = TARGET_ESRMNT,
517 [ECOMM] = TARGET_ECOMM,
518 [EPROTO] = TARGET_EPROTO,
519 [EDOTDOT] = TARGET_EDOTDOT,
520 [EMULTIHOP] = TARGET_EMULTIHOP,
521 [EBADMSG] = TARGET_EBADMSG,
522 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
523 [EOVERFLOW] = TARGET_EOVERFLOW,
524 [ENOTUNIQ] = TARGET_ENOTUNIQ,
525 [EBADFD] = TARGET_EBADFD,
526 [EREMCHG] = TARGET_EREMCHG,
527 [ELIBACC] = TARGET_ELIBACC,
528 [ELIBBAD] = TARGET_ELIBBAD,
529 [ELIBSCN] = TARGET_ELIBSCN,
530 [ELIBMAX] = TARGET_ELIBMAX,
531 [ELIBEXEC] = TARGET_ELIBEXEC,
532 [EILSEQ] = TARGET_EILSEQ,
533 [ENOSYS] = TARGET_ENOSYS,
534 [ELOOP] = TARGET_ELOOP,
535 [ERESTART] = TARGET_ERESTART,
536 [ESTRPIPE] = TARGET_ESTRPIPE,
537 [ENOTEMPTY] = TARGET_ENOTEMPTY,
538 [EUSERS] = TARGET_EUSERS,
539 [ENOTSOCK] = TARGET_ENOTSOCK,
540 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
541 [EMSGSIZE] = TARGET_EMSGSIZE,
542 [EPROTOTYPE] = TARGET_EPROTOTYPE,
543 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
544 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
545 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
546 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
547 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
548 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
549 [EADDRINUSE] = TARGET_EADDRINUSE,
550 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
551 [ENETDOWN] = TARGET_ENETDOWN,
552 [ENETUNREACH] = TARGET_ENETUNREACH,
553 [ENETRESET] = TARGET_ENETRESET,
554 [ECONNABORTED] = TARGET_ECONNABORTED,
555 [ECONNRESET] = TARGET_ECONNRESET,
556 [ENOBUFS] = TARGET_ENOBUFS,
557 [EISCONN] = TARGET_EISCONN,
558 [ENOTCONN] = TARGET_ENOTCONN,
559 [EUCLEAN] = TARGET_EUCLEAN,
560 [ENOTNAM] = TARGET_ENOTNAM,
561 [ENAVAIL] = TARGET_ENAVAIL,
562 [EISNAM] = TARGET_EISNAM,
563 [EREMOTEIO] = TARGET_EREMOTEIO,
564 [ESHUTDOWN] = TARGET_ESHUTDOWN,
565 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
566 [ETIMEDOUT] = TARGET_ETIMEDOUT,
567 [ECONNREFUSED] = TARGET_ECONNREFUSED,
568 [EHOSTDOWN] = TARGET_EHOSTDOWN,
569 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
570 [EALREADY] = TARGET_EALREADY,
571 [EINPROGRESS] = TARGET_EINPROGRESS,
572 [ESTALE] = TARGET_ESTALE,
573 [ECANCELED] = TARGET_ECANCELED,
574 [ENOMEDIUM] = TARGET_ENOMEDIUM,
575 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
576 #ifdef ENOKEY
577 [ENOKEY] = TARGET_ENOKEY,
578 #endif
579 #ifdef EKEYEXPIRED
580 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
581 #endif
582 #ifdef EKEYREVOKED
583 [EKEYREVOKED] = TARGET_EKEYREVOKED,
584 #endif
585 #ifdef EKEYREJECTED
586 [EKEYREJECTED] = TARGET_EKEYREJECTED,
587 #endif
588 #ifdef EOWNERDEAD
589 [EOWNERDEAD] = TARGET_EOWNERDEAD,
590 #endif
591 #ifdef ENOTRECOVERABLE
592 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
593 #endif
/* Translate a host errno value to the target's numbering.  Errnos with
 * no explicit table entry share the same value on host and target. */
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}
/* Translate a target errno value back to the host's numbering.  Errnos
 * with no explicit table entry share the same value on host and target. */
static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}
614 static inline abi_long get_errno(abi_long ret)
616 if (ret == -1)
617 return -host_to_target_errno(errno);
618 else
619 return ret;
/* True if ret encodes an error: values in [-4095, -1] (i.e. >= -4096
 * when viewed as unsigned) are negative errnos by kernel convention. */
static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
627 const char *target_strerror(int err)
629 if (err == TARGET_ERESTARTSYS) {
630 return "To be restarted";
632 if (err == TARGET_QEMU_ESIGRETURN) {
633 return "Successful exit from sigreturn";
636 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
637 return NULL;
639 return strerror(target_to_host_errno(err));
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
645 return safe_syscall(__NR_##name); \
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
651 return safe_syscall(__NR_##name, arg1); \
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
657 return safe_syscall(__NR_##name, arg1, arg2); \
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
663 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
667 type4, arg4) \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
670 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674 type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
676 type5 arg5) \
678 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682 type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684 type5 arg5, type6 arg6) \
686 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
689 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
690 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
691 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
692 int, flags, mode_t, mode)
693 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
694 struct rusage *, rusage)
695 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
696 int, options, struct rusage *, rusage)
697 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
698 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
699 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
700 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
701 struct timespec *, tsp, const sigset_t *, sigmask,
702 size_t, sigsetsize)
703 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
704 int, maxevents, int, timeout, const sigset_t *, sigmask,
705 size_t, sigsetsize)
706 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
707 const struct timespec *,timeout,int *,uaddr2,int,val3)
708 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
709 safe_syscall2(int, kill, pid_t, pid, int, sig)
710 safe_syscall2(int, tkill, int, tid, int, sig)
711 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
712 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
713 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
714 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
715 socklen_t, addrlen)
716 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
717 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
718 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
719 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
720 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
721 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
722 safe_syscall2(int, flock, int, fd, int, operation)
723 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
724 const struct timespec *, uts, size_t, sigsetsize)
725 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
726 int, flags)
727 safe_syscall2(int, nanosleep, const struct timespec *, req,
728 struct timespec *, rem)
729 #ifdef TARGET_NR_clock_nanosleep
730 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
731 const struct timespec *, req, struct timespec *, rem)
732 #endif
733 #ifdef __NR_msgsnd
734 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
735 int, flags)
736 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
737 long, msgtype, int, flags)
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739 unsigned, nsops, const struct timespec *, timeout)
740 #else
741 /* This host kernel architecture uses a single ipc syscall; fake up
742 * wrappers for the sub-operations to hide this implementation detail.
743 * Annoyingly we can't include linux/ipc.h to get the constant definitions
744 * for the call parameter because some structs in there conflict with the
745 * sys/ipc.h ones. So we just define them here, and rely on them being
746 * the same for all host architectures.
/* ipc(2) multiplexer sub-operation codes (see the file comment above:
 * we cannot include linux/ipc.h here). */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)

static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}

static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    /* IPC version 1 passes msgtyp in the "fifth" ipc() argument. */
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}

static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
769 #endif
770 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
771 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
772 size_t, len, unsigned, prio, const struct timespec *, timeout)
773 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
774 size_t, len, unsigned *, prio, const struct timespec *, timeout)
775 #endif
776 /* We do ioctl like this rather than via safe_syscall3 to preserve the
777 * "third argument might be integer or pointer or not present" behaviour of
778 * the libc function.
780 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
781 /* Similarly for fcntl. Note that callers must always:
782 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
783 * use the flock64 struct rather than unsuffixed flock
784 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
786 #ifdef __NR_fcntl64
787 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
788 #else
789 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
790 #endif
/* Convert a host socket type (SOCK_STREAM/SOCK_DGRAM plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK flag bits) into the target's encoding.
 * Types other than stream/dgram are passed through numerically. */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
823 static abi_ulong target_brk;
824 static abi_ulong target_original_brk;
825 static abi_ulong brk_page;
/* Record the initial program break: both the current break and its
 * floor (the break may never shrink below target_original_brk), plus
 * the host-page-aligned top of the reserved heap region. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
833 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
834 #define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    int new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break unchanged. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* The break may never shrink below its initial value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
914 static inline abi_long copy_from_user_fdset(fd_set *fds,
915 abi_ulong target_fds_addr,
916 int n)
918 int i, nw, j, k;
919 abi_ulong b, *target_fds;
921 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
922 if (!(target_fds = lock_user(VERIFY_READ,
923 target_fds_addr,
924 sizeof(abi_ulong) * nw,
925 1)))
926 return -TARGET_EFAULT;
928 FD_ZERO(fds);
929 k = 0;
930 for (i = 0; i < nw; i++) {
931 /* grab the abi_ulong */
932 __get_user(b, &target_fds[i]);
933 for (j = 0; j < TARGET_ABI_BITS; j++) {
934 /* check the bit inside the abi_ulong */
935 if ((b >> j) & 1)
936 FD_SET(k, fds);
937 k++;
941 unlock_user(target_fds, target_fds_addr, 0);
943 return 0;
946 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
947 abi_ulong target_fds_addr,
948 int n)
950 if (target_fds_addr) {
951 if (copy_from_user_fdset(fds, target_fds_addr, n))
952 return -TARGET_EFAULT;
953 *fds_ptr = fds;
954 } else {
955 *fds_ptr = NULL;
957 return 0;
960 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
961 const fd_set *fds,
962 int n)
964 int i, nw, j, k;
965 abi_long v;
966 abi_ulong *target_fds;
968 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
969 if (!(target_fds = lock_user(VERIFY_WRITE,
970 target_fds_addr,
971 sizeof(abi_ulong) * nw,
972 0)))
973 return -TARGET_EFAULT;
975 k = 0;
976 for (i = 0; i < nw; i++) {
977 v = 0;
978 for (j = 0; j < TARGET_ABI_BITS; j++) {
979 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
980 k++;
982 __put_user(v, &target_fds[i]);
985 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
987 return 0;
990 #if defined(__alpha__)
991 #define HOST_HZ 1024
992 #else
993 #define HOST_HZ 100
994 #endif
/* Rescale a tick count from the host clock rate to the target's. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* 64-bit intermediate avoids overflow of ticks * TARGET_HZ. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1005 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1006 const struct rusage *rusage)
1008 struct target_rusage *target_rusage;
1010 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1011 return -TARGET_EFAULT;
1012 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1013 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1014 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1015 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1016 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1017 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1018 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1019 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1020 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1021 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1022 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1023 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1024 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1025 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1026 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1027 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1028 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1029 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1030 unlock_user_struct(target_rusage, target_addr, 1);
1032 return 0;
1035 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1037 abi_ulong target_rlim_swap;
1038 rlim_t result;
1040 target_rlim_swap = tswapal(target_rlim);
1041 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1042 return RLIM_INFINITY;
1044 result = target_rlim_swap;
1045 if (target_rlim_swap != (rlim_t)result)
1046 return RLIM_INFINITY;
1048 return result;
1051 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1053 abi_ulong target_rlim_swap;
1054 abi_ulong result;
1056 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1057 target_rlim_swap = TARGET_RLIM_INFINITY;
1058 else
1059 target_rlim_swap = rlim;
1060 result = tswapal(target_rlim_swap);
1062 return result;
1065 static inline int target_to_host_resource(int code)
1067 switch (code) {
1068 case TARGET_RLIMIT_AS:
1069 return RLIMIT_AS;
1070 case TARGET_RLIMIT_CORE:
1071 return RLIMIT_CORE;
1072 case TARGET_RLIMIT_CPU:
1073 return RLIMIT_CPU;
1074 case TARGET_RLIMIT_DATA:
1075 return RLIMIT_DATA;
1076 case TARGET_RLIMIT_FSIZE:
1077 return RLIMIT_FSIZE;
1078 case TARGET_RLIMIT_LOCKS:
1079 return RLIMIT_LOCKS;
1080 case TARGET_RLIMIT_MEMLOCK:
1081 return RLIMIT_MEMLOCK;
1082 case TARGET_RLIMIT_MSGQUEUE:
1083 return RLIMIT_MSGQUEUE;
1084 case TARGET_RLIMIT_NICE:
1085 return RLIMIT_NICE;
1086 case TARGET_RLIMIT_NOFILE:
1087 return RLIMIT_NOFILE;
1088 case TARGET_RLIMIT_NPROC:
1089 return RLIMIT_NPROC;
1090 case TARGET_RLIMIT_RSS:
1091 return RLIMIT_RSS;
1092 case TARGET_RLIMIT_RTPRIO:
1093 return RLIMIT_RTPRIO;
1094 case TARGET_RLIMIT_SIGPENDING:
1095 return RLIMIT_SIGPENDING;
1096 case TARGET_RLIMIT_STACK:
1097 return RLIMIT_STACK;
1098 default:
1099 return code;
1103 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1104 abi_ulong target_tv_addr)
1106 struct target_timeval *target_tv;
1108 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1109 return -TARGET_EFAULT;
1111 __get_user(tv->tv_sec, &target_tv->tv_sec);
1112 __get_user(tv->tv_usec, &target_tv->tv_usec);
1114 unlock_user_struct(target_tv, target_tv_addr, 0);
1116 return 0;
1119 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1120 const struct timeval *tv)
1122 struct target_timeval *target_tv;
1124 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1125 return -TARGET_EFAULT;
1127 __put_user(tv->tv_sec, &target_tv->tv_sec);
1128 __put_user(tv->tv_usec, &target_tv->tv_usec);
1130 unlock_user_struct(target_tv, target_tv_addr, 1);
1132 return 0;
1135 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1136 abi_ulong target_tz_addr)
1138 struct target_timezone *target_tz;
1140 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1141 return -TARGET_EFAULT;
1144 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1145 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1147 unlock_user_struct(target_tz, target_tz_addr, 0);
1149 return 0;
1152 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1153 #include <mqueue.h>
1155 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1156 abi_ulong target_mq_attr_addr)
1158 struct target_mq_attr *target_mq_attr;
1160 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1161 target_mq_attr_addr, 1))
1162 return -TARGET_EFAULT;
1164 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1165 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1166 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1167 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1169 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1171 return 0;
1174 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1175 const struct mq_attr *attr)
1177 struct target_mq_attr *target_mq_attr;
1179 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1180 target_mq_attr_addr, 0))
1181 return -TARGET_EFAULT;
1183 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1184 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1185 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1186 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1188 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1190 return 0;
1192 #endif
1194 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1195 /* do_select() must return target values and target errnos. */
1196 static abi_long do_select(int n,
1197 abi_ulong rfd_addr, abi_ulong wfd_addr,
1198 abi_ulong efd_addr, abi_ulong target_tv_addr)
1200 fd_set rfds, wfds, efds;
1201 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1202 struct timeval tv;
1203 struct timespec ts, *ts_ptr;
1204 abi_long ret;
1206 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1207 if (ret) {
1208 return ret;
1210 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1211 if (ret) {
1212 return ret;
1214 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1215 if (ret) {
1216 return ret;
1219 if (target_tv_addr) {
1220 if (copy_from_user_timeval(&tv, target_tv_addr))
1221 return -TARGET_EFAULT;
1222 ts.tv_sec = tv.tv_sec;
1223 ts.tv_nsec = tv.tv_usec * 1000;
1224 ts_ptr = &ts;
1225 } else {
1226 ts_ptr = NULL;
1229 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1230 ts_ptr, NULL));
1232 if (!is_error(ret)) {
1233 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1234 return -TARGET_EFAULT;
1235 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1236 return -TARGET_EFAULT;
1237 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1238 return -TARGET_EFAULT;
1240 if (target_tv_addr) {
1241 tv.tv_sec = ts.tv_sec;
1242 tv.tv_usec = ts.tv_nsec / 1000;
1243 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1244 return -TARGET_EFAULT;
1249 return ret;
1251 #endif
1253 static abi_long do_pipe2(int host_pipe[], int flags)
1255 #ifdef CONFIG_PIPE2
1256 return pipe2(host_pipe, flags);
1257 #else
1258 return -ENOSYS;
1259 #endif
1262 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1263 int flags, int is_pipe2)
1265 int host_pipe[2];
1266 abi_long ret;
1267 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1269 if (is_error(ret))
1270 return get_errno(ret);
1272 /* Several targets have special calling conventions for the original
1273 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1274 if (!is_pipe2) {
1275 #if defined(TARGET_ALPHA)
1276 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1277 return host_pipe[0];
1278 #elif defined(TARGET_MIPS)
1279 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1280 return host_pipe[0];
1281 #elif defined(TARGET_SH4)
1282 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1283 return host_pipe[0];
1284 #elif defined(TARGET_SPARC)
1285 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1286 return host_pipe[0];
1287 #endif
1290 if (put_user_s32(host_pipe[0], pipedes)
1291 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1292 return -TARGET_EFAULT;
1293 return get_errno(ret);
1296 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1297 abi_ulong target_addr,
1298 socklen_t len)
1300 struct target_ip_mreqn *target_smreqn;
1302 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1303 if (!target_smreqn)
1304 return -TARGET_EFAULT;
1305 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1306 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1307 if (len == sizeof(struct target_ip_mreqn))
1308 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1309 unlock_user(target_smreqn, target_addr, 0);
1311 return 0;
1314 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1315 abi_ulong target_addr,
1316 socklen_t len)
1318 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1319 sa_family_t sa_family;
1320 struct target_sockaddr *target_saddr;
1322 if (fd_trans_target_to_host_addr(fd)) {
1323 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1326 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1327 if (!target_saddr)
1328 return -TARGET_EFAULT;
1330 sa_family = tswap16(target_saddr->sa_family);
1332 /* Oops. The caller might send a incomplete sun_path; sun_path
1333 * must be terminated by \0 (see the manual page), but
1334 * unfortunately it is quite common to specify sockaddr_un
1335 * length as "strlen(x->sun_path)" while it should be
1336 * "strlen(...) + 1". We'll fix that here if needed.
1337 * Linux kernel has a similar feature.
1340 if (sa_family == AF_UNIX) {
1341 if (len < unix_maxlen && len > 0) {
1342 char *cp = (char*)target_saddr;
1344 if ( cp[len-1] && !cp[len] )
1345 len++;
1347 if (len > unix_maxlen)
1348 len = unix_maxlen;
1351 memcpy(addr, target_saddr, len);
1352 addr->sa_family = sa_family;
1353 if (sa_family == AF_NETLINK) {
1354 struct sockaddr_nl *nladdr;
1356 nladdr = (struct sockaddr_nl *)addr;
1357 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1358 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1359 } else if (sa_family == AF_PACKET) {
1360 struct target_sockaddr_ll *lladdr;
1362 lladdr = (struct target_sockaddr_ll *)addr;
1363 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1364 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1366 unlock_user(target_saddr, target_addr, 0);
1368 return 0;
1371 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1372 struct sockaddr *addr,
1373 socklen_t len)
1375 struct target_sockaddr *target_saddr;
1377 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1378 if (!target_saddr)
1379 return -TARGET_EFAULT;
1380 memcpy(target_saddr, addr, len);
1381 target_saddr->sa_family = tswap16(addr->sa_family);
1382 if (addr->sa_family == AF_NETLINK) {
1383 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1384 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1385 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1387 unlock_user(target_saddr, target_addr, len);
1389 return 0;
1392 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1393 struct target_msghdr *target_msgh)
1395 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1396 abi_long msg_controllen;
1397 abi_ulong target_cmsg_addr;
1398 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1399 socklen_t space = 0;
1401 msg_controllen = tswapal(target_msgh->msg_controllen);
1402 if (msg_controllen < sizeof (struct target_cmsghdr))
1403 goto the_end;
1404 target_cmsg_addr = tswapal(target_msgh->msg_control);
1405 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1406 target_cmsg_start = target_cmsg;
1407 if (!target_cmsg)
1408 return -TARGET_EFAULT;
1410 while (cmsg && target_cmsg) {
1411 void *data = CMSG_DATA(cmsg);
1412 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1414 int len = tswapal(target_cmsg->cmsg_len)
1415 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1417 space += CMSG_SPACE(len);
1418 if (space > msgh->msg_controllen) {
1419 space -= CMSG_SPACE(len);
1420 /* This is a QEMU bug, since we allocated the payload
1421 * area ourselves (unlike overflow in host-to-target
1422 * conversion, which is just the guest giving us a buffer
1423 * that's too small). It can't happen for the payload types
1424 * we currently support; if it becomes an issue in future
1425 * we would need to improve our allocation strategy to
1426 * something more intelligent than "twice the size of the
1427 * target buffer we're reading from".
1429 gemu_log("Host cmsg overflow\n");
1430 break;
1433 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1434 cmsg->cmsg_level = SOL_SOCKET;
1435 } else {
1436 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1438 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1439 cmsg->cmsg_len = CMSG_LEN(len);
1441 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1442 int *fd = (int *)data;
1443 int *target_fd = (int *)target_data;
1444 int i, numfds = len / sizeof(int);
1446 for (i = 0; i < numfds; i++) {
1447 __get_user(fd[i], target_fd + i);
1449 } else if (cmsg->cmsg_level == SOL_SOCKET
1450 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1451 struct ucred *cred = (struct ucred *)data;
1452 struct target_ucred *target_cred =
1453 (struct target_ucred *)target_data;
1455 __get_user(cred->pid, &target_cred->pid);
1456 __get_user(cred->uid, &target_cred->uid);
1457 __get_user(cred->gid, &target_cred->gid);
1458 } else {
1459 gemu_log("Unsupported ancillary data: %d/%d\n",
1460 cmsg->cmsg_level, cmsg->cmsg_type);
1461 memcpy(data, target_data, len);
1464 cmsg = CMSG_NXTHDR(msgh, cmsg);
1465 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1466 target_cmsg_start);
1468 unlock_user(target_cmsg, target_cmsg_addr, 0);
1469 the_end:
1470 msgh->msg_controllen = space;
1471 return 0;
1474 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1475 struct msghdr *msgh)
1477 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1478 abi_long msg_controllen;
1479 abi_ulong target_cmsg_addr;
1480 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1481 socklen_t space = 0;
1483 msg_controllen = tswapal(target_msgh->msg_controllen);
1484 if (msg_controllen < sizeof (struct target_cmsghdr))
1485 goto the_end;
1486 target_cmsg_addr = tswapal(target_msgh->msg_control);
1487 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1488 target_cmsg_start = target_cmsg;
1489 if (!target_cmsg)
1490 return -TARGET_EFAULT;
1492 while (cmsg && target_cmsg) {
1493 void *data = CMSG_DATA(cmsg);
1494 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1496 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1497 int tgt_len, tgt_space;
1499 /* We never copy a half-header but may copy half-data;
1500 * this is Linux's behaviour in put_cmsg(). Note that
1501 * truncation here is a guest problem (which we report
1502 * to the guest via the CTRUNC bit), unlike truncation
1503 * in target_to_host_cmsg, which is a QEMU bug.
1505 if (msg_controllen < sizeof(struct cmsghdr)) {
1506 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1507 break;
1510 if (cmsg->cmsg_level == SOL_SOCKET) {
1511 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1512 } else {
1513 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1515 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1517 tgt_len = TARGET_CMSG_LEN(len);
1519 /* Payload types which need a different size of payload on
1520 * the target must adjust tgt_len here.
1522 switch (cmsg->cmsg_level) {
1523 case SOL_SOCKET:
1524 switch (cmsg->cmsg_type) {
1525 case SO_TIMESTAMP:
1526 tgt_len = sizeof(struct target_timeval);
1527 break;
1528 default:
1529 break;
1531 default:
1532 break;
1535 if (msg_controllen < tgt_len) {
1536 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1537 tgt_len = msg_controllen;
1540 /* We must now copy-and-convert len bytes of payload
1541 * into tgt_len bytes of destination space. Bear in mind
1542 * that in both source and destination we may be dealing
1543 * with a truncated value!
1545 switch (cmsg->cmsg_level) {
1546 case SOL_SOCKET:
1547 switch (cmsg->cmsg_type) {
1548 case SCM_RIGHTS:
1550 int *fd = (int *)data;
1551 int *target_fd = (int *)target_data;
1552 int i, numfds = tgt_len / sizeof(int);
1554 for (i = 0; i < numfds; i++) {
1555 __put_user(fd[i], target_fd + i);
1557 break;
1559 case SO_TIMESTAMP:
1561 struct timeval *tv = (struct timeval *)data;
1562 struct target_timeval *target_tv =
1563 (struct target_timeval *)target_data;
1565 if (len != sizeof(struct timeval) ||
1566 tgt_len != sizeof(struct target_timeval)) {
1567 goto unimplemented;
1570 /* copy struct timeval to target */
1571 __put_user(tv->tv_sec, &target_tv->tv_sec);
1572 __put_user(tv->tv_usec, &target_tv->tv_usec);
1573 break;
1575 case SCM_CREDENTIALS:
1577 struct ucred *cred = (struct ucred *)data;
1578 struct target_ucred *target_cred =
1579 (struct target_ucred *)target_data;
1581 __put_user(cred->pid, &target_cred->pid);
1582 __put_user(cred->uid, &target_cred->uid);
1583 __put_user(cred->gid, &target_cred->gid);
1584 break;
1586 default:
1587 goto unimplemented;
1589 break;
1591 default:
1592 unimplemented:
1593 gemu_log("Unsupported ancillary data: %d/%d\n",
1594 cmsg->cmsg_level, cmsg->cmsg_type);
1595 memcpy(target_data, data, MIN(len, tgt_len));
1596 if (tgt_len > len) {
1597 memset(target_data + len, 0, tgt_len - len);
1601 target_cmsg->cmsg_len = tswapal(tgt_len);
1602 tgt_space = TARGET_CMSG_SPACE(len);
1603 if (msg_controllen < tgt_space) {
1604 tgt_space = msg_controllen;
1606 msg_controllen -= tgt_space;
1607 space += tgt_space;
1608 cmsg = CMSG_NXTHDR(msgh, cmsg);
1609 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1610 target_cmsg_start);
1612 unlock_user(target_cmsg, target_cmsg_addr, space);
1613 the_end:
1614 target_msgh->msg_controllen = tswapal(space);
1615 return 0;
1618 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1620 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1621 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1622 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1623 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1624 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
1627 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1628 size_t len,
1629 abi_long (*host_to_target_nlmsg)
1630 (struct nlmsghdr *))
1632 uint32_t nlmsg_len;
1633 abi_long ret;
1635 while (len > sizeof(struct nlmsghdr)) {
1637 nlmsg_len = nlh->nlmsg_len;
1638 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1639 nlmsg_len > len) {
1640 break;
1643 switch (nlh->nlmsg_type) {
1644 case NLMSG_DONE:
1645 tswap_nlmsghdr(nlh);
1646 return 0;
1647 case NLMSG_NOOP:
1648 break;
1649 case NLMSG_ERROR:
1651 struct nlmsgerr *e = NLMSG_DATA(nlh);
1652 e->error = tswap32(e->error);
1653 tswap_nlmsghdr(&e->msg);
1654 tswap_nlmsghdr(nlh);
1655 return 0;
1657 default:
1658 ret = host_to_target_nlmsg(nlh);
1659 if (ret < 0) {
1660 tswap_nlmsghdr(nlh);
1661 return ret;
1663 break;
1665 tswap_nlmsghdr(nlh);
1666 len -= NLMSG_ALIGN(nlmsg_len);
1667 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
1669 return 0;
1672 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
1673 size_t len,
1674 abi_long (*target_to_host_nlmsg)
1675 (struct nlmsghdr *))
1677 int ret;
1679 while (len > sizeof(struct nlmsghdr)) {
1680 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
1681 tswap32(nlh->nlmsg_len) > len) {
1682 break;
1684 tswap_nlmsghdr(nlh);
1685 switch (nlh->nlmsg_type) {
1686 case NLMSG_DONE:
1687 return 0;
1688 case NLMSG_NOOP:
1689 break;
1690 case NLMSG_ERROR:
1692 struct nlmsgerr *e = NLMSG_DATA(nlh);
1693 e->error = tswap32(e->error);
1694 tswap_nlmsghdr(&e->msg);
1695 return 0;
1697 default:
1698 ret = target_to_host_nlmsg(nlh);
1699 if (ret < 0) {
1700 return ret;
1703 len -= NLMSG_ALIGN(nlh->nlmsg_len);
1704 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
1706 return 0;
1709 #ifdef CONFIG_RTNETLINK
1710 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1711 size_t len,
1712 abi_long (*host_to_target_rtattr)
1713 (struct rtattr *))
1715 unsigned short rta_len;
1716 abi_long ret;
1718 while (len > sizeof(struct rtattr)) {
1719 rta_len = rtattr->rta_len;
1720 if (rta_len < sizeof(struct rtattr) ||
1721 rta_len > len) {
1722 break;
1724 ret = host_to_target_rtattr(rtattr);
1725 rtattr->rta_len = tswap16(rtattr->rta_len);
1726 rtattr->rta_type = tswap16(rtattr->rta_type);
1727 if (ret < 0) {
1728 return ret;
1730 len -= RTA_ALIGN(rta_len);
1731 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1733 return 0;
1736 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
1738 uint32_t *u32;
1739 struct rtnl_link_stats *st;
1740 struct rtnl_link_stats64 *st64;
1741 struct rtnl_link_ifmap *map;
1743 switch (rtattr->rta_type) {
1744 /* binary stream */
1745 case IFLA_ADDRESS:
1746 case IFLA_BROADCAST:
1747 /* string */
1748 case IFLA_IFNAME:
1749 case IFLA_QDISC:
1750 break;
1751 /* uin8_t */
1752 case IFLA_OPERSTATE:
1753 case IFLA_LINKMODE:
1754 case IFLA_CARRIER:
1755 case IFLA_PROTO_DOWN:
1756 break;
1757 /* uint32_t */
1758 case IFLA_MTU:
1759 case IFLA_LINK:
1760 case IFLA_WEIGHT:
1761 case IFLA_TXQLEN:
1762 case IFLA_CARRIER_CHANGES:
1763 case IFLA_NUM_RX_QUEUES:
1764 case IFLA_NUM_TX_QUEUES:
1765 case IFLA_PROMISCUITY:
1766 case IFLA_EXT_MASK:
1767 case IFLA_LINK_NETNSID:
1768 case IFLA_GROUP:
1769 case IFLA_MASTER:
1770 case IFLA_NUM_VF:
1771 u32 = RTA_DATA(rtattr);
1772 *u32 = tswap32(*u32);
1773 break;
1774 /* struct rtnl_link_stats */
1775 case IFLA_STATS:
1776 st = RTA_DATA(rtattr);
1777 st->rx_packets = tswap32(st->rx_packets);
1778 st->tx_packets = tswap32(st->tx_packets);
1779 st->rx_bytes = tswap32(st->rx_bytes);
1780 st->tx_bytes = tswap32(st->tx_bytes);
1781 st->rx_errors = tswap32(st->rx_errors);
1782 st->tx_errors = tswap32(st->tx_errors);
1783 st->rx_dropped = tswap32(st->rx_dropped);
1784 st->tx_dropped = tswap32(st->tx_dropped);
1785 st->multicast = tswap32(st->multicast);
1786 st->collisions = tswap32(st->collisions);
1788 /* detailed rx_errors: */
1789 st->rx_length_errors = tswap32(st->rx_length_errors);
1790 st->rx_over_errors = tswap32(st->rx_over_errors);
1791 st->rx_crc_errors = tswap32(st->rx_crc_errors);
1792 st->rx_frame_errors = tswap32(st->rx_frame_errors);
1793 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
1794 st->rx_missed_errors = tswap32(st->rx_missed_errors);
1796 /* detailed tx_errors */
1797 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
1798 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
1799 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
1800 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
1801 st->tx_window_errors = tswap32(st->tx_window_errors);
1803 /* for cslip etc */
1804 st->rx_compressed = tswap32(st->rx_compressed);
1805 st->tx_compressed = tswap32(st->tx_compressed);
1806 break;
1807 /* struct rtnl_link_stats64 */
1808 case IFLA_STATS64:
1809 st64 = RTA_DATA(rtattr);
1810 st64->rx_packets = tswap64(st64->rx_packets);
1811 st64->tx_packets = tswap64(st64->tx_packets);
1812 st64->rx_bytes = tswap64(st64->rx_bytes);
1813 st64->tx_bytes = tswap64(st64->tx_bytes);
1814 st64->rx_errors = tswap64(st64->rx_errors);
1815 st64->tx_errors = tswap64(st64->tx_errors);
1816 st64->rx_dropped = tswap64(st64->rx_dropped);
1817 st64->tx_dropped = tswap64(st64->tx_dropped);
1818 st64->multicast = tswap64(st64->multicast);
1819 st64->collisions = tswap64(st64->collisions);
1821 /* detailed rx_errors: */
1822 st64->rx_length_errors = tswap64(st64->rx_length_errors);
1823 st64->rx_over_errors = tswap64(st64->rx_over_errors);
1824 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
1825 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
1826 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
1827 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
1829 /* detailed tx_errors */
1830 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
1831 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
1832 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
1833 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
1834 st64->tx_window_errors = tswap64(st64->tx_window_errors);
1836 /* for cslip etc */
1837 st64->rx_compressed = tswap64(st64->rx_compressed);
1838 st64->tx_compressed = tswap64(st64->tx_compressed);
1839 break;
1840 /* struct rtnl_link_ifmap */
1841 case IFLA_MAP:
1842 map = RTA_DATA(rtattr);
1843 map->mem_start = tswap64(map->mem_start);
1844 map->mem_end = tswap64(map->mem_end);
1845 map->base_addr = tswap64(map->base_addr);
1846 map->irq = tswap16(map->irq);
1847 break;
1848 /* nested */
1849 case IFLA_AF_SPEC:
1850 case IFLA_LINKINFO:
1851 /* FIXME: implement nested type */
1852 gemu_log("Unimplemented nested type %d\n", rtattr->rta_type);
1853 break;
1854 default:
1855 gemu_log("Unknown host IFLA type: %d\n", rtattr->rta_type);
1856 break;
1858 return 0;
1861 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
1863 uint32_t *u32;
1864 struct ifa_cacheinfo *ci;
1866 switch (rtattr->rta_type) {
1867 /* binary: depends on family type */
1868 case IFA_ADDRESS:
1869 case IFA_LOCAL:
1870 break;
1871 /* string */
1872 case IFA_LABEL:
1873 break;
1874 /* u32 */
1875 case IFA_FLAGS:
1876 case IFA_BROADCAST:
1877 u32 = RTA_DATA(rtattr);
1878 *u32 = tswap32(*u32);
1879 break;
1880 /* struct ifa_cacheinfo */
1881 case IFA_CACHEINFO:
1882 ci = RTA_DATA(rtattr);
1883 ci->ifa_prefered = tswap32(ci->ifa_prefered);
1884 ci->ifa_valid = tswap32(ci->ifa_valid);
1885 ci->cstamp = tswap32(ci->cstamp);
1886 ci->tstamp = tswap32(ci->tstamp);
1887 break;
1888 default:
1889 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
1890 break;
1892 return 0;
1895 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
1897 uint32_t *u32;
1898 switch (rtattr->rta_type) {
1899 /* binary: depends on family type */
1900 case RTA_GATEWAY:
1901 case RTA_DST:
1902 case RTA_PREFSRC:
1903 break;
1904 /* u32 */
1905 case RTA_PRIORITY:
1906 case RTA_TABLE:
1907 case RTA_OIF:
1908 u32 = RTA_DATA(rtattr);
1909 *u32 = tswap32(*u32);
1910 break;
1911 default:
1912 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
1913 break;
1915 return 0;
1918 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
1919 uint32_t rtattr_len)
1921 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1922 host_to_target_data_link_rtattr);
1925 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
1926 uint32_t rtattr_len)
1928 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1929 host_to_target_data_addr_rtattr);
1932 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
1933 uint32_t rtattr_len)
1935 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
1936 host_to_target_data_route_rtattr);
1939 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
1941 uint32_t nlmsg_len;
1942 struct ifinfomsg *ifi;
1943 struct ifaddrmsg *ifa;
1944 struct rtmsg *rtm;
1946 nlmsg_len = nlh->nlmsg_len;
1947 switch (nlh->nlmsg_type) {
1948 case RTM_NEWLINK:
1949 case RTM_DELLINK:
1950 case RTM_GETLINK:
1951 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
1952 ifi = NLMSG_DATA(nlh);
1953 ifi->ifi_type = tswap16(ifi->ifi_type);
1954 ifi->ifi_index = tswap32(ifi->ifi_index);
1955 ifi->ifi_flags = tswap32(ifi->ifi_flags);
1956 ifi->ifi_change = tswap32(ifi->ifi_change);
1957 host_to_target_link_rtattr(IFLA_RTA(ifi),
1958 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
1960 break;
1961 case RTM_NEWADDR:
1962 case RTM_DELADDR:
1963 case RTM_GETADDR:
1964 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
1965 ifa = NLMSG_DATA(nlh);
1966 ifa->ifa_index = tswap32(ifa->ifa_index);
1967 host_to_target_addr_rtattr(IFA_RTA(ifa),
1968 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
1970 break;
1971 case RTM_NEWROUTE:
1972 case RTM_DELROUTE:
1973 case RTM_GETROUTE:
1974 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
1975 rtm = NLMSG_DATA(nlh);
1976 rtm->rtm_flags = tswap32(rtm->rtm_flags);
1977 host_to_target_route_rtattr(RTM_RTA(rtm),
1978 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
1980 break;
1981 default:
1982 return -TARGET_EINVAL;
1984 return 0;
1987 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
1988 size_t len)
1990 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
1993 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
1994 size_t len,
1995 abi_long (*target_to_host_rtattr)
1996 (struct rtattr *))
1998 abi_long ret;
2000 while (len >= sizeof(struct rtattr)) {
2001 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2002 tswap16(rtattr->rta_len) > len) {
2003 break;
2005 rtattr->rta_len = tswap16(rtattr->rta_len);
2006 rtattr->rta_type = tswap16(rtattr->rta_type);
2007 ret = target_to_host_rtattr(rtattr);
2008 if (ret < 0) {
2009 return ret;
2011 len -= RTA_ALIGN(rtattr->rta_len);
2012 rtattr = (struct rtattr *)(((char *)rtattr) +
2013 RTA_ALIGN(rtattr->rta_len));
2015 return 0;
2018 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2020 switch (rtattr->rta_type) {
2021 default:
2022 gemu_log("Unknown target IFLA type: %d\n", rtattr->rta_type);
2023 break;
2025 return 0;
2028 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2030 switch (rtattr->rta_type) {
2031 /* binary: depends on family type */
2032 case IFA_LOCAL:
2033 case IFA_ADDRESS:
2034 break;
2035 default:
2036 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2037 break;
2039 return 0;
2042 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2044 uint32_t *u32;
2045 switch (rtattr->rta_type) {
2046 /* binary: depends on family type */
2047 case RTA_DST:
2048 case RTA_SRC:
2049 case RTA_GATEWAY:
2050 break;
2051 /* u32 */
2052 case RTA_OIF:
2053 u32 = RTA_DATA(rtattr);
2054 *u32 = tswap32(*u32);
2055 break;
2056 default:
2057 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2058 break;
2060 return 0;
2063 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2064 uint32_t rtattr_len)
2066 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2067 target_to_host_data_link_rtattr);
2070 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2071 uint32_t rtattr_len)
2073 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2074 target_to_host_data_addr_rtattr);
2077 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2078 uint32_t rtattr_len)
2080 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2081 target_to_host_data_route_rtattr);
2084 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2086 struct ifinfomsg *ifi;
2087 struct ifaddrmsg *ifa;
2088 struct rtmsg *rtm;
2090 switch (nlh->nlmsg_type) {
2091 case RTM_GETLINK:
2092 break;
2093 case RTM_NEWLINK:
2094 case RTM_DELLINK:
2095 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2096 ifi = NLMSG_DATA(nlh);
2097 ifi->ifi_type = tswap16(ifi->ifi_type);
2098 ifi->ifi_index = tswap32(ifi->ifi_index);
2099 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2100 ifi->ifi_change = tswap32(ifi->ifi_change);
2101 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2102 NLMSG_LENGTH(sizeof(*ifi)));
2104 break;
2105 case RTM_GETADDR:
2106 case RTM_NEWADDR:
2107 case RTM_DELADDR:
2108 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2109 ifa = NLMSG_DATA(nlh);
2110 ifa->ifa_index = tswap32(ifa->ifa_index);
2111 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2112 NLMSG_LENGTH(sizeof(*ifa)));
2114 break;
2115 case RTM_GETROUTE:
2116 break;
2117 case RTM_NEWROUTE:
2118 case RTM_DELROUTE:
2119 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2120 rtm = NLMSG_DATA(nlh);
2121 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2122 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2123 NLMSG_LENGTH(sizeof(*rtm)));
2125 break;
2126 default:
2127 return -TARGET_EOPNOTSUPP;
2129 return 0;
2132 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2134 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2136 #endif /* CONFIG_RTNETLINK */
2138 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2140 switch (nlh->nlmsg_type) {
2141 default:
2142 gemu_log("Unknown host audit message type %d\n",
2143 nlh->nlmsg_type);
2144 return -TARGET_EINVAL;
2146 return 0;
2149 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2150 size_t len)
2152 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2155 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2157 switch (nlh->nlmsg_type) {
2158 case AUDIT_USER:
2159 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2160 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2161 break;
2162 default:
2163 gemu_log("Unknown target audit message type %d\n",
2164 nlh->nlmsg_type);
2165 return -TARGET_EINVAL;
2168 return 0;
2171 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2173 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2176 /* do_setsockopt() Must return target values and target errnos. */
2177 static abi_long do_setsockopt(int sockfd, int level, int optname,
2178 abi_ulong optval_addr, socklen_t optlen)
2180 abi_long ret;
2181 int val;
2182 struct ip_mreqn *ip_mreq;
2183 struct ip_mreq_source *ip_mreq_source;
2185 switch(level) {
2186 case SOL_TCP:
2187 /* TCP options all take an 'int' value. */
2188 if (optlen < sizeof(uint32_t))
2189 return -TARGET_EINVAL;
2191 if (get_user_u32(val, optval_addr))
2192 return -TARGET_EFAULT;
2193 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2194 break;
2195 case SOL_IP:
2196 switch(optname) {
2197 case IP_TOS:
2198 case IP_TTL:
2199 case IP_HDRINCL:
2200 case IP_ROUTER_ALERT:
2201 case IP_RECVOPTS:
2202 case IP_RETOPTS:
2203 case IP_PKTINFO:
2204 case IP_MTU_DISCOVER:
2205 case IP_RECVERR:
2206 case IP_RECVTOS:
2207 #ifdef IP_FREEBIND
2208 case IP_FREEBIND:
2209 #endif
2210 case IP_MULTICAST_TTL:
2211 case IP_MULTICAST_LOOP:
2212 val = 0;
2213 if (optlen >= sizeof(uint32_t)) {
2214 if (get_user_u32(val, optval_addr))
2215 return -TARGET_EFAULT;
2216 } else if (optlen >= 1) {
2217 if (get_user_u8(val, optval_addr))
2218 return -TARGET_EFAULT;
2220 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2221 break;
2222 case IP_ADD_MEMBERSHIP:
2223 case IP_DROP_MEMBERSHIP:
2224 if (optlen < sizeof (struct target_ip_mreq) ||
2225 optlen > sizeof (struct target_ip_mreqn))
2226 return -TARGET_EINVAL;
2228 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2229 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2230 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2231 break;
2233 case IP_BLOCK_SOURCE:
2234 case IP_UNBLOCK_SOURCE:
2235 case IP_ADD_SOURCE_MEMBERSHIP:
2236 case IP_DROP_SOURCE_MEMBERSHIP:
2237 if (optlen != sizeof (struct target_ip_mreq_source))
2238 return -TARGET_EINVAL;
2240 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2241 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2242 unlock_user (ip_mreq_source, optval_addr, 0);
2243 break;
2245 default:
2246 goto unimplemented;
2248 break;
2249 case SOL_IPV6:
2250 switch (optname) {
2251 case IPV6_MTU_DISCOVER:
2252 case IPV6_MTU:
2253 case IPV6_V6ONLY:
2254 case IPV6_RECVPKTINFO:
2255 val = 0;
2256 if (optlen < sizeof(uint32_t)) {
2257 return -TARGET_EINVAL;
2259 if (get_user_u32(val, optval_addr)) {
2260 return -TARGET_EFAULT;
2262 ret = get_errno(setsockopt(sockfd, level, optname,
2263 &val, sizeof(val)));
2264 break;
2265 default:
2266 goto unimplemented;
2268 break;
2269 case SOL_RAW:
2270 switch (optname) {
2271 case ICMP_FILTER:
2272 /* struct icmp_filter takes an u32 value */
2273 if (optlen < sizeof(uint32_t)) {
2274 return -TARGET_EINVAL;
2277 if (get_user_u32(val, optval_addr)) {
2278 return -TARGET_EFAULT;
2280 ret = get_errno(setsockopt(sockfd, level, optname,
2281 &val, sizeof(val)));
2282 break;
2284 default:
2285 goto unimplemented;
2287 break;
2288 case TARGET_SOL_SOCKET:
2289 switch (optname) {
2290 case TARGET_SO_RCVTIMEO:
2292 struct timeval tv;
2294 optname = SO_RCVTIMEO;
2296 set_timeout:
2297 if (optlen != sizeof(struct target_timeval)) {
2298 return -TARGET_EINVAL;
2301 if (copy_from_user_timeval(&tv, optval_addr)) {
2302 return -TARGET_EFAULT;
2305 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2306 &tv, sizeof(tv)));
2307 return ret;
2309 case TARGET_SO_SNDTIMEO:
2310 optname = SO_SNDTIMEO;
2311 goto set_timeout;
2312 case TARGET_SO_ATTACH_FILTER:
2314 struct target_sock_fprog *tfprog;
2315 struct target_sock_filter *tfilter;
2316 struct sock_fprog fprog;
2317 struct sock_filter *filter;
2318 int i;
2320 if (optlen != sizeof(*tfprog)) {
2321 return -TARGET_EINVAL;
2323 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2324 return -TARGET_EFAULT;
2326 if (!lock_user_struct(VERIFY_READ, tfilter,
2327 tswapal(tfprog->filter), 0)) {
2328 unlock_user_struct(tfprog, optval_addr, 1);
2329 return -TARGET_EFAULT;
2332 fprog.len = tswap16(tfprog->len);
2333 filter = g_try_new(struct sock_filter, fprog.len);
2334 if (filter == NULL) {
2335 unlock_user_struct(tfilter, tfprog->filter, 1);
2336 unlock_user_struct(tfprog, optval_addr, 1);
2337 return -TARGET_ENOMEM;
2339 for (i = 0; i < fprog.len; i++) {
2340 filter[i].code = tswap16(tfilter[i].code);
2341 filter[i].jt = tfilter[i].jt;
2342 filter[i].jf = tfilter[i].jf;
2343 filter[i].k = tswap32(tfilter[i].k);
2345 fprog.filter = filter;
2347 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2348 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2349 g_free(filter);
2351 unlock_user_struct(tfilter, tfprog->filter, 1);
2352 unlock_user_struct(tfprog, optval_addr, 1);
2353 return ret;
2355 case TARGET_SO_BINDTODEVICE:
2357 char *dev_ifname, *addr_ifname;
2359 if (optlen > IFNAMSIZ - 1) {
2360 optlen = IFNAMSIZ - 1;
2362 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2363 if (!dev_ifname) {
2364 return -TARGET_EFAULT;
2366 optname = SO_BINDTODEVICE;
2367 addr_ifname = alloca(IFNAMSIZ);
2368 memcpy(addr_ifname, dev_ifname, optlen);
2369 addr_ifname[optlen] = 0;
2370 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2371 addr_ifname, optlen));
2372 unlock_user (dev_ifname, optval_addr, 0);
2373 return ret;
2375 /* Options with 'int' argument. */
2376 case TARGET_SO_DEBUG:
2377 optname = SO_DEBUG;
2378 break;
2379 case TARGET_SO_REUSEADDR:
2380 optname = SO_REUSEADDR;
2381 break;
2382 case TARGET_SO_TYPE:
2383 optname = SO_TYPE;
2384 break;
2385 case TARGET_SO_ERROR:
2386 optname = SO_ERROR;
2387 break;
2388 case TARGET_SO_DONTROUTE:
2389 optname = SO_DONTROUTE;
2390 break;
2391 case TARGET_SO_BROADCAST:
2392 optname = SO_BROADCAST;
2393 break;
2394 case TARGET_SO_SNDBUF:
2395 optname = SO_SNDBUF;
2396 break;
2397 case TARGET_SO_SNDBUFFORCE:
2398 optname = SO_SNDBUFFORCE;
2399 break;
2400 case TARGET_SO_RCVBUF:
2401 optname = SO_RCVBUF;
2402 break;
2403 case TARGET_SO_RCVBUFFORCE:
2404 optname = SO_RCVBUFFORCE;
2405 break;
2406 case TARGET_SO_KEEPALIVE:
2407 optname = SO_KEEPALIVE;
2408 break;
2409 case TARGET_SO_OOBINLINE:
2410 optname = SO_OOBINLINE;
2411 break;
2412 case TARGET_SO_NO_CHECK:
2413 optname = SO_NO_CHECK;
2414 break;
2415 case TARGET_SO_PRIORITY:
2416 optname = SO_PRIORITY;
2417 break;
2418 #ifdef SO_BSDCOMPAT
2419 case TARGET_SO_BSDCOMPAT:
2420 optname = SO_BSDCOMPAT;
2421 break;
2422 #endif
2423 case TARGET_SO_PASSCRED:
2424 optname = SO_PASSCRED;
2425 break;
2426 case TARGET_SO_PASSSEC:
2427 optname = SO_PASSSEC;
2428 break;
2429 case TARGET_SO_TIMESTAMP:
2430 optname = SO_TIMESTAMP;
2431 break;
2432 case TARGET_SO_RCVLOWAT:
2433 optname = SO_RCVLOWAT;
2434 break;
2435 break;
2436 default:
2437 goto unimplemented;
2439 if (optlen < sizeof(uint32_t))
2440 return -TARGET_EINVAL;
2442 if (get_user_u32(val, optval_addr))
2443 return -TARGET_EFAULT;
2444 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2445 break;
2446 default:
2447 unimplemented:
2448 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2449 ret = -TARGET_ENOPROTOOPT;
2451 return ret;
2454 /* do_getsockopt() Must return target values and target errnos. */
2455 static abi_long do_getsockopt(int sockfd, int level, int optname,
2456 abi_ulong optval_addr, abi_ulong optlen)
2458 abi_long ret;
2459 int len, val;
2460 socklen_t lv;
2462 switch(level) {
2463 case TARGET_SOL_SOCKET:
2464 level = SOL_SOCKET;
2465 switch (optname) {
2466 /* These don't just return a single integer */
2467 case TARGET_SO_LINGER:
2468 case TARGET_SO_RCVTIMEO:
2469 case TARGET_SO_SNDTIMEO:
2470 case TARGET_SO_PEERNAME:
2471 goto unimplemented;
2472 case TARGET_SO_PEERCRED: {
2473 struct ucred cr;
2474 socklen_t crlen;
2475 struct target_ucred *tcr;
2477 if (get_user_u32(len, optlen)) {
2478 return -TARGET_EFAULT;
2480 if (len < 0) {
2481 return -TARGET_EINVAL;
2484 crlen = sizeof(cr);
2485 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2486 &cr, &crlen));
2487 if (ret < 0) {
2488 return ret;
2490 if (len > crlen) {
2491 len = crlen;
2493 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2494 return -TARGET_EFAULT;
2496 __put_user(cr.pid, &tcr->pid);
2497 __put_user(cr.uid, &tcr->uid);
2498 __put_user(cr.gid, &tcr->gid);
2499 unlock_user_struct(tcr, optval_addr, 1);
2500 if (put_user_u32(len, optlen)) {
2501 return -TARGET_EFAULT;
2503 break;
2505 /* Options with 'int' argument. */
2506 case TARGET_SO_DEBUG:
2507 optname = SO_DEBUG;
2508 goto int_case;
2509 case TARGET_SO_REUSEADDR:
2510 optname = SO_REUSEADDR;
2511 goto int_case;
2512 case TARGET_SO_TYPE:
2513 optname = SO_TYPE;
2514 goto int_case;
2515 case TARGET_SO_ERROR:
2516 optname = SO_ERROR;
2517 goto int_case;
2518 case TARGET_SO_DONTROUTE:
2519 optname = SO_DONTROUTE;
2520 goto int_case;
2521 case TARGET_SO_BROADCAST:
2522 optname = SO_BROADCAST;
2523 goto int_case;
2524 case TARGET_SO_SNDBUF:
2525 optname = SO_SNDBUF;
2526 goto int_case;
2527 case TARGET_SO_RCVBUF:
2528 optname = SO_RCVBUF;
2529 goto int_case;
2530 case TARGET_SO_KEEPALIVE:
2531 optname = SO_KEEPALIVE;
2532 goto int_case;
2533 case TARGET_SO_OOBINLINE:
2534 optname = SO_OOBINLINE;
2535 goto int_case;
2536 case TARGET_SO_NO_CHECK:
2537 optname = SO_NO_CHECK;
2538 goto int_case;
2539 case TARGET_SO_PRIORITY:
2540 optname = SO_PRIORITY;
2541 goto int_case;
2542 #ifdef SO_BSDCOMPAT
2543 case TARGET_SO_BSDCOMPAT:
2544 optname = SO_BSDCOMPAT;
2545 goto int_case;
2546 #endif
2547 case TARGET_SO_PASSCRED:
2548 optname = SO_PASSCRED;
2549 goto int_case;
2550 case TARGET_SO_TIMESTAMP:
2551 optname = SO_TIMESTAMP;
2552 goto int_case;
2553 case TARGET_SO_RCVLOWAT:
2554 optname = SO_RCVLOWAT;
2555 goto int_case;
2556 case TARGET_SO_ACCEPTCONN:
2557 optname = SO_ACCEPTCONN;
2558 goto int_case;
2559 default:
2560 goto int_case;
2562 break;
2563 case SOL_TCP:
2564 /* TCP options all take an 'int' value. */
2565 int_case:
2566 if (get_user_u32(len, optlen))
2567 return -TARGET_EFAULT;
2568 if (len < 0)
2569 return -TARGET_EINVAL;
2570 lv = sizeof(lv);
2571 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2572 if (ret < 0)
2573 return ret;
2574 if (optname == SO_TYPE) {
2575 val = host_to_target_sock_type(val);
2577 if (len > lv)
2578 len = lv;
2579 if (len == 4) {
2580 if (put_user_u32(val, optval_addr))
2581 return -TARGET_EFAULT;
2582 } else {
2583 if (put_user_u8(val, optval_addr))
2584 return -TARGET_EFAULT;
2586 if (put_user_u32(len, optlen))
2587 return -TARGET_EFAULT;
2588 break;
2589 case SOL_IP:
2590 switch(optname) {
2591 case IP_TOS:
2592 case IP_TTL:
2593 case IP_HDRINCL:
2594 case IP_ROUTER_ALERT:
2595 case IP_RECVOPTS:
2596 case IP_RETOPTS:
2597 case IP_PKTINFO:
2598 case IP_MTU_DISCOVER:
2599 case IP_RECVERR:
2600 case IP_RECVTOS:
2601 #ifdef IP_FREEBIND
2602 case IP_FREEBIND:
2603 #endif
2604 case IP_MULTICAST_TTL:
2605 case IP_MULTICAST_LOOP:
2606 if (get_user_u32(len, optlen))
2607 return -TARGET_EFAULT;
2608 if (len < 0)
2609 return -TARGET_EINVAL;
2610 lv = sizeof(lv);
2611 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2612 if (ret < 0)
2613 return ret;
2614 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2615 len = 1;
2616 if (put_user_u32(len, optlen)
2617 || put_user_u8(val, optval_addr))
2618 return -TARGET_EFAULT;
2619 } else {
2620 if (len > sizeof(int))
2621 len = sizeof(int);
2622 if (put_user_u32(len, optlen)
2623 || put_user_u32(val, optval_addr))
2624 return -TARGET_EFAULT;
2626 break;
2627 default:
2628 ret = -TARGET_ENOPROTOOPT;
2629 break;
2631 break;
2632 default:
2633 unimplemented:
2634 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2635 level, optname);
2636 ret = -TARGET_EOPNOTSUPP;
2637 break;
2639 return ret;
2642 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2643 int count, int copy)
2645 struct target_iovec *target_vec;
2646 struct iovec *vec;
2647 abi_ulong total_len, max_len;
2648 int i;
2649 int err = 0;
2650 bool bad_address = false;
2652 if (count == 0) {
2653 errno = 0;
2654 return NULL;
2656 if (count < 0 || count > IOV_MAX) {
2657 errno = EINVAL;
2658 return NULL;
2661 vec = g_try_new0(struct iovec, count);
2662 if (vec == NULL) {
2663 errno = ENOMEM;
2664 return NULL;
2667 target_vec = lock_user(VERIFY_READ, target_addr,
2668 count * sizeof(struct target_iovec), 1);
2669 if (target_vec == NULL) {
2670 err = EFAULT;
2671 goto fail2;
2674 /* ??? If host page size > target page size, this will result in a
2675 value larger than what we can actually support. */
2676 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2677 total_len = 0;
2679 for (i = 0; i < count; i++) {
2680 abi_ulong base = tswapal(target_vec[i].iov_base);
2681 abi_long len = tswapal(target_vec[i].iov_len);
2683 if (len < 0) {
2684 err = EINVAL;
2685 goto fail;
2686 } else if (len == 0) {
2687 /* Zero length pointer is ignored. */
2688 vec[i].iov_base = 0;
2689 } else {
2690 vec[i].iov_base = lock_user(type, base, len, copy);
2691 /* If the first buffer pointer is bad, this is a fault. But
2692 * subsequent bad buffers will result in a partial write; this
2693 * is realized by filling the vector with null pointers and
2694 * zero lengths. */
2695 if (!vec[i].iov_base) {
2696 if (i == 0) {
2697 err = EFAULT;
2698 goto fail;
2699 } else {
2700 bad_address = true;
2703 if (bad_address) {
2704 len = 0;
2706 if (len > max_len - total_len) {
2707 len = max_len - total_len;
2710 vec[i].iov_len = len;
2711 total_len += len;
2714 unlock_user(target_vec, target_addr, 0);
2715 return vec;
2717 fail:
2718 while (--i >= 0) {
2719 if (tswapal(target_vec[i].iov_len) > 0) {
2720 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2723 unlock_user(target_vec, target_addr, 0);
2724 fail2:
2725 g_free(vec);
2726 errno = err;
2727 return NULL;
2730 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2731 int count, int copy)
2733 struct target_iovec *target_vec;
2734 int i;
2736 target_vec = lock_user(VERIFY_READ, target_addr,
2737 count * sizeof(struct target_iovec), 1);
2738 if (target_vec) {
2739 for (i = 0; i < count; i++) {
2740 abi_ulong base = tswapal(target_vec[i].iov_base);
2741 abi_long len = tswapal(target_vec[i].iov_len);
2742 if (len < 0) {
2743 break;
2745 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2747 unlock_user(target_vec, target_addr, 0);
2750 g_free(vec);
2753 static inline int target_to_host_sock_type(int *type)
2755 int host_type = 0;
2756 int target_type = *type;
2758 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2759 case TARGET_SOCK_DGRAM:
2760 host_type = SOCK_DGRAM;
2761 break;
2762 case TARGET_SOCK_STREAM:
2763 host_type = SOCK_STREAM;
2764 break;
2765 default:
2766 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2767 break;
2769 if (target_type & TARGET_SOCK_CLOEXEC) {
2770 #if defined(SOCK_CLOEXEC)
2771 host_type |= SOCK_CLOEXEC;
2772 #else
2773 return -TARGET_EINVAL;
2774 #endif
2776 if (target_type & TARGET_SOCK_NONBLOCK) {
2777 #if defined(SOCK_NONBLOCK)
2778 host_type |= SOCK_NONBLOCK;
2779 #elif !defined(O_NONBLOCK)
2780 return -TARGET_EINVAL;
2781 #endif
2783 *type = host_type;
2784 return 0;
2787 /* Try to emulate socket type flags after socket creation. */
2788 static int sock_flags_fixup(int fd, int target_type)
2790 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2791 if (target_type & TARGET_SOCK_NONBLOCK) {
2792 int flags = fcntl(fd, F_GETFL);
2793 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2794 close(fd);
2795 return -TARGET_EINVAL;
2798 #endif
2799 return fd;
2802 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2803 abi_ulong target_addr,
2804 socklen_t len)
2806 struct sockaddr *addr = host_addr;
2807 struct target_sockaddr *target_saddr;
2809 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2810 if (!target_saddr) {
2811 return -TARGET_EFAULT;
2814 memcpy(addr, target_saddr, len);
2815 addr->sa_family = tswap16(target_saddr->sa_family);
2816 /* spkt_protocol is big-endian */
2818 unlock_user(target_saddr, target_addr, 0);
2819 return 0;
2822 static TargetFdTrans target_packet_trans = {
2823 .target_to_host_addr = packet_target_to_host_sockaddr,
2826 #ifdef CONFIG_RTNETLINK
2827 static abi_long netlink_route_target_to_host(void *buf, size_t len)
2829 abi_long ret;
2831 ret = target_to_host_nlmsg_route(buf, len);
2832 if (ret < 0) {
2833 return ret;
2836 return len;
2839 static abi_long netlink_route_host_to_target(void *buf, size_t len)
2841 abi_long ret;
2843 ret = host_to_target_nlmsg_route(buf, len);
2844 if (ret < 0) {
2845 return ret;
2848 return len;
2851 static TargetFdTrans target_netlink_route_trans = {
2852 .target_to_host_data = netlink_route_target_to_host,
2853 .host_to_target_data = netlink_route_host_to_target,
2855 #endif /* CONFIG_RTNETLINK */
2857 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
2859 abi_long ret;
2861 ret = target_to_host_nlmsg_audit(buf, len);
2862 if (ret < 0) {
2863 return ret;
2866 return len;
2869 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
2871 abi_long ret;
2873 ret = host_to_target_nlmsg_audit(buf, len);
2874 if (ret < 0) {
2875 return ret;
2878 return len;
2881 static TargetFdTrans target_netlink_audit_trans = {
2882 .target_to_host_data = netlink_audit_target_to_host,
2883 .host_to_target_data = netlink_audit_host_to_target,
2886 /* do_socket() Must return target values and target errnos. */
2887 static abi_long do_socket(int domain, int type, int protocol)
2889 int target_type = type;
2890 int ret;
2892 ret = target_to_host_sock_type(&type);
2893 if (ret) {
2894 return ret;
2897 if (domain == PF_NETLINK && !(
2898 #ifdef CONFIG_RTNETLINK
2899 protocol == NETLINK_ROUTE ||
2900 #endif
2901 protocol == NETLINK_KOBJECT_UEVENT ||
2902 protocol == NETLINK_AUDIT)) {
2903 return -EPFNOSUPPORT;
2906 if (domain == AF_PACKET ||
2907 (domain == AF_INET && type == SOCK_PACKET)) {
2908 protocol = tswap16(protocol);
2911 ret = get_errno(socket(domain, type, protocol));
2912 if (ret >= 0) {
2913 ret = sock_flags_fixup(ret, target_type);
2914 if (type == SOCK_PACKET) {
2915 /* Manage an obsolete case :
2916 * if socket type is SOCK_PACKET, bind by name
2918 fd_trans_register(ret, &target_packet_trans);
2919 } else if (domain == PF_NETLINK) {
2920 switch (protocol) {
2921 #ifdef CONFIG_RTNETLINK
2922 case NETLINK_ROUTE:
2923 fd_trans_register(ret, &target_netlink_route_trans);
2924 break;
2925 #endif
2926 case NETLINK_KOBJECT_UEVENT:
2927 /* nothing to do: messages are strings */
2928 break;
2929 case NETLINK_AUDIT:
2930 fd_trans_register(ret, &target_netlink_audit_trans);
2931 break;
2932 default:
2933 g_assert_not_reached();
2937 return ret;
2940 /* do_bind() Must return target values and target errnos. */
2941 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2942 socklen_t addrlen)
2944 void *addr;
2945 abi_long ret;
2947 if ((int)addrlen < 0) {
2948 return -TARGET_EINVAL;
2951 addr = alloca(addrlen+1);
2953 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2954 if (ret)
2955 return ret;
2957 return get_errno(bind(sockfd, addr, addrlen));
2960 /* do_connect() Must return target values and target errnos. */
2961 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2962 socklen_t addrlen)
2964 void *addr;
2965 abi_long ret;
2967 if ((int)addrlen < 0) {
2968 return -TARGET_EINVAL;
2971 addr = alloca(addrlen+1);
2973 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2974 if (ret)
2975 return ret;
2977 return get_errno(safe_connect(sockfd, addr, addrlen));
2980 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2981 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2982 int flags, int send)
2984 abi_long ret, len;
2985 struct msghdr msg;
2986 int count;
2987 struct iovec *vec;
2988 abi_ulong target_vec;
2990 if (msgp->msg_name) {
2991 msg.msg_namelen = tswap32(msgp->msg_namelen);
2992 msg.msg_name = alloca(msg.msg_namelen+1);
2993 ret = target_to_host_sockaddr(fd, msg.msg_name,
2994 tswapal(msgp->msg_name),
2995 msg.msg_namelen);
2996 if (ret) {
2997 goto out2;
2999 } else {
3000 msg.msg_name = NULL;
3001 msg.msg_namelen = 0;
3003 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3004 msg.msg_control = alloca(msg.msg_controllen);
3005 msg.msg_flags = tswap32(msgp->msg_flags);
3007 count = tswapal(msgp->msg_iovlen);
3008 target_vec = tswapal(msgp->msg_iov);
3009 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3010 target_vec, count, send);
3011 if (vec == NULL) {
3012 ret = -host_to_target_errno(errno);
3013 goto out2;
3015 msg.msg_iovlen = count;
3016 msg.msg_iov = vec;
3018 if (send) {
3019 if (fd_trans_target_to_host_data(fd)) {
3020 void *host_msg;
3022 host_msg = g_malloc(msg.msg_iov->iov_len);
3023 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3024 ret = fd_trans_target_to_host_data(fd)(host_msg,
3025 msg.msg_iov->iov_len);
3026 if (ret >= 0) {
3027 msg.msg_iov->iov_base = host_msg;
3028 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3030 g_free(host_msg);
3031 } else {
3032 ret = target_to_host_cmsg(&msg, msgp);
3033 if (ret == 0) {
3034 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3037 } else {
3038 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3039 if (!is_error(ret)) {
3040 len = ret;
3041 if (fd_trans_host_to_target_data(fd)) {
3042 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3043 len);
3044 } else {
3045 ret = host_to_target_cmsg(msgp, &msg);
3047 if (!is_error(ret)) {
3048 msgp->msg_namelen = tswap32(msg.msg_namelen);
3049 if (msg.msg_name != NULL) {
3050 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3051 msg.msg_name, msg.msg_namelen);
3052 if (ret) {
3053 goto out;
3057 ret = len;
3062 out:
3063 unlock_iovec(vec, target_vec, count, !send);
3064 out2:
3065 return ret;
3068 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3069 int flags, int send)
3071 abi_long ret;
3072 struct target_msghdr *msgp;
3074 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3075 msgp,
3076 target_msg,
3077 send ? 1 : 0)) {
3078 return -TARGET_EFAULT;
3080 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3081 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3082 return ret;
3085 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3086 * so it might not have this *mmsg-specific flag either.
3088 #ifndef MSG_WAITFORONE
3089 #define MSG_WAITFORONE 0x10000
3090 #endif
3092 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3093 unsigned int vlen, unsigned int flags,
3094 int send)
3096 struct target_mmsghdr *mmsgp;
3097 abi_long ret = 0;
3098 int i;
3100 if (vlen > UIO_MAXIOV) {
3101 vlen = UIO_MAXIOV;
3104 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3105 if (!mmsgp) {
3106 return -TARGET_EFAULT;
3109 for (i = 0; i < vlen; i++) {
3110 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3111 if (is_error(ret)) {
3112 break;
3114 mmsgp[i].msg_len = tswap32(ret);
3115 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3116 if (flags & MSG_WAITFORONE) {
3117 flags |= MSG_DONTWAIT;
3121 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3123 /* Return number of datagrams sent if we sent any at all;
3124 * otherwise return the error.
3126 if (i) {
3127 return i;
3129 return ret;
3132 /* do_accept4() Must return target values and target errnos. */
3133 static abi_long do_accept4(int fd, abi_ulong target_addr,
3134 abi_ulong target_addrlen_addr, int flags)
3136 socklen_t addrlen;
3137 void *addr;
3138 abi_long ret;
3139 int host_flags;
3141 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3143 if (target_addr == 0) {
3144 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3147 /* linux returns EINVAL if addrlen pointer is invalid */
3148 if (get_user_u32(addrlen, target_addrlen_addr))
3149 return -TARGET_EINVAL;
3151 if ((int)addrlen < 0) {
3152 return -TARGET_EINVAL;
3155 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3156 return -TARGET_EINVAL;
3158 addr = alloca(addrlen);
3160 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3161 if (!is_error(ret)) {
3162 host_to_target_sockaddr(target_addr, addr, addrlen);
3163 if (put_user_u32(addrlen, target_addrlen_addr))
3164 ret = -TARGET_EFAULT;
3166 return ret;
3169 /* do_getpeername() Must return target values and target errnos. */
3170 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3171 abi_ulong target_addrlen_addr)
3173 socklen_t addrlen;
3174 void *addr;
3175 abi_long ret;
3177 if (get_user_u32(addrlen, target_addrlen_addr))
3178 return -TARGET_EFAULT;
3180 if ((int)addrlen < 0) {
3181 return -TARGET_EINVAL;
3184 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3185 return -TARGET_EFAULT;
3187 addr = alloca(addrlen);
3189 ret = get_errno(getpeername(fd, addr, &addrlen));
3190 if (!is_error(ret)) {
3191 host_to_target_sockaddr(target_addr, addr, addrlen);
3192 if (put_user_u32(addrlen, target_addrlen_addr))
3193 ret = -TARGET_EFAULT;
3195 return ret;
3198 /* do_getsockname() Must return target values and target errnos. */
3199 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3200 abi_ulong target_addrlen_addr)
3202 socklen_t addrlen;
3203 void *addr;
3204 abi_long ret;
3206 if (get_user_u32(addrlen, target_addrlen_addr))
3207 return -TARGET_EFAULT;
3209 if ((int)addrlen < 0) {
3210 return -TARGET_EINVAL;
3213 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3214 return -TARGET_EFAULT;
3216 addr = alloca(addrlen);
3218 ret = get_errno(getsockname(fd, addr, &addrlen));
3219 if (!is_error(ret)) {
3220 host_to_target_sockaddr(target_addr, addr, addrlen);
3221 if (put_user_u32(addrlen, target_addrlen_addr))
3222 ret = -TARGET_EFAULT;
3224 return ret;
3227 /* do_socketpair() Must return target values and target errnos. */
3228 static abi_long do_socketpair(int domain, int type, int protocol,
3229 abi_ulong target_tab_addr)
3231 int tab[2];
3232 abi_long ret;
3234 target_to_host_sock_type(&type);
3236 ret = get_errno(socketpair(domain, type, protocol, tab));
3237 if (!is_error(ret)) {
3238 if (put_user_s32(tab[0], target_tab_addr)
3239 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3240 ret = -TARGET_EFAULT;
3242 return ret;
3245 /* do_sendto() Must return target values and target errnos. */
3246 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3247 abi_ulong target_addr, socklen_t addrlen)
3249 void *addr;
3250 void *host_msg;
3251 void *copy_msg = NULL;
3252 abi_long ret;
3254 if ((int)addrlen < 0) {
3255 return -TARGET_EINVAL;
3258 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3259 if (!host_msg)
3260 return -TARGET_EFAULT;
3261 if (fd_trans_target_to_host_data(fd)) {
3262 copy_msg = host_msg;
3263 host_msg = g_malloc(len);
3264 memcpy(host_msg, copy_msg, len);
3265 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3266 if (ret < 0) {
3267 goto fail;
3270 if (target_addr) {
3271 addr = alloca(addrlen+1);
3272 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3273 if (ret) {
3274 goto fail;
3276 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3277 } else {
3278 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3280 fail:
3281 if (copy_msg) {
3282 g_free(host_msg);
3283 host_msg = copy_msg;
3285 unlock_user(host_msg, msg, 0);
3286 return ret;
3289 /* do_recvfrom() Must return target values and target errnos. */
3290 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3291 abi_ulong target_addr,
3292 abi_ulong target_addrlen)
3294 socklen_t addrlen;
3295 void *addr;
3296 void *host_msg;
3297 abi_long ret;
3299 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3300 if (!host_msg)
3301 return -TARGET_EFAULT;
3302 if (target_addr) {
3303 if (get_user_u32(addrlen, target_addrlen)) {
3304 ret = -TARGET_EFAULT;
3305 goto fail;
3307 if ((int)addrlen < 0) {
3308 ret = -TARGET_EINVAL;
3309 goto fail;
3311 addr = alloca(addrlen);
3312 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3313 addr, &addrlen));
3314 } else {
3315 addr = NULL; /* To keep compiler quiet. */
3316 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3318 if (!is_error(ret)) {
3319 if (fd_trans_host_to_target_data(fd)) {
3320 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3322 if (target_addr) {
3323 host_to_target_sockaddr(target_addr, addr, addrlen);
3324 if (put_user_u32(addrlen, target_addrlen)) {
3325 ret = -TARGET_EFAULT;
3326 goto fail;
3329 unlock_user(host_msg, msg, len);
3330 } else {
3331 fail:
3332 unlock_user(host_msg, msg, 0);
3334 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the socketcall(2) syscall used by targets that do
 * not have individual socket syscalls: 'num' selects the operation and
 * 'vptr' is a guest pointer to a packed array of abi_long arguments.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }
    /* NOTE: an out-of-range num falls through with a[] unset and is
     * rejected by the default case below. */

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
#define N_SHM_REGIONS	32

/* Bookkeeping for guest shmat() attachments: do_shmat() records each
 * attached segment's guest address and size here so that do_shmdt()
 * can clear the page protection flags again.  Slots with in_use ==
 * false are free. */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct semid_ds.  Field order and the padding
 * words must match the target kernel ABI exactly — do not reorder. */
struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused1;      /* 32-bit ABIs carry padding after the times */
#endif
    abi_ulong sem_ctime;
#if !defined(TARGET_PPC64)
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
/* Copy the ipc_perm embedded at the start of a guest semid_ds at
 * target_addr into *host_ip, byte-swapping as needed.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some target ABIs and 16-bit on
     * others, so the swap width must match the target. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write *host_ip into the
 * ipc_perm embedded at the start of the guest semid_ds at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* see target_to_host_ipc_perm(): swap width is target-dependent */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3505 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3506 abi_ulong target_addr)
3508 struct target_semid_ds *target_sd;
3510 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3511 return -TARGET_EFAULT;
3512 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3513 return -TARGET_EFAULT;
3514 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3515 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3516 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3517 unlock_user_struct(target_sd, target_addr, 0);
3518 return 0;
3521 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3522 struct semid_ds *host_sd)
3524 struct target_semid_ds *target_sd;
3526 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3527 return -TARGET_EFAULT;
3528 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3529 return -TARGET_EFAULT;
3530 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3531 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3532 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3533 unlock_user_struct(target_sd, target_addr, 1);
3534 return 0;
/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO);
 * all fields are plain ints on every target. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
/* Write *host_seminfo into the guest seminfo at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* The caller-defined union required by semctl(2); glibc does not
 * declare it. */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};
/* Guest view of union semun: the pointer members are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3584 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3585 abi_ulong target_addr)
3587 int nsems;
3588 unsigned short *array;
3589 union semun semun;
3590 struct semid_ds semid_ds;
3591 int i, ret;
3593 semun.buf = &semid_ds;
3595 ret = semctl(semid, 0, IPC_STAT, semun);
3596 if (ret == -1)
3597 return get_errno(ret);
3599 nsems = semid_ds.sem_nsems;
3601 *host_array = g_try_new(unsigned short, nsems);
3602 if (!*host_array) {
3603 return -TARGET_ENOMEM;
3605 array = lock_user(VERIFY_READ, target_addr,
3606 nsems*sizeof(unsigned short), 1);
3607 if (!array) {
3608 g_free(*host_array);
3609 return -TARGET_EFAULT;
3612 for(i=0; i<nsems; i++) {
3613 __get_user((*host_array)[i], &array[i]);
3615 unlock_user(array, target_addr, 0);
3617 return 0;
3620 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3621 unsigned short **host_array)
3623 int nsems;
3624 unsigned short *array;
3625 union semun semun;
3626 struct semid_ds semid_ds;
3627 int i, ret;
3629 semun.buf = &semid_ds;
3631 ret = semctl(semid, 0, IPC_STAT, semun);
3632 if (ret == -1)
3633 return get_errno(ret);
3635 nsems = semid_ds.sem_nsems;
3637 array = lock_user(VERIFY_WRITE, target_addr,
3638 nsems*sizeof(unsigned short), 0);
3639 if (!array)
3640 return -TARGET_EFAULT;
3642 for(i=0; i<nsems; i++) {
3643 __put_user((*host_array)[i], &array[i]);
3645 g_free(*host_array);
3646 unlock_user(array, target_addr, 1);
3648 return 0;
/* Guest semctl(2): convert the (by-value) semun argument in both
 * directions around the host call.  Returns target values and target
 * errnos. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;   /* strip IPC_64 and other version bits */

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
	 * a swap of the 4 byte val field. In other cases, the data is
	 * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Marshal the whole value array guest->host, call, then
         * host->guest (which also frees the temporary array). */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* these take no semun argument */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
3727 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3728 abi_ulong target_addr,
3729 unsigned nsops)
3731 struct target_sembuf *target_sembuf;
3732 int i;
3734 target_sembuf = lock_user(VERIFY_READ, target_addr,
3735 nsops*sizeof(struct target_sembuf), 1);
3736 if (!target_sembuf)
3737 return -TARGET_EFAULT;
3739 for(i=0; i<nsops; i++) {
3740 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3741 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3742 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3745 unlock_user(target_sembuf, target_addr, 0);
3747 return 0;
3750 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3752 struct sembuf sops[nsops];
3754 if (target_to_host_sembuf(sops, ptr, nsops))
3755 return -TARGET_EFAULT;
3757 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-ABI layout of struct msqid_ds.  The padding words exist only
 * on 32-bit target ABIs; do not reorder fields. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3784 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3785 abi_ulong target_addr)
3787 struct target_msqid_ds *target_md;
3789 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3790 return -TARGET_EFAULT;
3791 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3792 return -TARGET_EFAULT;
3793 host_md->msg_stime = tswapal(target_md->msg_stime);
3794 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3795 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3796 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3797 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3798 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3799 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3800 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3801 unlock_user_struct(target_md, target_addr, 0);
3802 return 0;
3805 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3806 struct msqid_ds *host_md)
3808 struct target_msqid_ds *target_md;
3810 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3811 return -TARGET_EFAULT;
3812 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3813 return -TARGET_EFAULT;
3814 target_md->msg_stime = tswapal(host_md->msg_stime);
3815 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3816 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3817 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3818 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3819 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3820 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3821 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3822 unlock_user_struct(target_md, target_addr, 1);
3823 return 0;
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
/* Write *host_msginfo into the guest msginfo at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Guest msgctl(2): convert the msqid_ds/msginfo argument in both
 * directions around the host call.  Returns target errnos. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip IPC_64 and other version bits */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* for these commands the kernel returns a msginfo through the
         * msqid_ds-typed pointer, hence the cast */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct msgbuf; mtext is a flexible payload that
 * extends past the declared single byte. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
3892 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3893 ssize_t msgsz, int msgflg)
3895 struct target_msgbuf *target_mb;
3896 struct msgbuf *host_mb;
3897 abi_long ret = 0;
3899 if (msgsz < 0) {
3900 return -TARGET_EINVAL;
3903 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3904 return -TARGET_EFAULT;
3905 host_mb = g_try_malloc(msgsz + sizeof(long));
3906 if (!host_mb) {
3907 unlock_user_struct(target_mb, msgp, 0);
3908 return -TARGET_ENOMEM;
3910 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3911 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3912 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3913 g_free(host_mb);
3914 unlock_user_struct(target_mb, msgp, 0);
3916 return ret;
/* Guest msgrcv(2): receive into a host msgbuf, then copy the payload
 * and byte-swapped mtype back to the guest buffer at msgp.
 * Returns the received length or a target errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* host msgbuf: a long mtype followed by up to msgsz payload bytes */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* lock only the actually-received payload region of the guest
         * buffer; the copy goes through the struct mapping obtained
         * above, which aliases the same guest memory */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);   /* g_free(NULL) is a no-op */
    return ret;
}
3962 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3963 abi_ulong target_addr)
3965 struct target_shmid_ds *target_sd;
3967 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3968 return -TARGET_EFAULT;
3969 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3970 return -TARGET_EFAULT;
3971 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3972 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3973 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3974 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3975 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3976 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3977 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3978 unlock_user_struct(target_sd, target_addr, 0);
3979 return 0;
3982 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3983 struct shmid_ds *host_sd)
3985 struct target_shmid_ds *target_sd;
3987 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3988 return -TARGET_EFAULT;
3989 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3990 return -TARGET_EFAULT;
3991 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3992 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3993 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3994 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3995 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3996 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3997 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3998 unlock_user_struct(target_sd, target_addr, 1);
3999 return 0;
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
/* Write *host_shminfo into the guest shminfo at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/* Write *host_shm_info into the guest shm_info at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Guest shmctl(2): convert the shmid_ds/shminfo/shm_info argument in
 * both directions around the host call.  Returns target errnos. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip IPC_64 and other version bits */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* kernel returns a shminfo through the shmid_ds pointer */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* kernel returns a shm_info through the shmid_ds pointer */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest shmat(2): attach the segment at the guest-requested address or
 * at a free spot found by mmap_find_vma(), mark the pages valid in the
 * guest page table, and record the attachment for do_shmdt().
 * Returns the guest attach address or a target errno. */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* hold the mmap lock so the address-space bookkeeping below stays
     * consistent with concurrent mmap activity */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP lets us attach over the placeholder mapping
             * that mmap_find_vma() located */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* make the attached range visible to the guest */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* record the attachment in a free slot (silently dropped if all
     * N_SHM_REGIONS slots are busy) */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4143 static inline abi_long do_shmdt(abi_ulong shmaddr)
4145 int i;
4147 for (i = 0; i < N_SHM_REGIONS; ++i) {
4148 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4149 shm_regions[i].in_use = false;
4150 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4151 break;
4155 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the ipc(2) multiplexed SysV IPC syscall: the low
 * 16 bits of 'call' select the operation, the high 16 bits carry the
 * per-call version used by some operations below. */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        /* NOTE(review): get_user_ual() result is unchecked here; a bad
         * ptr leaves atptr unset — confirm intended. */
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style msgrcv packs msgp and msgtyp in a kludge
                 * struct that ptr points at */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* the attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
	gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/* X-macro expansion of syscall_types.h: the first pass builds an enum
 * of STRUCT_<name> identifiers, the second builds the matching thunk
 * type-description arrays (struct_<name>_def). */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* second pass: emit the argtype tables; "special" structs are
 * hand-converted elsewhere and get no table */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Custom converter hook for ioctls whose argument cannot be handled by
 * the generic thunk machinery. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table. */
struct IOCTLEntry {
    int target_cmd;            /* guest ioctl request number */
    unsigned int host_cmd;     /* corresponding host request number */
    const char *name;          /* for logging/strace */
    int access;                /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;     /* non-NULL => custom converter */
    const argtype arg_type[5]; /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* size of the fixed conversion buffer passed as buf_temp */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;   /* skip TYPE_PTR to get the pointed-to description */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* bound the extent count so outbufsz below cannot overflow */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4390 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4391 int fd, int cmd, abi_long arg)
4393 const argtype *arg_type = ie->arg_type;
4394 int target_size;
4395 void *argptr;
4396 int ret;
4397 struct ifconf *host_ifconf;
4398 uint32_t outbufsz;
4399 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4400 int target_ifreq_size;
4401 int nb_ifreq;
4402 int free_buf = 0;
4403 int i;
4404 int target_ifc_len;
4405 abi_long target_ifc_buf;
4406 int host_ifc_len;
4407 char *host_ifc_buf;
4409 assert(arg_type[0] == TYPE_PTR);
4410 assert(ie->access == IOC_RW);
4412 arg_type++;
4413 target_size = thunk_type_size(arg_type, 0);
4415 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4416 if (!argptr)
4417 return -TARGET_EFAULT;
4418 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4419 unlock_user(argptr, arg, 0);
4421 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4422 target_ifc_len = host_ifconf->ifc_len;
4423 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4425 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4426 nb_ifreq = target_ifc_len / target_ifreq_size;
4427 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4429 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4430 if (outbufsz > MAX_STRUCT_SIZE) {
4431 /* We can't fit all the extents into the fixed size buffer.
4432 * Allocate one that is large enough and use it instead.
4434 host_ifconf = malloc(outbufsz);
4435 if (!host_ifconf) {
4436 return -TARGET_ENOMEM;
4438 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4439 free_buf = 1;
4441 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4443 host_ifconf->ifc_len = host_ifc_len;
4444 host_ifconf->ifc_buf = host_ifc_buf;
4446 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4447 if (!is_error(ret)) {
4448 /* convert host ifc_len to target ifc_len */
4450 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4451 target_ifc_len = nb_ifreq * target_ifreq_size;
4452 host_ifconf->ifc_len = target_ifc_len;
4454 /* restore target ifc_buf */
4456 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4458 /* copy struct ifconf to target user */
4460 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4461 if (!argptr)
4462 return -TARGET_EFAULT;
4463 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4464 unlock_user(argptr, arg, target_size);
4466 /* copy ifreq[] to target user */
4468 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4469 for (i = 0; i < nb_ifreq ; i++) {
4470 thunk_convert(argptr + i * target_ifreq_size,
4471 host_ifc_buf + i * sizeof(struct ifreq),
4472 ifreq_arg_type, THUNK_TARGET);
4474 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4477 if (free_buf) {
4478 free(host_ifconf);
4481 return ret;
4484 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4485 int cmd, abi_long arg)
4487 void *argptr;
4488 struct dm_ioctl *host_dm;
4489 abi_long guest_data;
4490 uint32_t guest_data_size;
4491 int target_size;
4492 const argtype *arg_type = ie->arg_type;
4493 abi_long ret;
4494 void *big_buf = NULL;
4495 char *host_data;
4497 arg_type++;
4498 target_size = thunk_type_size(arg_type, 0);
4499 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4500 if (!argptr) {
4501 ret = -TARGET_EFAULT;
4502 goto out;
4504 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4505 unlock_user(argptr, arg, 0);
4507 /* buf_temp is too small, so fetch things into a bigger buffer */
4508 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4509 memcpy(big_buf, buf_temp, target_size);
4510 buf_temp = big_buf;
4511 host_dm = big_buf;
4513 guest_data = arg + host_dm->data_start;
4514 if ((guest_data - arg) < 0) {
4515 ret = -EINVAL;
4516 goto out;
4518 guest_data_size = host_dm->data_size - host_dm->data_start;
4519 host_data = (char*)host_dm + host_dm->data_start;
4521 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4522 switch (ie->host_cmd) {
4523 case DM_REMOVE_ALL:
4524 case DM_LIST_DEVICES:
4525 case DM_DEV_CREATE:
4526 case DM_DEV_REMOVE:
4527 case DM_DEV_SUSPEND:
4528 case DM_DEV_STATUS:
4529 case DM_DEV_WAIT:
4530 case DM_TABLE_STATUS:
4531 case DM_TABLE_CLEAR:
4532 case DM_TABLE_DEPS:
4533 case DM_LIST_VERSIONS:
4534 /* no input data */
4535 break;
4536 case DM_DEV_RENAME:
4537 case DM_DEV_SET_GEOMETRY:
4538 /* data contains only strings */
4539 memcpy(host_data, argptr, guest_data_size);
4540 break;
4541 case DM_TARGET_MSG:
4542 memcpy(host_data, argptr, guest_data_size);
4543 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4544 break;
4545 case DM_TABLE_LOAD:
4547 void *gspec = argptr;
4548 void *cur_data = host_data;
4549 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4550 int spec_size = thunk_type_size(arg_type, 0);
4551 int i;
4553 for (i = 0; i < host_dm->target_count; i++) {
4554 struct dm_target_spec *spec = cur_data;
4555 uint32_t next;
4556 int slen;
4558 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4559 slen = strlen((char*)gspec + spec_size) + 1;
4560 next = spec->next;
4561 spec->next = sizeof(*spec) + slen;
4562 strcpy((char*)&spec[1], gspec + spec_size);
4563 gspec += next;
4564 cur_data += spec->next;
4566 break;
4568 default:
4569 ret = -TARGET_EINVAL;
4570 unlock_user(argptr, guest_data, 0);
4571 goto out;
4573 unlock_user(argptr, guest_data, 0);
4575 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4576 if (!is_error(ret)) {
4577 guest_data = arg + host_dm->data_start;
4578 guest_data_size = host_dm->data_size - host_dm->data_start;
4579 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4580 switch (ie->host_cmd) {
4581 case DM_REMOVE_ALL:
4582 case DM_DEV_CREATE:
4583 case DM_DEV_REMOVE:
4584 case DM_DEV_RENAME:
4585 case DM_DEV_SUSPEND:
4586 case DM_DEV_STATUS:
4587 case DM_TABLE_LOAD:
4588 case DM_TABLE_CLEAR:
4589 case DM_TARGET_MSG:
4590 case DM_DEV_SET_GEOMETRY:
4591 /* no return data */
4592 break;
4593 case DM_LIST_DEVICES:
4595 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4596 uint32_t remaining_data = guest_data_size;
4597 void *cur_data = argptr;
4598 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4599 int nl_size = 12; /* can't use thunk_size due to alignment */
4601 while (1) {
4602 uint32_t next = nl->next;
4603 if (next) {
4604 nl->next = nl_size + (strlen(nl->name) + 1);
4606 if (remaining_data < nl->next) {
4607 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4608 break;
4610 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4611 strcpy(cur_data + nl_size, nl->name);
4612 cur_data += nl->next;
4613 remaining_data -= nl->next;
4614 if (!next) {
4615 break;
4617 nl = (void*)nl + next;
4619 break;
4621 case DM_DEV_WAIT:
4622 case DM_TABLE_STATUS:
4624 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4625 void *cur_data = argptr;
4626 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4627 int spec_size = thunk_type_size(arg_type, 0);
4628 int i;
4630 for (i = 0; i < host_dm->target_count; i++) {
4631 uint32_t next = spec->next;
4632 int slen = strlen((char*)&spec[1]) + 1;
4633 spec->next = (cur_data - argptr) + spec_size + slen;
4634 if (guest_data_size < spec->next) {
4635 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4636 break;
4638 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4639 strcpy(cur_data + spec_size, (char*)&spec[1]);
4640 cur_data = argptr + spec->next;
4641 spec = (void*)host_dm + host_dm->data_start + next;
4643 break;
4645 case DM_TABLE_DEPS:
4647 void *hdata = (void*)host_dm + host_dm->data_start;
4648 int count = *(uint32_t*)hdata;
4649 uint64_t *hdev = hdata + 8;
4650 uint64_t *gdev = argptr + 8;
4651 int i;
4653 *(uint32_t*)argptr = tswap32(count);
4654 for (i = 0; i < count; i++) {
4655 *gdev = tswap64(*hdev);
4656 gdev++;
4657 hdev++;
4659 break;
4661 case DM_LIST_VERSIONS:
4663 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4664 uint32_t remaining_data = guest_data_size;
4665 void *cur_data = argptr;
4666 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4667 int vers_size = thunk_type_size(arg_type, 0);
4669 while (1) {
4670 uint32_t next = vers->next;
4671 if (next) {
4672 vers->next = vers_size + (strlen(vers->name) + 1);
4674 if (remaining_data < vers->next) {
4675 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4676 break;
4678 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4679 strcpy(cur_data + vers_size, vers->name);
4680 cur_data += vers->next;
4681 remaining_data -= vers->next;
4682 if (!next) {
4683 break;
4685 vers = (void*)vers + next;
4687 break;
4689 default:
4690 unlock_user(argptr, guest_data, 0);
4691 ret = -TARGET_EINVAL;
4692 goto out;
4694 unlock_user(argptr, guest_data, guest_data_size);
4696 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4697 if (!argptr) {
4698 ret = -TARGET_EFAULT;
4699 goto out;
4701 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4702 unlock_user(argptr, arg, target_size);
4704 out:
4705 g_free(big_buf);
4706 return ret;
4709 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4710 int cmd, abi_long arg)
4712 void *argptr;
4713 int target_size;
4714 const argtype *arg_type = ie->arg_type;
4715 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
4716 abi_long ret;
4718 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
4719 struct blkpg_partition host_part;
4721 /* Read and convert blkpg */
4722 arg_type++;
4723 target_size = thunk_type_size(arg_type, 0);
4724 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4725 if (!argptr) {
4726 ret = -TARGET_EFAULT;
4727 goto out;
4729 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4730 unlock_user(argptr, arg, 0);
4732 switch (host_blkpg->op) {
4733 case BLKPG_ADD_PARTITION:
4734 case BLKPG_DEL_PARTITION:
4735 /* payload is struct blkpg_partition */
4736 break;
4737 default:
4738 /* Unknown opcode */
4739 ret = -TARGET_EINVAL;
4740 goto out;
4743 /* Read and convert blkpg->data */
4744 arg = (abi_long)(uintptr_t)host_blkpg->data;
4745 target_size = thunk_type_size(part_arg_type, 0);
4746 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4747 if (!argptr) {
4748 ret = -TARGET_EFAULT;
4749 goto out;
4751 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
4752 unlock_user(argptr, arg, 0);
4754 /* Swizzle the data pointer to our local copy and call! */
4755 host_blkpg->data = &host_part;
4756 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
4758 out:
4759 return ret;
4762 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4763 int fd, int cmd, abi_long arg)
4765 const argtype *arg_type = ie->arg_type;
4766 const StructEntry *se;
4767 const argtype *field_types;
4768 const int *dst_offsets, *src_offsets;
4769 int target_size;
4770 void *argptr;
4771 abi_ulong *target_rt_dev_ptr;
4772 unsigned long *host_rt_dev_ptr;
4773 abi_long ret;
4774 int i;
4776 assert(ie->access == IOC_W);
4777 assert(*arg_type == TYPE_PTR);
4778 arg_type++;
4779 assert(*arg_type == TYPE_STRUCT);
4780 target_size = thunk_type_size(arg_type, 0);
4781 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4782 if (!argptr) {
4783 return -TARGET_EFAULT;
4785 arg_type++;
4786 assert(*arg_type == (int)STRUCT_rtentry);
4787 se = struct_entries + *arg_type++;
4788 assert(se->convert[0] == NULL);
4789 /* convert struct here to be able to catch rt_dev string */
4790 field_types = se->field_types;
4791 dst_offsets = se->field_offsets[THUNK_HOST];
4792 src_offsets = se->field_offsets[THUNK_TARGET];
4793 for (i = 0; i < se->nb_fields; i++) {
4794 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4795 assert(*field_types == TYPE_PTRVOID);
4796 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4797 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4798 if (*target_rt_dev_ptr != 0) {
4799 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4800 tswapal(*target_rt_dev_ptr));
4801 if (!*host_rt_dev_ptr) {
4802 unlock_user(argptr, arg, 0);
4803 return -TARGET_EFAULT;
4805 } else {
4806 *host_rt_dev_ptr = 0;
4808 field_types++;
4809 continue;
4811 field_types = thunk_convert(buf_temp + dst_offsets[i],
4812 argptr + src_offsets[i],
4813 field_types, THUNK_HOST);
4815 unlock_user(argptr, arg, 0);
4817 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4818 if (*host_rt_dev_ptr != 0) {
4819 unlock_user((void *)*host_rt_dev_ptr,
4820 *target_rt_dev_ptr, 0);
4822 return ret;
4825 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4826 int fd, int cmd, abi_long arg)
4828 int sig = target_to_host_signal(arg);
4829 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* Table of all supported ioctls, generated from ioctls.h.  Entries created
 * via IOCTL_SPECIAL carry a do_ioctl_* callback that overrides the generic
 * thunk-based conversion in do_ioctl().  The table is terminated by an
 * all-zero sentinel entry (target_cmd == 0). */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
4841 /* ??? Implement proper locking for ioctls. */
4842 /* do_ioctl() Must return target values and target errnos. */
/* Generic ioctl dispatcher.  Looks the target command up in ioctl_entries,
 * then either delegates to the entry's special handler or converts the
 * argument with the thunk machinery according to the declared access mode
 * (IOC_R: convert result to target; IOC_W: convert argument to host;
 * IOC_RW: both).  Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table is terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        /* command has a dedicated conversion handler */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* argument is passed through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes: convert result back to the guest */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads: convert guest argument to host layout */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, issue, convert back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag (input mode) bit translation between target and host. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* termios c_oflag (output mode) bit translation; multi-bit delay fields
 * (NLDLY, CRDLY, TABDLY, ...) get one row per possible value. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* termios c_cflag (control mode) bit translation; the CBAUD and CSIZE
 * multi-bit fields get one row per supported rate/size value. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* termios c_lflag (local mode) bit translation between target and host. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/* Convert a struct termios from target to host layout: translate the four
 * flag words through the bitmask tables and remap the control-character
 * array indices.  Host c_cc slots with no target counterpart stay zeroed. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios: convert a struct termios from host
 * to target layout (flag words via the bitmask tables, c_cc remapped). */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: custom conversion callbacks plus
 * the sizes/alignments of the target and host representations. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5104 static bitmask_transtbl mmap_flags_tbl[] = {
5105 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5106 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5107 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5108 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5109 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5110 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5111 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5112 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5113 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5114 MAP_NORESERVE },
5115 { 0, 0, 0, 0 }
5118 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
/* Backing store for the emulated i386 LDT; lazily allocated by write_ldt()
 * via target_mmap() and shared process-wide (no per-thread LDT). */
static uint8_t *ldt_table;
5123 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5125 int size;
5126 void *p;
5128 if (!ldt_table)
5129 return 0;
5130 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5131 if (size > bytecount)
5132 size = bytecount;
5133 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5134 if (!p)
5135 return -TARGET_EFAULT;
5136 /* ??? Should this by byteswapped? */
5137 memcpy(p, ldt_table, size);
5138 unlock_user(p, ptr, size);
5139 return size;
/* XXX: add locking support */
/* modify_ldt(func=1 or 0x11): install one LDT descriptor from the guest's
 * struct user_desc (target_modify_ldt_ldt_s).  oldmode selects the legacy
 * modify_ldt semantics (no 'useable' bit, stricter validation).  The LDT
 * backing store is allocated lazily on first write.  The descriptor
 * bit-packing below deliberately mirrors the Linux kernel's write_ldt().
 * Returns 0 on success or -TARGET_EINVAL/-TARGET_EFAULT/-TARGET_ENOMEM. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the guest's flags word (same layout as the kernel ABI) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 (conforming code) is only valid for not-present
     * segments in new mode */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the two 32-bit halves of the segment descriptor */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5234 /* specific and weird i386 syscalls */
5235 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5236 unsigned long bytecount)
5238 abi_long ret;
5240 switch (func) {
5241 case 0:
5242 ret = read_ldt(ptr, bytecount);
5243 break;
5244 case 1:
5245 ret = write_ldt(env, ptr, bytecount, 1);
5246 break;
5247 case 0x11:
5248 ret = write_ldt(env, ptr, bytecount, 0);
5249 break;
5250 default:
5251 ret = -TARGET_ENOSYS;
5252 break;
5254 return ret;
5257 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* Emulate i386 set_thread_area(2): install a TLS descriptor into the
 * emulated GDT.  entry_number == -1 asks us to pick a free TLS slot and
 * report it back to the guest.  The descriptor packing mirrors the Linux
 * kernel's set_thread_area().  Returns 0 or -TARGET_EFAULT/-TARGET_EINVAL. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS slot for me" */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                /* tell the guest which slot was chosen */
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* unpack the guest's flags word (same layout as the kernel ABI) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    /* conforming code segments must be marked not-present */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the two 32-bit halves of the segment descriptor */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* Emulate i386 get_thread_area(2): decode the GDT descriptor for the TLS
 * slot named in the guest's struct and write base/limit/flags back to the
 * guest.  Inverse of the packing done in do_set_thread_area().
 * Returns 0 or -TARGET_EFAULT/-TARGET_EINVAL. */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* read the two 32-bit halves of the descriptor */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* unpack the descriptor bits (inverse of the install path) */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5389 #endif /* TARGET_I386 && TARGET_ABI32 */
5391 #ifndef TARGET_ABI32
5392 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5394 abi_long ret = 0;
5395 abi_ulong val;
5396 int idx;
5398 switch(code) {
5399 case TARGET_ARCH_SET_GS:
5400 case TARGET_ARCH_SET_FS:
5401 if (code == TARGET_ARCH_SET_GS)
5402 idx = R_GS;
5403 else
5404 idx = R_FS;
5405 cpu_x86_load_seg(env, idx, 0);
5406 env->segs[idx].base = addr;
5407 break;
5408 case TARGET_ARCH_GET_GS:
5409 case TARGET_ARCH_GET_FS:
5410 if (code == TARGET_ARCH_GET_GS)
5411 idx = R_GS;
5412 else
5413 idx = R_FS;
5414 val = env->segs[idx].base;
5415 if (put_user(val, addr, abi_ulong))
5416 ret = -TARGET_EFAULT;
5417 break;
5418 default:
5419 ret = -TARGET_EINVAL;
5420 break;
5422 return ret;
5424 #endif
5426 #endif /* defined(TARGET_I386) */
/* Host stack size for threads created via clone(CLONE_VM); passed to
 * pthread_attr_setstacksize() in do_fork(). */
#define NEW_STACK_SIZE 0x40000


/* Held by the parent across child-thread setup; the child blocks on it in
 * clone_func() until the parent has finished initializing TLS state. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Parent/child handshake data for clone(CLONE_VM) emulation. */
typedef struct {
    CPUArchState *env;          /* the child's CPU state */
    pthread_mutex_t mutex;      /* protects cond below */
    pthread_cond_t cond;        /* child signals readiness on this */
    pthread_t thread;
    uint32_t tid;               /* child's host tid, filled in by the child */
    abi_ulong child_tidptr;     /* guest addrs for CLONE_*_SETTID, or 0 */
    abi_ulong parent_tidptr;
    sigset_t sigmask;           /* signal mask the child should adopt */
} new_thread_info;
/* Entry point of a host pthread backing a guest clone(CLONE_VM) thread.
 * Publishes the child's tid (both into info and, if requested, into the
 * guest's CLONE_PARENT_SETTID/CLONE_CHILD_SETTID locations), restores the
 * signal mask, signals readiness to the parent, then waits on clone_lock
 * for the parent to finish TLS setup before entering cpu_loop(). */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Implements clone()/fork()/vfork() for the guest.  CLONE_VM requests
   become host pthreads sharing this process; anything else becomes a
   host fork().  vfork() is emulated with plain fork(). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_long parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  The child
           blocks on this lock in clone_func() until we release it. */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Translate a target fcntl command number to the host's.  The generic
   lock commands map to the 64-bit host variants so large-file locking
   works regardless of host word size.  Returns -TARGET_EINVAL for
   commands we don't know how to forward. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    /* 32-bit targets have distinct *64 commands; fold onto the host's. */
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    return -TARGET_EINVAL; /* not reached: every switch path returns */
}
/* Translation table for flock l_type values between target and host.
   The -1 mask fields select the whole value for comparison/replacement. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/* Copy a target 'struct flock' from guest memory into a host flock64.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* l_type constants can differ between ABIs; translate via flock_tbl. */
    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Copy a host flock64 out to a target 'struct flock' in guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    /* Translate host l_type back to the target's constants. */
    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Function-pointer types for the flock64 copy-in/copy-out helpers, so a
   caller can select the right converter for the target ABI at runtime. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5713 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI variant: copy a target flock64 (EABI layout, with its extra
 * alignment padding) from guest memory into a host flock64. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* ARM EABI variant: copy a host flock64 out to a target flock64 (EABI
 * layout) in guest memory. */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
5753 #endif
/* Copy a target 'struct flock64' from guest memory into a host flock64. */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Copy a host flock64 out to a target 'struct flock64' in guest memory. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Implement fcntl for the guest: translate the command and any pointed-to
 * structures to host form, issue the host fcntl, and convert results
 * back.  Returns the result or a target errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* Kernel filled in the conflicting lock; copy it back out. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate the host O_* bits back into target values. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* Plain integer argument: forward unchanged. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
5903 #ifdef USE_UID16
/* Narrow a 32-bit uid for a 16-bit-uid target; out-of-range values
   become the kernel's "overflow" uid 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
/* Narrow a 32-bit gid for a 16-bit-gid target; out-of-range values
   become the kernel's "overflow" gid 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
/* Widen a 16-bit uid; the 16-bit sentinel 0xffff ("no change" for the
   set*uid calls) must stay -1 rather than becoming 65535. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
/* Widen a 16-bit gid; keep the 0xffff "no change" sentinel as -1. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* With USE_UID16 ids are 16 bits wide on the target, so byteswap (and
   store to guest memory) as 16-bit quantities. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5943 #else /* !USE_UID16 */
/* Without USE_UID16 the target uses full 32-bit ids, so the high/low
   conversions are identity functions and ids swap as 32-bit values. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5967 #endif /* USE_UID16 */
5969 /* We must do direct syscalls for setting UID/GID, because we want to
5970 * implement the Linux system call semantics of "change only for this thread",
5971 * not the libc/POSIX semantics of "change for all threads in process".
5972 * (See http://ewontfix.com/17/ for more details.)
5973 * We use the 32-bit version of the syscalls if present; if it is not
5974 * then either the host architecture supports 32-bit UIDs natively with
5975 * the standard syscall, or the 16-bit UID is the best we can do.
5977 #ifdef __NR_setuid32
5978 #define __NR_sys_setuid __NR_setuid32
5979 #else
5980 #define __NR_sys_setuid __NR_setuid
5981 #endif
5982 #ifdef __NR_setgid32
5983 #define __NR_sys_setgid __NR_setgid32
5984 #else
5985 #define __NR_sys_setgid __NR_setgid
5986 #endif
5987 #ifdef __NR_setresuid32
5988 #define __NR_sys_setresuid __NR_setresuid32
5989 #else
5990 #define __NR_sys_setresuid __NR_setresuid
5991 #endif
5992 #ifdef __NR_setresgid32
5993 #define __NR_sys_setresgid __NR_setresgid32
5994 #else
5995 #define __NR_sys_setresgid __NR_setresgid
5996 #endif
5998 _syscall1(int, sys_setuid, uid_t, uid)
5999 _syscall1(int, sys_setgid, gid_t, gid)
6000 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6001 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time start-up initialisation: register the struct thunks, build
 * the target->host errno table, and patch each ioctl table entry whose
 * size field is the all-ones placeholder with the real (target-specific)
 * size of the type it points at. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
6055 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset from the two 32-bit halves a 32-bit ABI
   passes in consecutive syscall argument registers; which register
   carries the high word depends on the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
    uint64_t high, low;

#ifdef TARGET_WORDS_BIGENDIAN
    high = word0;
    low = word1;
#else
    high = word1;
    low = word0;
#endif
    return (high << 32) | low;
}
6064 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; the second
   argument exists only to keep the call sites uniform. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
6069 #endif /* TARGET_ABI_BITS != 32 */
6071 #ifdef TARGET_NR_truncate64
/* Implement truncate64: reassemble the 64-bit length from its two
 * 32-bit register halves and call the host. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    /* Some ABIs require 64-bit arguments in aligned register pairs, in
       which case the halves arrive one register later. */
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
6085 #ifdef TARGET_NR_ftruncate64
/* Implement ftruncate64: same register-pair handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    /* See target_truncate64: skip the alignment register if needed. */
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
/* Copy a timespec from guest memory into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Copy *host_ts out to a timespec in guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6125 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6126 abi_ulong target_addr)
6128 struct target_itimerspec *target_itspec;
6130 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6131 return -TARGET_EFAULT;
6134 host_itspec->it_interval.tv_sec =
6135 tswapal(target_itspec->it_interval.tv_sec);
6136 host_itspec->it_interval.tv_nsec =
6137 tswapal(target_itspec->it_interval.tv_nsec);
6138 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6139 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6141 unlock_user_struct(target_itspec, target_addr, 1);
6142 return 0;
6145 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6146 struct itimerspec *host_its)
6148 struct target_itimerspec *target_itspec;
6150 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6151 return -TARGET_EFAULT;
6154 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6155 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6157 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6158 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6160 unlock_user_struct(target_itspec, target_addr, 0);
6161 return 0;
6164 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6165 abi_ulong target_addr)
6167 struct target_sigevent *target_sevp;
6169 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6170 return -TARGET_EFAULT;
6173 /* This union is awkward on 64 bit systems because it has a 32 bit
6174 * integer and a pointer in it; we follow the conversion approach
6175 * used for handling sigval types in signal.c so the guest should get
6176 * the correct value back even if we did a 64 bit byteswap and it's
6177 * using the 32 bit integer.
6179 host_sevp->sigev_value.sival_ptr =
6180 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6181 host_sevp->sigev_signo =
6182 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6183 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6184 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6186 unlock_user_struct(target_sevp, target_addr, 1);
6187 return 0;
6190 #if defined(TARGET_NR_mlockall)
6191 static inline int target_to_host_mlockall_arg(int arg)
6193 int result = 0;
6195 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6196 result |= MCL_CURRENT;
6198 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6199 result |= MCL_FUTURE;
6201 return result;
6203 #endif
/* Write a host 'struct stat' out to the guest's stat64 layout at
 * target_addr, choosing between the ARM-EABI, stat64 and plain stat
 * layouts at compile/run time.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        /* ARM EABI uses its own padded stat64 layout. */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
/* Forward the guest futex syscall onto the host futex on the
   corresponding host address.  Returns 0/result or a target errno. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* 'val' is compared by the kernel against the (target-endian)
           word in guest memory, so it must be byteswapped. */
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
6319 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at: build a host file_handle sized from the
 * guest's handle_bytes, call the host, and copy the (opaque) handle and
 * mount id back to the guest.  Returns the host result or a target errno. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest struct is handle_bytes. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
6371 #endif
6373 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at: duplicate the guest's file_handle into a
 * host copy (fixing byte order of the header fields) and call the host.
 * Returns the new fd or a target errno. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* First field of the guest struct is handle_bytes. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
6405 #endif
6407 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert one signalfd_siginfo record to target byte order and signal
 * numbering.  NOTE: callers pass the SAME buffer as tinfo and info
 * (in-place conversion); the reads of tinfo-> fields below rely on that
 * aliasing — at that point the fields still hold host-order values. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb sits immediately after ssi_addr in the kernel's
           layout; swap it through raw pointers since the libc struct
           lacks the field. */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* reads tinfo (== info due to aliasing): the host-order errno */
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6449 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6451 int i;
6453 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6454 host_to_target_signalfd_siginfo(buf + i, buf + i);
6457 return len;
/* fd_trans descriptor registered for signalfd fds so reads get their
   payload converted to target format. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Implement signalfd4: convert the guest signal mask and flags, create
 * the host signalfd, and register the fd translator so reads are
 * converted for the guest.  Returns the fd or a target errno. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
6491 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* terminating signal is in the low 7 bits */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal is in bits 8-15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Emulate reads of /proc/self/cmdline: stream the host's own cmdline
 * into 'fd', dropping the first NUL-terminated entry (the path of the
 * qemu binary) so the guest sees only its own argv.  Returns 0 on
 * success, or -1 with errno set on error. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command.  Only scan the bytes this
               read() actually returned: scanning sizeof(buf) could match
               a stale NUL in the uninitialized tail of the buffer on a
               short read and mis-split the command line. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
/* Emulate reads of /proc/self/maps: parse the host's maps file and
 * re-emit, into 'fd', only the regions that fall inside the guest
 * address space, with addresses converted to guest view.  Returns 0 on
 * success, -1 if the host maps file cannot be opened. */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        /* Host maps line: "start-end perms offset dev:dev inode [path]" */
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            /* Clamp the end to the top of the guest address space. */
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), " [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}
6606 static int open_self_stat(void *cpu_env, int fd)
6608 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6609 TaskState *ts = cpu->opaque;
6610 abi_ulong start_stack = ts->info->start_stack;
6611 int i;
6613 for (i = 0; i < 44; i++) {
6614 char buf[128];
6615 int len;
6616 uint64_t val = 0;
6618 if (i == 0) {
6619 /* pid */
6620 val = getpid();
6621 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6622 } else if (i == 1) {
6623 /* app name */
6624 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
6625 } else if (i == 27) {
6626 /* stack bottom */
6627 val = start_stack;
6628 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
6629 } else {
6630 /* for the rest, there is MasterCard */
6631 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
6634 len = strlen(buf);
6635 if (write(fd, buf, len) != len) {
6636 return -1;
6640 return 0;
/*
 * Emulate /proc/self/auxv: dump the guest's saved auxiliary vector,
 * which the loader stashed on the target stack, into 'fd'.
 * Always returns 0, even if the guest memory could not be locked or
 * the write fell short.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;  /* guest address of the auxv */
    abi_ulong len = ts->info->auxv_len;     /* its length in bytes */
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                /* Failed or zero-length write: give up silently. */
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Rewind so the caller reads the faked file from the start. */
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced/decremented by the
         * loop above, so this unlocks with values different from the
         * ones passed to lock_user.  Harmless for a VERIFY_READ lock
         * (nothing is copied back), but looks unintended — confirm. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return 1 if 'filename' names the /proc entry 'entry' of the current
 * process — i.e. it is "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * where <pid> equals getpid() — and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric form: only our own pid counts as "myself". */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
6697 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparison used for /proc paths that are not per-process. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6703 static int open_net_route(void *cpu_env, int fd)
6705 FILE *fp;
6706 char *line = NULL;
6707 size_t len = 0;
6708 ssize_t read;
6710 fp = fopen("/proc/net/route", "r");
6711 if (fp == NULL) {
6712 return -1;
6715 /* read header */
6717 read = getline(&line, &len, fp);
6718 dprintf(fd, "%s", line);
6720 /* read routes */
6722 while ((read = getline(&line, &len, fp)) != -1) {
6723 char iface[16];
6724 uint32_t dest, gw, mask;
6725 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6726 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6727 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6728 &mask, &mtu, &window, &irtt);
6729 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6730 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6731 metric, tswap32(mask), mtu, window, irtt);
6734 free(line);
6735 fclose(fp);
6737 return 0;
6739 #endif
/*
 * openat(2) emulation: intercept opens of certain /proc files whose
 * host contents would expose qemu's own state to the guest, serving a
 * faked copy from an unlinked temp file instead; every other path is
 * forwarded to the host openat.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    /* One faked file: the name to match, a callback that writes the
     * faked contents into an fd, and the comparison function used to
     * match 'pathname' against 'filename'. */
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: reuse the fd the kernel handed us for the guest
     * binary at exec time if available, else reopen exec_path. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink at once: the fd keeps the file alive, the name goes. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill callback's errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the caller reads the faked contents from offset 0. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
6801 #define TIMER_MAGIC 0x0caf0000
6802 #define TIMER_MAGIC_MASK 0xffff0000
6804 /* Convert QEMU provided timer ID back to internal 16bit index format */
6805 static target_timer_t get_timer_id(abi_long arg)
6807 target_timer_t timerid = arg;
6809 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
6810 return -TARGET_EINVAL;
6813 timerid &= 0xffff;
6815 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
6816 return -TARGET_EINVAL;
6819 return timerid;
6822 /* do_syscall() should always have a single exit point at the end so
6823 that actions, such as logging of syscall results, can be performed.
6824 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
6825 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
6826 abi_long arg2, abi_long arg3, abi_long arg4,
6827 abi_long arg5, abi_long arg6, abi_long arg7,
6828 abi_long arg8)
6830 CPUState *cpu = ENV_GET_CPU(cpu_env);
6831 abi_long ret;
6832 struct stat st;
6833 struct statfs stfs;
6834 void *p;
6836 #if defined(DEBUG_ERESTARTSYS)
6837 /* Debug-only code for exercising the syscall-restart code paths
6838 * in the per-architecture cpu main loops: restart every syscall
6839 * the guest makes once before letting it through.
6842 static int flag;
6844 flag = !flag;
6845 if (flag) {
6846 return -TARGET_ERESTARTSYS;
6849 #endif
6851 #ifdef DEBUG
6852 gemu_log("syscall %d", num);
6853 #endif
6854 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
6855 if(do_strace)
6856 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
6858 switch(num) {
6859 case TARGET_NR_exit:
6860 /* In old applications this may be used to implement _exit(2).
6861 However in threaded applictions it is used for thread termination,
6862 and _exit_group is used for application termination.
6863 Do thread termination if we have more then one thread. */
6865 if (block_signals()) {
6866 ret = -TARGET_ERESTARTSYS;
6867 break;
6870 if (CPU_NEXT(first_cpu)) {
6871 TaskState *ts;
6873 cpu_list_lock();
6874 /* Remove the CPU from the list. */
6875 QTAILQ_REMOVE(&cpus, cpu, node);
6876 cpu_list_unlock();
6877 ts = cpu->opaque;
6878 if (ts->child_tidptr) {
6879 put_user_u32(0, ts->child_tidptr);
6880 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6881 NULL, NULL, 0);
6883 thread_cpu = NULL;
6884 object_unref(OBJECT(cpu));
6885 g_free(ts);
6886 rcu_unregister_thread();
6887 pthread_exit(NULL);
6889 #ifdef TARGET_GPROF
6890 _mcleanup();
6891 #endif
6892 gdb_exit(cpu_env, arg1);
6893 _exit(arg1);
6894 ret = 0; /* avoid warning */
6895 break;
6896 case TARGET_NR_read:
6897 if (arg3 == 0)
6898 ret = 0;
6899 else {
6900 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6901 goto efault;
6902 ret = get_errno(safe_read(arg1, p, arg3));
6903 if (ret >= 0 &&
6904 fd_trans_host_to_target_data(arg1)) {
6905 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6907 unlock_user(p, arg2, ret);
6909 break;
6910 case TARGET_NR_write:
6911 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6912 goto efault;
6913 ret = get_errno(safe_write(arg1, p, arg3));
6914 unlock_user(p, arg2, 0);
6915 break;
6916 #ifdef TARGET_NR_open
6917 case TARGET_NR_open:
6918 if (!(p = lock_user_string(arg1)))
6919 goto efault;
6920 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6921 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6922 arg3));
6923 fd_trans_unregister(ret);
6924 unlock_user(p, arg1, 0);
6925 break;
6926 #endif
6927 case TARGET_NR_openat:
6928 if (!(p = lock_user_string(arg2)))
6929 goto efault;
6930 ret = get_errno(do_openat(cpu_env, arg1, p,
6931 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6932 arg4));
6933 fd_trans_unregister(ret);
6934 unlock_user(p, arg2, 0);
6935 break;
6936 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6937 case TARGET_NR_name_to_handle_at:
6938 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6939 break;
6940 #endif
6941 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6942 case TARGET_NR_open_by_handle_at:
6943 ret = do_open_by_handle_at(arg1, arg2, arg3);
6944 fd_trans_unregister(ret);
6945 break;
6946 #endif
6947 case TARGET_NR_close:
6948 fd_trans_unregister(arg1);
6949 ret = get_errno(close(arg1));
6950 break;
6951 case TARGET_NR_brk:
6952 ret = do_brk(arg1);
6953 break;
6954 #ifdef TARGET_NR_fork
6955 case TARGET_NR_fork:
6956 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6957 break;
6958 #endif
6959 #ifdef TARGET_NR_waitpid
6960 case TARGET_NR_waitpid:
6962 int status;
6963 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6964 if (!is_error(ret) && arg2 && ret
6965 && put_user_s32(host_to_target_waitstatus(status), arg2))
6966 goto efault;
6968 break;
6969 #endif
6970 #ifdef TARGET_NR_waitid
6971 case TARGET_NR_waitid:
6973 siginfo_t info;
6974 info.si_pid = 0;
6975 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6976 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6977 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6978 goto efault;
6979 host_to_target_siginfo(p, &info);
6980 unlock_user(p, arg3, sizeof(target_siginfo_t));
6983 break;
6984 #endif
6985 #ifdef TARGET_NR_creat /* not on alpha */
6986 case TARGET_NR_creat:
6987 if (!(p = lock_user_string(arg1)))
6988 goto efault;
6989 ret = get_errno(creat(p, arg2));
6990 fd_trans_unregister(ret);
6991 unlock_user(p, arg1, 0);
6992 break;
6993 #endif
6994 #ifdef TARGET_NR_link
6995 case TARGET_NR_link:
6997 void * p2;
6998 p = lock_user_string(arg1);
6999 p2 = lock_user_string(arg2);
7000 if (!p || !p2)
7001 ret = -TARGET_EFAULT;
7002 else
7003 ret = get_errno(link(p, p2));
7004 unlock_user(p2, arg2, 0);
7005 unlock_user(p, arg1, 0);
7007 break;
7008 #endif
7009 #if defined(TARGET_NR_linkat)
7010 case TARGET_NR_linkat:
7012 void * p2 = NULL;
7013 if (!arg2 || !arg4)
7014 goto efault;
7015 p = lock_user_string(arg2);
7016 p2 = lock_user_string(arg4);
7017 if (!p || !p2)
7018 ret = -TARGET_EFAULT;
7019 else
7020 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7021 unlock_user(p, arg2, 0);
7022 unlock_user(p2, arg4, 0);
7024 break;
7025 #endif
7026 #ifdef TARGET_NR_unlink
7027 case TARGET_NR_unlink:
7028 if (!(p = lock_user_string(arg1)))
7029 goto efault;
7030 ret = get_errno(unlink(p));
7031 unlock_user(p, arg1, 0);
7032 break;
7033 #endif
7034 #if defined(TARGET_NR_unlinkat)
7035 case TARGET_NR_unlinkat:
7036 if (!(p = lock_user_string(arg2)))
7037 goto efault;
7038 ret = get_errno(unlinkat(arg1, p, arg3));
7039 unlock_user(p, arg2, 0);
7040 break;
7041 #endif
7042 case TARGET_NR_execve:
7044 char **argp, **envp;
7045 int argc, envc;
7046 abi_ulong gp;
7047 abi_ulong guest_argp;
7048 abi_ulong guest_envp;
7049 abi_ulong addr;
7050 char **q;
7051 int total_size = 0;
7053 argc = 0;
7054 guest_argp = arg2;
7055 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7056 if (get_user_ual(addr, gp))
7057 goto efault;
7058 if (!addr)
7059 break;
7060 argc++;
7062 envc = 0;
7063 guest_envp = arg3;
7064 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7065 if (get_user_ual(addr, gp))
7066 goto efault;
7067 if (!addr)
7068 break;
7069 envc++;
7072 argp = alloca((argc + 1) * sizeof(void *));
7073 envp = alloca((envc + 1) * sizeof(void *));
7075 for (gp = guest_argp, q = argp; gp;
7076 gp += sizeof(abi_ulong), q++) {
7077 if (get_user_ual(addr, gp))
7078 goto execve_efault;
7079 if (!addr)
7080 break;
7081 if (!(*q = lock_user_string(addr)))
7082 goto execve_efault;
7083 total_size += strlen(*q) + 1;
7085 *q = NULL;
7087 for (gp = guest_envp, q = envp; gp;
7088 gp += sizeof(abi_ulong), q++) {
7089 if (get_user_ual(addr, gp))
7090 goto execve_efault;
7091 if (!addr)
7092 break;
7093 if (!(*q = lock_user_string(addr)))
7094 goto execve_efault;
7095 total_size += strlen(*q) + 1;
7097 *q = NULL;
7099 if (!(p = lock_user_string(arg1)))
7100 goto execve_efault;
7101 /* Although execve() is not an interruptible syscall it is
7102 * a special case where we must use the safe_syscall wrapper:
7103 * if we allow a signal to happen before we make the host
7104 * syscall then we will 'lose' it, because at the point of
7105 * execve the process leaves QEMU's control. So we use the
7106 * safe syscall wrapper to ensure that we either take the
7107 * signal as a guest signal, or else it does not happen
7108 * before the execve completes and makes it the other
7109 * program's problem.
7111 ret = get_errno(safe_execve(p, argp, envp));
7112 unlock_user(p, arg1, 0);
7114 goto execve_end;
7116 execve_efault:
7117 ret = -TARGET_EFAULT;
7119 execve_end:
7120 for (gp = guest_argp, q = argp; *q;
7121 gp += sizeof(abi_ulong), q++) {
7122 if (get_user_ual(addr, gp)
7123 || !addr)
7124 break;
7125 unlock_user(*q, addr, 0);
7127 for (gp = guest_envp, q = envp; *q;
7128 gp += sizeof(abi_ulong), q++) {
7129 if (get_user_ual(addr, gp)
7130 || !addr)
7131 break;
7132 unlock_user(*q, addr, 0);
7135 break;
7136 case TARGET_NR_chdir:
7137 if (!(p = lock_user_string(arg1)))
7138 goto efault;
7139 ret = get_errno(chdir(p));
7140 unlock_user(p, arg1, 0);
7141 break;
7142 #ifdef TARGET_NR_time
7143 case TARGET_NR_time:
7145 time_t host_time;
7146 ret = get_errno(time(&host_time));
7147 if (!is_error(ret)
7148 && arg1
7149 && put_user_sal(host_time, arg1))
7150 goto efault;
7152 break;
7153 #endif
7154 #ifdef TARGET_NR_mknod
7155 case TARGET_NR_mknod:
7156 if (!(p = lock_user_string(arg1)))
7157 goto efault;
7158 ret = get_errno(mknod(p, arg2, arg3));
7159 unlock_user(p, arg1, 0);
7160 break;
7161 #endif
7162 #if defined(TARGET_NR_mknodat)
7163 case TARGET_NR_mknodat:
7164 if (!(p = lock_user_string(arg2)))
7165 goto efault;
7166 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7167 unlock_user(p, arg2, 0);
7168 break;
7169 #endif
7170 #ifdef TARGET_NR_chmod
7171 case TARGET_NR_chmod:
7172 if (!(p = lock_user_string(arg1)))
7173 goto efault;
7174 ret = get_errno(chmod(p, arg2));
7175 unlock_user(p, arg1, 0);
7176 break;
7177 #endif
7178 #ifdef TARGET_NR_break
7179 case TARGET_NR_break:
7180 goto unimplemented;
7181 #endif
7182 #ifdef TARGET_NR_oldstat
7183 case TARGET_NR_oldstat:
7184 goto unimplemented;
7185 #endif
7186 case TARGET_NR_lseek:
7187 ret = get_errno(lseek(arg1, arg2, arg3));
7188 break;
7189 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7190 /* Alpha specific */
7191 case TARGET_NR_getxpid:
7192 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7193 ret = get_errno(getpid());
7194 break;
7195 #endif
7196 #ifdef TARGET_NR_getpid
7197 case TARGET_NR_getpid:
7198 ret = get_errno(getpid());
7199 break;
7200 #endif
7201 case TARGET_NR_mount:
7203 /* need to look at the data field */
7204 void *p2, *p3;
7206 if (arg1) {
7207 p = lock_user_string(arg1);
7208 if (!p) {
7209 goto efault;
7211 } else {
7212 p = NULL;
7215 p2 = lock_user_string(arg2);
7216 if (!p2) {
7217 if (arg1) {
7218 unlock_user(p, arg1, 0);
7220 goto efault;
7223 if (arg3) {
7224 p3 = lock_user_string(arg3);
7225 if (!p3) {
7226 if (arg1) {
7227 unlock_user(p, arg1, 0);
7229 unlock_user(p2, arg2, 0);
7230 goto efault;
7232 } else {
7233 p3 = NULL;
7236 /* FIXME - arg5 should be locked, but it isn't clear how to
7237 * do that since it's not guaranteed to be a NULL-terminated
7238 * string.
7240 if (!arg5) {
7241 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7242 } else {
7243 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7245 ret = get_errno(ret);
7247 if (arg1) {
7248 unlock_user(p, arg1, 0);
7250 unlock_user(p2, arg2, 0);
7251 if (arg3) {
7252 unlock_user(p3, arg3, 0);
7255 break;
7256 #ifdef TARGET_NR_umount
7257 case TARGET_NR_umount:
7258 if (!(p = lock_user_string(arg1)))
7259 goto efault;
7260 ret = get_errno(umount(p));
7261 unlock_user(p, arg1, 0);
7262 break;
7263 #endif
7264 #ifdef TARGET_NR_stime /* not on alpha */
7265 case TARGET_NR_stime:
7267 time_t host_time;
7268 if (get_user_sal(host_time, arg1))
7269 goto efault;
7270 ret = get_errno(stime(&host_time));
7272 break;
7273 #endif
7274 case TARGET_NR_ptrace:
7275 goto unimplemented;
7276 #ifdef TARGET_NR_alarm /* not on alpha */
7277 case TARGET_NR_alarm:
7278 ret = alarm(arg1);
7279 break;
7280 #endif
7281 #ifdef TARGET_NR_oldfstat
7282 case TARGET_NR_oldfstat:
7283 goto unimplemented;
7284 #endif
7285 #ifdef TARGET_NR_pause /* not on alpha */
7286 case TARGET_NR_pause:
7287 if (!block_signals()) {
7288 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7290 ret = -TARGET_EINTR;
7291 break;
7292 #endif
7293 #ifdef TARGET_NR_utime
7294 case TARGET_NR_utime:
7296 struct utimbuf tbuf, *host_tbuf;
7297 struct target_utimbuf *target_tbuf;
7298 if (arg2) {
7299 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7300 goto efault;
7301 tbuf.actime = tswapal(target_tbuf->actime);
7302 tbuf.modtime = tswapal(target_tbuf->modtime);
7303 unlock_user_struct(target_tbuf, arg2, 0);
7304 host_tbuf = &tbuf;
7305 } else {
7306 host_tbuf = NULL;
7308 if (!(p = lock_user_string(arg1)))
7309 goto efault;
7310 ret = get_errno(utime(p, host_tbuf));
7311 unlock_user(p, arg1, 0);
7313 break;
7314 #endif
7315 #ifdef TARGET_NR_utimes
7316 case TARGET_NR_utimes:
7318 struct timeval *tvp, tv[2];
7319 if (arg2) {
7320 if (copy_from_user_timeval(&tv[0], arg2)
7321 || copy_from_user_timeval(&tv[1],
7322 arg2 + sizeof(struct target_timeval)))
7323 goto efault;
7324 tvp = tv;
7325 } else {
7326 tvp = NULL;
7328 if (!(p = lock_user_string(arg1)))
7329 goto efault;
7330 ret = get_errno(utimes(p, tvp));
7331 unlock_user(p, arg1, 0);
7333 break;
7334 #endif
7335 #if defined(TARGET_NR_futimesat)
7336 case TARGET_NR_futimesat:
7338 struct timeval *tvp, tv[2];
7339 if (arg3) {
7340 if (copy_from_user_timeval(&tv[0], arg3)
7341 || copy_from_user_timeval(&tv[1],
7342 arg3 + sizeof(struct target_timeval)))
7343 goto efault;
7344 tvp = tv;
7345 } else {
7346 tvp = NULL;
7348 if (!(p = lock_user_string(arg2)))
7349 goto efault;
7350 ret = get_errno(futimesat(arg1, path(p), tvp));
7351 unlock_user(p, arg2, 0);
7353 break;
7354 #endif
7355 #ifdef TARGET_NR_stty
7356 case TARGET_NR_stty:
7357 goto unimplemented;
7358 #endif
7359 #ifdef TARGET_NR_gtty
7360 case TARGET_NR_gtty:
7361 goto unimplemented;
7362 #endif
7363 #ifdef TARGET_NR_access
7364 case TARGET_NR_access:
7365 if (!(p = lock_user_string(arg1)))
7366 goto efault;
7367 ret = get_errno(access(path(p), arg2));
7368 unlock_user(p, arg1, 0);
7369 break;
7370 #endif
7371 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7372 case TARGET_NR_faccessat:
7373 if (!(p = lock_user_string(arg2)))
7374 goto efault;
7375 ret = get_errno(faccessat(arg1, p, arg3, 0));
7376 unlock_user(p, arg2, 0);
7377 break;
7378 #endif
7379 #ifdef TARGET_NR_nice /* not on alpha */
7380 case TARGET_NR_nice:
7381 ret = get_errno(nice(arg1));
7382 break;
7383 #endif
7384 #ifdef TARGET_NR_ftime
7385 case TARGET_NR_ftime:
7386 goto unimplemented;
7387 #endif
7388 case TARGET_NR_sync:
7389 sync();
7390 ret = 0;
7391 break;
7392 case TARGET_NR_kill:
7393 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7394 break;
7395 #ifdef TARGET_NR_rename
7396 case TARGET_NR_rename:
7398 void *p2;
7399 p = lock_user_string(arg1);
7400 p2 = lock_user_string(arg2);
7401 if (!p || !p2)
7402 ret = -TARGET_EFAULT;
7403 else
7404 ret = get_errno(rename(p, p2));
7405 unlock_user(p2, arg2, 0);
7406 unlock_user(p, arg1, 0);
7408 break;
7409 #endif
7410 #if defined(TARGET_NR_renameat)
7411 case TARGET_NR_renameat:
7413 void *p2;
7414 p = lock_user_string(arg2);
7415 p2 = lock_user_string(arg4);
7416 if (!p || !p2)
7417 ret = -TARGET_EFAULT;
7418 else
7419 ret = get_errno(renameat(arg1, p, arg3, p2));
7420 unlock_user(p2, arg4, 0);
7421 unlock_user(p, arg2, 0);
7423 break;
7424 #endif
7425 #ifdef TARGET_NR_mkdir
7426 case TARGET_NR_mkdir:
7427 if (!(p = lock_user_string(arg1)))
7428 goto efault;
7429 ret = get_errno(mkdir(p, arg2));
7430 unlock_user(p, arg1, 0);
7431 break;
7432 #endif
7433 #if defined(TARGET_NR_mkdirat)
7434 case TARGET_NR_mkdirat:
7435 if (!(p = lock_user_string(arg2)))
7436 goto efault;
7437 ret = get_errno(mkdirat(arg1, p, arg3));
7438 unlock_user(p, arg2, 0);
7439 break;
7440 #endif
7441 #ifdef TARGET_NR_rmdir
7442 case TARGET_NR_rmdir:
7443 if (!(p = lock_user_string(arg1)))
7444 goto efault;
7445 ret = get_errno(rmdir(p));
7446 unlock_user(p, arg1, 0);
7447 break;
7448 #endif
7449 case TARGET_NR_dup:
7450 ret = get_errno(dup(arg1));
7451 if (ret >= 0) {
7452 fd_trans_dup(arg1, ret);
7454 break;
7455 #ifdef TARGET_NR_pipe
7456 case TARGET_NR_pipe:
7457 ret = do_pipe(cpu_env, arg1, 0, 0);
7458 break;
7459 #endif
7460 #ifdef TARGET_NR_pipe2
7461 case TARGET_NR_pipe2:
7462 ret = do_pipe(cpu_env, arg1,
7463 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7464 break;
7465 #endif
7466 case TARGET_NR_times:
7468 struct target_tms *tmsp;
7469 struct tms tms;
7470 ret = get_errno(times(&tms));
7471 if (arg1) {
7472 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7473 if (!tmsp)
7474 goto efault;
7475 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7476 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7477 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7478 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7480 if (!is_error(ret))
7481 ret = host_to_target_clock_t(ret);
7483 break;
7484 #ifdef TARGET_NR_prof
7485 case TARGET_NR_prof:
7486 goto unimplemented;
7487 #endif
7488 #ifdef TARGET_NR_signal
7489 case TARGET_NR_signal:
7490 goto unimplemented;
7491 #endif
7492 case TARGET_NR_acct:
7493 if (arg1 == 0) {
7494 ret = get_errno(acct(NULL));
7495 } else {
7496 if (!(p = lock_user_string(arg1)))
7497 goto efault;
7498 ret = get_errno(acct(path(p)));
7499 unlock_user(p, arg1, 0);
7501 break;
7502 #ifdef TARGET_NR_umount2
7503 case TARGET_NR_umount2:
7504 if (!(p = lock_user_string(arg1)))
7505 goto efault;
7506 ret = get_errno(umount2(p, arg2));
7507 unlock_user(p, arg1, 0);
7508 break;
7509 #endif
7510 #ifdef TARGET_NR_lock
7511 case TARGET_NR_lock:
7512 goto unimplemented;
7513 #endif
7514 case TARGET_NR_ioctl:
7515 ret = do_ioctl(arg1, arg2, arg3);
7516 break;
7517 case TARGET_NR_fcntl:
7518 ret = do_fcntl(arg1, arg2, arg3);
7519 break;
7520 #ifdef TARGET_NR_mpx
7521 case TARGET_NR_mpx:
7522 goto unimplemented;
7523 #endif
7524 case TARGET_NR_setpgid:
7525 ret = get_errno(setpgid(arg1, arg2));
7526 break;
7527 #ifdef TARGET_NR_ulimit
7528 case TARGET_NR_ulimit:
7529 goto unimplemented;
7530 #endif
7531 #ifdef TARGET_NR_oldolduname
7532 case TARGET_NR_oldolduname:
7533 goto unimplemented;
7534 #endif
7535 case TARGET_NR_umask:
7536 ret = get_errno(umask(arg1));
7537 break;
7538 case TARGET_NR_chroot:
7539 if (!(p = lock_user_string(arg1)))
7540 goto efault;
7541 ret = get_errno(chroot(p));
7542 unlock_user(p, arg1, 0);
7543 break;
7544 #ifdef TARGET_NR_ustat
7545 case TARGET_NR_ustat:
7546 goto unimplemented;
7547 #endif
7548 #ifdef TARGET_NR_dup2
7549 case TARGET_NR_dup2:
7550 ret = get_errno(dup2(arg1, arg2));
7551 if (ret >= 0) {
7552 fd_trans_dup(arg1, arg2);
7554 break;
7555 #endif
7556 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7557 case TARGET_NR_dup3:
7558 ret = get_errno(dup3(arg1, arg2, arg3));
7559 if (ret >= 0) {
7560 fd_trans_dup(arg1, arg2);
7562 break;
7563 #endif
7564 #ifdef TARGET_NR_getppid /* not on alpha */
7565 case TARGET_NR_getppid:
7566 ret = get_errno(getppid());
7567 break;
7568 #endif
7569 #ifdef TARGET_NR_getpgrp
7570 case TARGET_NR_getpgrp:
7571 ret = get_errno(getpgrp());
7572 break;
7573 #endif
7574 case TARGET_NR_setsid:
7575 ret = get_errno(setsid());
7576 break;
7577 #ifdef TARGET_NR_sigaction
7578 case TARGET_NR_sigaction:
7580 #if defined(TARGET_ALPHA)
7581 struct target_sigaction act, oact, *pact = 0;
7582 struct target_old_sigaction *old_act;
7583 if (arg2) {
7584 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7585 goto efault;
7586 act._sa_handler = old_act->_sa_handler;
7587 target_siginitset(&act.sa_mask, old_act->sa_mask);
7588 act.sa_flags = old_act->sa_flags;
7589 act.sa_restorer = 0;
7590 unlock_user_struct(old_act, arg2, 0);
7591 pact = &act;
7593 ret = get_errno(do_sigaction(arg1, pact, &oact));
7594 if (!is_error(ret) && arg3) {
7595 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7596 goto efault;
7597 old_act->_sa_handler = oact._sa_handler;
7598 old_act->sa_mask = oact.sa_mask.sig[0];
7599 old_act->sa_flags = oact.sa_flags;
7600 unlock_user_struct(old_act, arg3, 1);
7602 #elif defined(TARGET_MIPS)
7603 struct target_sigaction act, oact, *pact, *old_act;
7605 if (arg2) {
7606 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7607 goto efault;
7608 act._sa_handler = old_act->_sa_handler;
7609 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7610 act.sa_flags = old_act->sa_flags;
7611 unlock_user_struct(old_act, arg2, 0);
7612 pact = &act;
7613 } else {
7614 pact = NULL;
7617 ret = get_errno(do_sigaction(arg1, pact, &oact));
7619 if (!is_error(ret) && arg3) {
7620 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7621 goto efault;
7622 old_act->_sa_handler = oact._sa_handler;
7623 old_act->sa_flags = oact.sa_flags;
7624 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7625 old_act->sa_mask.sig[1] = 0;
7626 old_act->sa_mask.sig[2] = 0;
7627 old_act->sa_mask.sig[3] = 0;
7628 unlock_user_struct(old_act, arg3, 1);
7630 #else
7631 struct target_old_sigaction *old_act;
7632 struct target_sigaction act, oact, *pact;
7633 if (arg2) {
7634 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7635 goto efault;
7636 act._sa_handler = old_act->_sa_handler;
7637 target_siginitset(&act.sa_mask, old_act->sa_mask);
7638 act.sa_flags = old_act->sa_flags;
7639 act.sa_restorer = old_act->sa_restorer;
7640 unlock_user_struct(old_act, arg2, 0);
7641 pact = &act;
7642 } else {
7643 pact = NULL;
7645 ret = get_errno(do_sigaction(arg1, pact, &oact));
7646 if (!is_error(ret) && arg3) {
7647 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7648 goto efault;
7649 old_act->_sa_handler = oact._sa_handler;
7650 old_act->sa_mask = oact.sa_mask.sig[0];
7651 old_act->sa_flags = oact.sa_flags;
7652 old_act->sa_restorer = oact.sa_restorer;
7653 unlock_user_struct(old_act, arg3, 1);
7655 #endif
7657 break;
7658 #endif
7659 case TARGET_NR_rt_sigaction:
7661 #if defined(TARGET_ALPHA)
7662 struct target_sigaction act, oact, *pact = 0;
7663 struct target_rt_sigaction *rt_act;
7664 /* ??? arg4 == sizeof(sigset_t). */
7665 if (arg2) {
7666 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7667 goto efault;
7668 act._sa_handler = rt_act->_sa_handler;
7669 act.sa_mask = rt_act->sa_mask;
7670 act.sa_flags = rt_act->sa_flags;
7671 act.sa_restorer = arg5;
7672 unlock_user_struct(rt_act, arg2, 0);
7673 pact = &act;
7675 ret = get_errno(do_sigaction(arg1, pact, &oact));
7676 if (!is_error(ret) && arg3) {
7677 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7678 goto efault;
7679 rt_act->_sa_handler = oact._sa_handler;
7680 rt_act->sa_mask = oact.sa_mask;
7681 rt_act->sa_flags = oact.sa_flags;
7682 unlock_user_struct(rt_act, arg3, 1);
7684 #else
7685 struct target_sigaction *act;
7686 struct target_sigaction *oact;
7688 if (arg2) {
7689 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
7690 goto efault;
7691 } else
7692 act = NULL;
7693 if (arg3) {
7694 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7695 ret = -TARGET_EFAULT;
7696 goto rt_sigaction_fail;
7698 } else
7699 oact = NULL;
7700 ret = get_errno(do_sigaction(arg1, act, oact));
7701 rt_sigaction_fail:
7702 if (act)
7703 unlock_user_struct(act, arg2, 0);
7704 if (oact)
7705 unlock_user_struct(oact, arg3, 1);
7706 #endif
7708 break;
7709 #ifdef TARGET_NR_sgetmask /* not on alpha */
7710 case TARGET_NR_sgetmask:
7712 sigset_t cur_set;
7713 abi_ulong target_set;
7714 ret = do_sigprocmask(0, NULL, &cur_set);
7715 if (!ret) {
7716 host_to_target_old_sigset(&target_set, &cur_set);
7717 ret = target_set;
7720 break;
7721 #endif
7722 #ifdef TARGET_NR_ssetmask /* not on alpha */
7723 case TARGET_NR_ssetmask:
7725 sigset_t set, oset, cur_set;
7726 abi_ulong target_set = arg1;
7727 /* We only have one word of the new mask so we must read
7728 * the rest of it with do_sigprocmask() and OR in this word.
7729 * We are guaranteed that a do_sigprocmask() that only queries
7730 * the signal mask will not fail.
7732 ret = do_sigprocmask(0, NULL, &cur_set);
7733 assert(!ret);
7734 target_to_host_old_sigset(&set, &target_set);
7735 sigorset(&set, &set, &cur_set);
7736 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7737 if (!ret) {
7738 host_to_target_old_sigset(&target_set, &oset);
7739 ret = target_set;
7742 break;
7743 #endif
7744 #ifdef TARGET_NR_sigprocmask
7745 case TARGET_NR_sigprocmask:
7747 #if defined(TARGET_ALPHA)
7748 sigset_t set, oldset;
7749 abi_ulong mask;
7750 int how;
7752 switch (arg1) {
7753 case TARGET_SIG_BLOCK:
7754 how = SIG_BLOCK;
7755 break;
7756 case TARGET_SIG_UNBLOCK:
7757 how = SIG_UNBLOCK;
7758 break;
7759 case TARGET_SIG_SETMASK:
7760 how = SIG_SETMASK;
7761 break;
7762 default:
7763 ret = -TARGET_EINVAL;
7764 goto fail;
7766 mask = arg2;
7767 target_to_host_old_sigset(&set, &mask);
7769 ret = do_sigprocmask(how, &set, &oldset);
7770 if (!is_error(ret)) {
7771 host_to_target_old_sigset(&mask, &oldset);
7772 ret = mask;
7773 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7775 #else
7776 sigset_t set, oldset, *set_ptr;
7777 int how;
7779 if (arg2) {
7780 switch (arg1) {
7781 case TARGET_SIG_BLOCK:
7782 how = SIG_BLOCK;
7783 break;
7784 case TARGET_SIG_UNBLOCK:
7785 how = SIG_UNBLOCK;
7786 break;
7787 case TARGET_SIG_SETMASK:
7788 how = SIG_SETMASK;
7789 break;
7790 default:
7791 ret = -TARGET_EINVAL;
7792 goto fail;
7794 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7795 goto efault;
7796 target_to_host_old_sigset(&set, p);
7797 unlock_user(p, arg2, 0);
7798 set_ptr = &set;
7799 } else {
7800 how = 0;
7801 set_ptr = NULL;
7803 ret = do_sigprocmask(how, set_ptr, &oldset);
7804 if (!is_error(ret) && arg3) {
7805 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7806 goto efault;
7807 host_to_target_old_sigset(p, &oldset);
7808 unlock_user(p, arg3, sizeof(target_sigset_t));
7810 #endif
7812 break;
7813 #endif
7814 case TARGET_NR_rt_sigprocmask:
7816 int how = arg1;
7817 sigset_t set, oldset, *set_ptr;
7819 if (arg2) {
7820 switch(how) {
7821 case TARGET_SIG_BLOCK:
7822 how = SIG_BLOCK;
7823 break;
7824 case TARGET_SIG_UNBLOCK:
7825 how = SIG_UNBLOCK;
7826 break;
7827 case TARGET_SIG_SETMASK:
7828 how = SIG_SETMASK;
7829 break;
7830 default:
7831 ret = -TARGET_EINVAL;
7832 goto fail;
7834 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7835 goto efault;
7836 target_to_host_sigset(&set, p);
7837 unlock_user(p, arg2, 0);
7838 set_ptr = &set;
7839 } else {
7840 how = 0;
7841 set_ptr = NULL;
7843 ret = do_sigprocmask(how, set_ptr, &oldset);
7844 if (!is_error(ret) && arg3) {
7845 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7846 goto efault;
7847 host_to_target_sigset(p, &oldset);
7848 unlock_user(p, arg3, sizeof(target_sigset_t));
7851 break;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t pending;

            /* Query the host, then copy the result out in the old
             * (non-rt) target sigset layout. */
            ret = get_errno(sigpending(&pending));
            if (!is_error(ret)) {
                p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0);
                if (!p) {
                    goto efault;
                }
                host_to_target_old_sigset(p, &pending);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        break;
#endif
7866 case TARGET_NR_rt_sigpending:
7868 sigset_t set;
7869 ret = get_errno(sigpending(&set));
7870 if (!is_error(ret)) {
7871 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7872 goto efault;
7873 host_to_target_sigset(p, &set);
7874 unlock_user(p, arg1, sizeof(target_sigset_t));
7877 break;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            /* Alpha passes the mask by value rather than by address. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                goto efault;
            }
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            /* Unless the syscall is being restarted, record that we are
             * inside a sigsuspend so signal delivery restores the mask. */
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        break;
#endif
7899 case TARGET_NR_rt_sigsuspend:
7901 TaskState *ts = cpu->opaque;
7902 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7903 goto efault;
7904 target_to_host_sigset(&ts->sigsuspend_mask, p);
7905 unlock_user(p, arg1, 0);
7906 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
7907 SIGSET_T_SIZE));
7908 if (ret != -TARGET_ERESTARTSYS) {
7909 ts->in_sigsuspend = 1;
7912 break;
7913 case TARGET_NR_rt_sigtimedwait:
7915 sigset_t set;
7916 struct timespec uts, *puts;
7917 siginfo_t uinfo;
7919 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7920 goto efault;
7921 target_to_host_sigset(&set, p);
7922 unlock_user(p, arg1, 0);
7923 if (arg3) {
7924 puts = &uts;
7925 target_to_host_timespec(puts, arg3);
7926 } else {
7927 puts = NULL;
7929 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
7930 SIGSET_T_SIZE));
7931 if (!is_error(ret)) {
7932 if (arg2) {
7933 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7935 if (!p) {
7936 goto efault;
7938 host_to_target_siginfo(p, &uinfo);
7939 unlock_user(p, arg2, sizeof(target_siginfo_t));
7941 ret = host_to_target_signal(ret);
7944 break;
7945 case TARGET_NR_rt_sigqueueinfo:
7947 siginfo_t uinfo;
7949 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
7950 if (!p) {
7951 goto efault;
7953 target_to_host_siginfo(&uinfo, p);
7954 unlock_user(p, arg1, 0);
7955 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7957 break;
7958 #ifdef TARGET_NR_sigreturn
7959 case TARGET_NR_sigreturn:
7960 if (block_signals()) {
7961 ret = -TARGET_ERESTARTSYS;
7962 } else {
7963 ret = do_sigreturn(cpu_env);
7965 break;
7966 #endif
7967 case TARGET_NR_rt_sigreturn:
7968 if (block_signals()) {
7969 ret = -TARGET_ERESTARTSYS;
7970 } else {
7971 ret = do_rt_sigreturn(cpu_env);
7973 break;
7974 case TARGET_NR_sethostname:
7975 if (!(p = lock_user_string(arg1)))
7976 goto efault;
7977 ret = get_errno(sethostname(p, arg2));
7978 unlock_user(p, arg1, 0);
7979 break;
7980 case TARGET_NR_setrlimit:
7982 int resource = target_to_host_resource(arg1);
7983 struct target_rlimit *target_rlim;
7984 struct rlimit rlim;
7985 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7986 goto efault;
7987 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7988 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7989 unlock_user_struct(target_rlim, arg2, 0);
7990 ret = get_errno(setrlimit(resource, &rlim));
7992 break;
7993 case TARGET_NR_getrlimit:
7995 int resource = target_to_host_resource(arg1);
7996 struct target_rlimit *target_rlim;
7997 struct rlimit rlim;
7999 ret = get_errno(getrlimit(resource, &rlim));
8000 if (!is_error(ret)) {
8001 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8002 goto efault;
8003 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8004 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8005 unlock_user_struct(target_rlim, arg2, 1);
8008 break;
8009 case TARGET_NR_getrusage:
8011 struct rusage rusage;
8012 ret = get_errno(getrusage(arg1, &rusage));
8013 if (!is_error(ret)) {
8014 ret = host_to_target_rusage(arg2, &rusage);
8017 break;
8018 case TARGET_NR_gettimeofday:
8020 struct timeval tv;
8021 ret = get_errno(gettimeofday(&tv, NULL));
8022 if (!is_error(ret)) {
8023 if (copy_to_user_timeval(arg1, &tv))
8024 goto efault;
8027 break;
8028 case TARGET_NR_settimeofday:
8030 struct timeval tv, *ptv = NULL;
8031 struct timezone tz, *ptz = NULL;
8033 if (arg1) {
8034 if (copy_from_user_timeval(&tv, arg1)) {
8035 goto efault;
8037 ptv = &tv;
8040 if (arg2) {
8041 if (copy_from_user_timezone(&tz, arg2)) {
8042 goto efault;
8044 ptz = &tz;
8047 ret = get_errno(settimeofday(ptv, ptz));
8049 break;
8050 #if defined(TARGET_NR_select)
8051 case TARGET_NR_select:
8052 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8053 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8054 #else
8056 struct target_sel_arg_struct *sel;
8057 abi_ulong inp, outp, exp, tvp;
8058 long nsel;
8060 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8061 goto efault;
8062 nsel = tswapal(sel->n);
8063 inp = tswapal(sel->inp);
8064 outp = tswapal(sel->outp);
8065 exp = tswapal(sel->exp);
8066 tvp = tswapal(sel->tvp);
8067 unlock_user_struct(sel, arg1, 0);
8068 ret = do_select(nsel, inp, outp, exp, tvp);
8070 #endif
8071 break;
8072 #endif
8073 #ifdef TARGET_NR_pselect6
8074 case TARGET_NR_pselect6:
8076 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8077 fd_set rfds, wfds, efds;
8078 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8079 struct timespec ts, *ts_ptr;
8082 * The 6th arg is actually two args smashed together,
8083 * so we cannot use the C library.
8085 sigset_t set;
8086 struct {
8087 sigset_t *set;
8088 size_t size;
8089 } sig, *sig_ptr;
8091 abi_ulong arg_sigset, arg_sigsize, *arg7;
8092 target_sigset_t *target_sigset;
8094 n = arg1;
8095 rfd_addr = arg2;
8096 wfd_addr = arg3;
8097 efd_addr = arg4;
8098 ts_addr = arg5;
8100 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8101 if (ret) {
8102 goto fail;
8104 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8105 if (ret) {
8106 goto fail;
8108 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8109 if (ret) {
8110 goto fail;
8114 * This takes a timespec, and not a timeval, so we cannot
8115 * use the do_select() helper ...
8117 if (ts_addr) {
8118 if (target_to_host_timespec(&ts, ts_addr)) {
8119 goto efault;
8121 ts_ptr = &ts;
8122 } else {
8123 ts_ptr = NULL;
8126 /* Extract the two packed args for the sigset */
8127 if (arg6) {
8128 sig_ptr = &sig;
8129 sig.size = SIGSET_T_SIZE;
8131 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8132 if (!arg7) {
8133 goto efault;
8135 arg_sigset = tswapal(arg7[0]);
8136 arg_sigsize = tswapal(arg7[1]);
8137 unlock_user(arg7, arg6, 0);
8139 if (arg_sigset) {
8140 sig.set = &set;
8141 if (arg_sigsize != sizeof(*target_sigset)) {
8142 /* Like the kernel, we enforce correct size sigsets */
8143 ret = -TARGET_EINVAL;
8144 goto fail;
8146 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8147 sizeof(*target_sigset), 1);
8148 if (!target_sigset) {
8149 goto efault;
8151 target_to_host_sigset(&set, target_sigset);
8152 unlock_user(target_sigset, arg_sigset, 0);
8153 } else {
8154 sig.set = NULL;
8156 } else {
8157 sig_ptr = NULL;
8160 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8161 ts_ptr, sig_ptr));
8163 if (!is_error(ret)) {
8164 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8165 goto efault;
8166 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8167 goto efault;
8168 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8169 goto efault;
8171 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8172 goto efault;
8175 break;
8176 #endif
8177 #ifdef TARGET_NR_symlink
8178 case TARGET_NR_symlink:
8180 void *p2;
8181 p = lock_user_string(arg1);
8182 p2 = lock_user_string(arg2);
8183 if (!p || !p2)
8184 ret = -TARGET_EFAULT;
8185 else
8186 ret = get_errno(symlink(p, p2));
8187 unlock_user(p2, arg2, 0);
8188 unlock_user(p, arg1, 0);
8190 break;
8191 #endif
8192 #if defined(TARGET_NR_symlinkat)
8193 case TARGET_NR_symlinkat:
8195 void *p2;
8196 p = lock_user_string(arg1);
8197 p2 = lock_user_string(arg3);
8198 if (!p || !p2)
8199 ret = -TARGET_EFAULT;
8200 else
8201 ret = get_errno(symlinkat(p, arg2, p2));
8202 unlock_user(p2, arg3, 0);
8203 unlock_user(p, arg1, 0);
8205 break;
8206 #endif
8207 #ifdef TARGET_NR_oldlstat
8208 case TARGET_NR_oldlstat:
8209 goto unimplemented;
8210 #endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *buf;

            p = lock_user_string(arg1);
            buf = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !buf) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Zero-length buffer: short circuit before the magic
                 * exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Emulated /proc/self/exe: report the real executable. */
                char real[PATH_MAX];

                if (realpath(exec_path, real) == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Return value is the number of bytes placed in the
                     * buffer; readlink() does not NUL-terminate.  Sign
                     * mismatch is not a concern as the earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    memcpy(buf, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), buf, arg3));
            }
            unlock_user(buf, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        break;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            /* Fixes vs the old code, aligning with TARGET_NR_readlink:
             * - on realpath() failure the buffer contents are unspecified,
             *   so do not snprintf() from it;
             * - readlink(2) must not NUL-terminate and must return the
             *   number of bytes placed in the buffer, capped at arg4;
             * - a zero-length buffer yields EINVAL before the exe check. */
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX];

                if (realpath(exec_path, real) == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* We cannot NUL terminate the string; the return value
                     * is the number of bytes written to the buffer. */
                    ret = MIN(strlen(real), arg4);
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        break;
#endif
8264 #ifdef TARGET_NR_uselib
8265 case TARGET_NR_uselib:
8266 goto unimplemented;
8267 #endif
8268 #ifdef TARGET_NR_swapon
8269 case TARGET_NR_swapon:
8270 if (!(p = lock_user_string(arg1)))
8271 goto efault;
8272 ret = get_errno(swapon(p, arg2));
8273 unlock_user(p, arg1, 0);
8274 break;
8275 #endif
8276 case TARGET_NR_reboot:
8277 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8278 /* arg4 must be ignored in all other cases */
8279 p = lock_user_string(arg4);
8280 if (!p) {
8281 goto efault;
8283 ret = get_errno(reboot(arg1, arg2, arg3, p));
8284 unlock_user(p, arg4, 0);
8285 } else {
8286 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8288 break;
8289 #ifdef TARGET_NR_readdir
8290 case TARGET_NR_readdir:
8291 goto unimplemented;
8292 #endif
8293 #ifdef TARGET_NR_mmap
8294 case TARGET_NR_mmap:
8295 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8296 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8297 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8298 || defined(TARGET_S390X)
8300 abi_ulong *v;
8301 abi_ulong v1, v2, v3, v4, v5, v6;
8302 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8303 goto efault;
8304 v1 = tswapal(v[0]);
8305 v2 = tswapal(v[1]);
8306 v3 = tswapal(v[2]);
8307 v4 = tswapal(v[3]);
8308 v5 = tswapal(v[4]);
8309 v6 = tswapal(v[5]);
8310 unlock_user(v, arg1, 0);
8311 ret = get_errno(target_mmap(v1, v2, v3,
8312 target_to_host_bitmask(v4, mmap_flags_tbl),
8313 v5, v6));
8315 #else
8316 ret = get_errno(target_mmap(arg1, arg2, arg3,
8317 target_to_host_bitmask(arg4, mmap_flags_tbl),
8318 arg5,
8319 arg6));
8320 #endif
8321 break;
8322 #endif
8323 #ifdef TARGET_NR_mmap2
8324 case TARGET_NR_mmap2:
8325 #ifndef MMAP_SHIFT
8326 #define MMAP_SHIFT 12
8327 #endif
8328 ret = get_errno(target_mmap(arg1, arg2, arg3,
8329 target_to_host_bitmask(arg4, mmap_flags_tbl),
8330 arg5,
8331 arg6 << MMAP_SHIFT));
8332 break;
8333 #endif
8334 case TARGET_NR_munmap:
8335 ret = get_errno(target_munmap(arg1, arg2));
8336 break;
8337 case TARGET_NR_mprotect:
8339 TaskState *ts = cpu->opaque;
8340 /* Special hack to detect libc making the stack executable. */
8341 if ((arg3 & PROT_GROWSDOWN)
8342 && arg1 >= ts->info->stack_limit
8343 && arg1 <= ts->info->start_stack) {
8344 arg3 &= ~PROT_GROWSDOWN;
8345 arg2 = arg2 + arg1 - ts->info->stack_limit;
8346 arg1 = ts->info->stack_limit;
8349 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8350 break;
8351 #ifdef TARGET_NR_mremap
8352 case TARGET_NR_mremap:
8353 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8354 break;
8355 #endif
8356 /* ??? msync/mlock/munlock are broken for softmmu. */
8357 #ifdef TARGET_NR_msync
8358 case TARGET_NR_msync:
8359 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8360 break;
8361 #endif
8362 #ifdef TARGET_NR_mlock
8363 case TARGET_NR_mlock:
8364 ret = get_errno(mlock(g2h(arg1), arg2));
8365 break;
8366 #endif
8367 #ifdef TARGET_NR_munlock
8368 case TARGET_NR_munlock:
8369 ret = get_errno(munlock(g2h(arg1), arg2));
8370 break;
8371 #endif
8372 #ifdef TARGET_NR_mlockall
8373 case TARGET_NR_mlockall:
8374 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8375 break;
8376 #endif
8377 #ifdef TARGET_NR_munlockall
8378 case TARGET_NR_munlockall:
8379 ret = get_errno(munlockall());
8380 break;
8381 #endif
8382 case TARGET_NR_truncate:
8383 if (!(p = lock_user_string(arg1)))
8384 goto efault;
8385 ret = get_errno(truncate(p, arg2));
8386 unlock_user(p, arg1, 0);
8387 break;
8388 case TARGET_NR_ftruncate:
8389 ret = get_errno(ftruncate(arg1, arg2));
8390 break;
8391 case TARGET_NR_fchmod:
8392 ret = get_errno(fchmod(arg1, arg2));
8393 break;
8394 #if defined(TARGET_NR_fchmodat)
8395 case TARGET_NR_fchmodat:
8396 if (!(p = lock_user_string(arg2)))
8397 goto efault;
8398 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8399 unlock_user(p, arg2, 0);
8400 break;
8401 #endif
8402 case TARGET_NR_getpriority:
8403 /* Note that negative values are valid for getpriority, so we must
8404 differentiate based on errno settings. */
8405 errno = 0;
8406 ret = getpriority(arg1, arg2);
8407 if (ret == -1 && errno != 0) {
8408 ret = -host_to_target_errno(errno);
8409 break;
8411 #ifdef TARGET_ALPHA
8412 /* Return value is the unbiased priority. Signal no error. */
8413 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8414 #else
8415 /* Return value is a biased priority to avoid negative numbers. */
8416 ret = 20 - ret;
8417 #endif
8418 break;
8419 case TARGET_NR_setpriority:
8420 ret = get_errno(setpriority(arg1, arg2, arg3));
8421 break;
8422 #ifdef TARGET_NR_profil
8423 case TARGET_NR_profil:
8424 goto unimplemented;
8425 #endif
8426 case TARGET_NR_statfs:
8427 if (!(p = lock_user_string(arg1)))
8428 goto efault;
8429 ret = get_errno(statfs(path(p), &stfs));
8430 unlock_user(p, arg1, 0);
8431 convert_statfs:
8432 if (!is_error(ret)) {
8433 struct target_statfs *target_stfs;
8435 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8436 goto efault;
8437 __put_user(stfs.f_type, &target_stfs->f_type);
8438 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8439 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8440 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8441 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8442 __put_user(stfs.f_files, &target_stfs->f_files);
8443 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8444 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8445 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8446 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8447 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8448 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8449 unlock_user_struct(target_stfs, arg2, 1);
8451 break;
8452 case TARGET_NR_fstatfs:
8453 ret = get_errno(fstatfs(arg1, &stfs));
8454 goto convert_statfs;
8455 #ifdef TARGET_NR_statfs64
8456 case TARGET_NR_statfs64:
8457 if (!(p = lock_user_string(arg1)))
8458 goto efault;
8459 ret = get_errno(statfs(path(p), &stfs));
8460 unlock_user(p, arg1, 0);
8461 convert_statfs64:
8462 if (!is_error(ret)) {
8463 struct target_statfs64 *target_stfs;
8465 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8466 goto efault;
8467 __put_user(stfs.f_type, &target_stfs->f_type);
8468 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8469 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8470 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8471 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8472 __put_user(stfs.f_files, &target_stfs->f_files);
8473 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8474 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8475 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8476 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8477 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8478 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8479 unlock_user_struct(target_stfs, arg3, 1);
8481 break;
8482 case TARGET_NR_fstatfs64:
8483 ret = get_errno(fstatfs(arg1, &stfs));
8484 goto convert_statfs64;
8485 #endif
8486 #ifdef TARGET_NR_ioperm
8487 case TARGET_NR_ioperm:
8488 goto unimplemented;
8489 #endif
8490 #ifdef TARGET_NR_socketcall
8491 case TARGET_NR_socketcall:
8492 ret = do_socketcall(arg1, arg2);
8493 break;
8494 #endif
8495 #ifdef TARGET_NR_accept
8496 case TARGET_NR_accept:
8497 ret = do_accept4(arg1, arg2, arg3, 0);
8498 break;
8499 #endif
8500 #ifdef TARGET_NR_accept4
8501 case TARGET_NR_accept4:
8502 ret = do_accept4(arg1, arg2, arg3, arg4);
8503 break;
8504 #endif
8505 #ifdef TARGET_NR_bind
8506 case TARGET_NR_bind:
8507 ret = do_bind(arg1, arg2, arg3);
8508 break;
8509 #endif
8510 #ifdef TARGET_NR_connect
8511 case TARGET_NR_connect:
8512 ret = do_connect(arg1, arg2, arg3);
8513 break;
8514 #endif
8515 #ifdef TARGET_NR_getpeername
8516 case TARGET_NR_getpeername:
8517 ret = do_getpeername(arg1, arg2, arg3);
8518 break;
8519 #endif
8520 #ifdef TARGET_NR_getsockname
8521 case TARGET_NR_getsockname:
8522 ret = do_getsockname(arg1, arg2, arg3);
8523 break;
8524 #endif
8525 #ifdef TARGET_NR_getsockopt
8526 case TARGET_NR_getsockopt:
8527 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8528 break;
8529 #endif
8530 #ifdef TARGET_NR_listen
8531 case TARGET_NR_listen:
8532 ret = get_errno(listen(arg1, arg2));
8533 break;
8534 #endif
8535 #ifdef TARGET_NR_recv
8536 case TARGET_NR_recv:
8537 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8538 break;
8539 #endif
8540 #ifdef TARGET_NR_recvfrom
8541 case TARGET_NR_recvfrom:
8542 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8543 break;
8544 #endif
8545 #ifdef TARGET_NR_recvmsg
8546 case TARGET_NR_recvmsg:
8547 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
8548 break;
8549 #endif
8550 #ifdef TARGET_NR_send
8551 case TARGET_NR_send:
8552 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8553 break;
8554 #endif
8555 #ifdef TARGET_NR_sendmsg
8556 case TARGET_NR_sendmsg:
8557 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
8558 break;
8559 #endif
8560 #ifdef TARGET_NR_sendmmsg
8561 case TARGET_NR_sendmmsg:
8562 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8563 break;
8564 case TARGET_NR_recvmmsg:
8565 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8566 break;
8567 #endif
8568 #ifdef TARGET_NR_sendto
8569 case TARGET_NR_sendto:
8570 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8571 break;
8572 #endif
8573 #ifdef TARGET_NR_shutdown
8574 case TARGET_NR_shutdown:
8575 ret = get_errno(shutdown(arg1, arg2));
8576 break;
8577 #endif
8578 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8579 case TARGET_NR_getrandom:
8580 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8581 if (!p) {
8582 goto efault;
8584 ret = get_errno(getrandom(p, arg2, arg3));
8585 unlock_user(p, arg1, ret);
8586 break;
8587 #endif
8588 #ifdef TARGET_NR_socket
8589 case TARGET_NR_socket:
8590 ret = do_socket(arg1, arg2, arg3);
8591 fd_trans_unregister(ret);
8592 break;
8593 #endif
8594 #ifdef TARGET_NR_socketpair
8595 case TARGET_NR_socketpair:
8596 ret = do_socketpair(arg1, arg2, arg3, arg4);
8597 break;
8598 #endif
8599 #ifdef TARGET_NR_setsockopt
8600 case TARGET_NR_setsockopt:
8601 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8602 break;
8603 #endif
8605 case TARGET_NR_syslog:
8606 if (!(p = lock_user_string(arg2)))
8607 goto efault;
8608 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8609 unlock_user(p, arg2, 0);
8610 break;
8612 case TARGET_NR_setitimer:
8614 struct itimerval value, ovalue, *pvalue;
8616 if (arg2) {
8617 pvalue = &value;
8618 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8619 || copy_from_user_timeval(&pvalue->it_value,
8620 arg2 + sizeof(struct target_timeval)))
8621 goto efault;
8622 } else {
8623 pvalue = NULL;
8625 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8626 if (!is_error(ret) && arg3) {
8627 if (copy_to_user_timeval(arg3,
8628 &ovalue.it_interval)
8629 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8630 &ovalue.it_value))
8631 goto efault;
8634 break;
8635 case TARGET_NR_getitimer:
8637 struct itimerval value;
8639 ret = get_errno(getitimer(arg1, &value));
8640 if (!is_error(ret) && arg2) {
8641 if (copy_to_user_timeval(arg2,
8642 &value.it_interval)
8643 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8644 &value.it_value))
8645 goto efault;
8648 break;
8649 #ifdef TARGET_NR_stat
8650 case TARGET_NR_stat:
8651 if (!(p = lock_user_string(arg1)))
8652 goto efault;
8653 ret = get_errno(stat(path(p), &st));
8654 unlock_user(p, arg1, 0);
8655 goto do_stat;
8656 #endif
8657 #ifdef TARGET_NR_lstat
8658 case TARGET_NR_lstat:
8659 if (!(p = lock_user_string(arg1)))
8660 goto efault;
8661 ret = get_errno(lstat(path(p), &st));
8662 unlock_user(p, arg1, 0);
8663 goto do_stat;
8664 #endif
8665 case TARGET_NR_fstat:
8667 ret = get_errno(fstat(arg1, &st));
8668 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8669 do_stat:
8670 #endif
8671 if (!is_error(ret)) {
8672 struct target_stat *target_st;
8674 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8675 goto efault;
8676 memset(target_st, 0, sizeof(*target_st));
8677 __put_user(st.st_dev, &target_st->st_dev);
8678 __put_user(st.st_ino, &target_st->st_ino);
8679 __put_user(st.st_mode, &target_st->st_mode);
8680 __put_user(st.st_uid, &target_st->st_uid);
8681 __put_user(st.st_gid, &target_st->st_gid);
8682 __put_user(st.st_nlink, &target_st->st_nlink);
8683 __put_user(st.st_rdev, &target_st->st_rdev);
8684 __put_user(st.st_size, &target_st->st_size);
8685 __put_user(st.st_blksize, &target_st->st_blksize);
8686 __put_user(st.st_blocks, &target_st->st_blocks);
8687 __put_user(st.st_atime, &target_st->target_st_atime);
8688 __put_user(st.st_mtime, &target_st->target_st_mtime);
8689 __put_user(st.st_ctime, &target_st->target_st_ctime);
8690 unlock_user_struct(target_st, arg2, 1);
8693 break;
8694 #ifdef TARGET_NR_olduname
8695 case TARGET_NR_olduname:
8696 goto unimplemented;
8697 #endif
8698 #ifdef TARGET_NR_iopl
8699 case TARGET_NR_iopl:
8700 goto unimplemented;
8701 #endif
8702 case TARGET_NR_vhangup:
8703 ret = get_errno(vhangup());
8704 break;
8705 #ifdef TARGET_NR_idle
8706 case TARGET_NR_idle:
8707 goto unimplemented;
8708 #endif
8709 #ifdef TARGET_NR_syscall
8710 case TARGET_NR_syscall:
8711 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8712 arg6, arg7, arg8, 0);
8713 break;
8714 #endif
8715 case TARGET_NR_wait4:
8717 int status;
8718 abi_long status_ptr = arg2;
8719 struct rusage rusage, *rusage_ptr;
8720 abi_ulong target_rusage = arg4;
8721 abi_long rusage_err;
8722 if (target_rusage)
8723 rusage_ptr = &rusage;
8724 else
8725 rusage_ptr = NULL;
8726 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8727 if (!is_error(ret)) {
8728 if (status_ptr && ret) {
8729 status = host_to_target_waitstatus(status);
8730 if (put_user_s32(status, status_ptr))
8731 goto efault;
8733 if (target_rusage) {
8734 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8735 if (rusage_err) {
8736 ret = rusage_err;
8741 break;
8742 #ifdef TARGET_NR_swapoff
8743 case TARGET_NR_swapoff:
8744 if (!(p = lock_user_string(arg1)))
8745 goto efault;
8746 ret = get_errno(swapoff(p));
8747 unlock_user(p, arg1, 0);
8748 break;
8749 #endif
8750 case TARGET_NR_sysinfo:
8752 struct target_sysinfo *target_value;
8753 struct sysinfo value;
8754 ret = get_errno(sysinfo(&value));
8755 if (!is_error(ret) && arg1)
8757 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8758 goto efault;
8759 __put_user(value.uptime, &target_value->uptime);
8760 __put_user(value.loads[0], &target_value->loads[0]);
8761 __put_user(value.loads[1], &target_value->loads[1]);
8762 __put_user(value.loads[2], &target_value->loads[2]);
8763 __put_user(value.totalram, &target_value->totalram);
8764 __put_user(value.freeram, &target_value->freeram);
8765 __put_user(value.sharedram, &target_value->sharedram);
8766 __put_user(value.bufferram, &target_value->bufferram);
8767 __put_user(value.totalswap, &target_value->totalswap);
8768 __put_user(value.freeswap, &target_value->freeswap);
8769 __put_user(value.procs, &target_value->procs);
8770 __put_user(value.totalhigh, &target_value->totalhigh);
8771 __put_user(value.freehigh, &target_value->freehigh);
8772 __put_user(value.mem_unit, &target_value->mem_unit);
8773 unlock_user_struct(target_value, arg1, 1);
8776 break;
8777 #ifdef TARGET_NR_ipc
8778 case TARGET_NR_ipc:
8779 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
8780 break;
8781 #endif
8782 #ifdef TARGET_NR_semget
8783 case TARGET_NR_semget:
8784 ret = get_errno(semget(arg1, arg2, arg3));
8785 break;
8786 #endif
8787 #ifdef TARGET_NR_semop
8788 case TARGET_NR_semop:
8789 ret = do_semop(arg1, arg2, arg3);
8790 break;
8791 #endif
8792 #ifdef TARGET_NR_semctl
8793 case TARGET_NR_semctl:
8794 ret = do_semctl(arg1, arg2, arg3, arg4);
8795 break;
8796 #endif
8797 #ifdef TARGET_NR_msgctl
8798 case TARGET_NR_msgctl:
8799 ret = do_msgctl(arg1, arg2, arg3);
8800 break;
8801 #endif
8802 #ifdef TARGET_NR_msgget
8803 case TARGET_NR_msgget:
8804 ret = get_errno(msgget(arg1, arg2));
8805 break;
8806 #endif
8807 #ifdef TARGET_NR_msgrcv
8808 case TARGET_NR_msgrcv:
8809 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8810 break;
8811 #endif
8812 #ifdef TARGET_NR_msgsnd
8813 case TARGET_NR_msgsnd:
8814 ret = do_msgsnd(arg1, arg2, arg3, arg4);
8815 break;
8816 #endif
8817 #ifdef TARGET_NR_shmget
8818 case TARGET_NR_shmget:
8819 ret = get_errno(shmget(arg1, arg2, arg3));
8820 break;
8821 #endif
8822 #ifdef TARGET_NR_shmctl
8823 case TARGET_NR_shmctl:
8824 ret = do_shmctl(arg1, arg2, arg3);
8825 break;
8826 #endif
8827 #ifdef TARGET_NR_shmat
8828 case TARGET_NR_shmat:
8829 ret = do_shmat(arg1, arg2, arg3);
8830 break;
8831 #endif
8832 #ifdef TARGET_NR_shmdt
8833 case TARGET_NR_shmdt:
8834 ret = do_shmdt(arg1);
8835 break;
8836 #endif
8837 case TARGET_NR_fsync:
8838 ret = get_errno(fsync(arg1));
8839 break;
8840 case TARGET_NR_clone:
8841 /* Linux manages to have three different orderings for its
8842 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8843 * match the kernel's CONFIG_CLONE_* settings.
8844 * Microblaze is further special in that it uses a sixth
8845 * implicit argument to clone for the TLS pointer.
8847 #if defined(TARGET_MICROBLAZE)
8848 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8849 #elif defined(TARGET_CLONE_BACKWARDS)
8850 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8851 #elif defined(TARGET_CLONE_BACKWARDS2)
8852 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8853 #else
8854 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8855 #endif
8856 break;
8857 #ifdef __NR_exit_group
8858 /* new thread calls */
8859 case TARGET_NR_exit_group:
8860 #ifdef TARGET_GPROF
8861 _mcleanup();
8862 #endif
8863 gdb_exit(cpu_env, arg1);
8864 ret = get_errno(exit_group(arg1));
8865 break;
8866 #endif
8867 case TARGET_NR_setdomainname:
8868 if (!(p = lock_user_string(arg1)))
8869 goto efault;
8870 ret = get_errno(setdomainname(p, arg2));
8871 unlock_user(p, arg1, 0);
8872 break;
8873 case TARGET_NR_uname:
8874 /* no need to transcode because we use the linux syscall */
8876 struct new_utsname * buf;
8878 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8879 goto efault;
8880 ret = get_errno(sys_uname(buf));
8881 if (!is_error(ret)) {
8882 /* Overrite the native machine name with whatever is being
8883 emulated. */
8884 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
8885 /* Allow the user to override the reported release. */
8886 if (qemu_uname_release && *qemu_uname_release)
8887 strcpy (buf->release, qemu_uname_release);
8889 unlock_user_struct(buf, arg1, 1);
8891 break;
8892 #ifdef TARGET_I386
8893 case TARGET_NR_modify_ldt:
8894 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
8895 break;
8896 #if !defined(TARGET_X86_64)
8897 case TARGET_NR_vm86old:
8898 goto unimplemented;
8899 case TARGET_NR_vm86:
8900 ret = do_vm86(cpu_env, arg1, arg2);
8901 break;
8902 #endif
8903 #endif
8904 case TARGET_NR_adjtimex:
8905 goto unimplemented;
8906 #ifdef TARGET_NR_create_module
8907 case TARGET_NR_create_module:
8908 #endif
8909 case TARGET_NR_init_module:
8910 case TARGET_NR_delete_module:
8911 #ifdef TARGET_NR_get_kernel_syms
8912 case TARGET_NR_get_kernel_syms:
8913 #endif
8914 goto unimplemented;
8915 case TARGET_NR_quotactl:
8916 goto unimplemented;
8917 case TARGET_NR_getpgid:
8918 ret = get_errno(getpgid(arg1));
8919 break;
8920 case TARGET_NR_fchdir:
8921 ret = get_errno(fchdir(arg1));
8922 break;
8923 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8924 case TARGET_NR_bdflush:
8925 goto unimplemented;
8926 #endif
8927 #ifdef TARGET_NR_sysfs
8928 case TARGET_NR_sysfs:
8929 goto unimplemented;
8930 #endif
8931 case TARGET_NR_personality:
8932 ret = get_errno(personality(arg1));
8933 break;
8934 #ifdef TARGET_NR_afs_syscall
8935 case TARGET_NR_afs_syscall:
8936 goto unimplemented;
8937 #endif
8938 #ifdef TARGET_NR__llseek /* Not on alpha */
8939 case TARGET_NR__llseek:
8941 int64_t res;
8942 #if !defined(__NR_llseek)
8943 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8944 if (res == -1) {
8945 ret = get_errno(res);
8946 } else {
8947 ret = 0;
8949 #else
8950 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8951 #endif
8952 if ((ret == 0) && put_user_s64(res, arg4)) {
8953 goto efault;
8956 break;
8957 #endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef __NR_getdents
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        /* 32-bit guest on 64-bit host: host dirents are wider than the
         * guest's, so convert via a temporary host buffer. */
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                ret = -TARGET_ENOMEM;
                goto fail;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    goto efault;
                tde = target_dirp;
                /* Repack each host record into guest layout; target records
                 * can only shrink, so the guest buffer always fits. */
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        /* Same-width host and guest dirents: byte-swap in place. */
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                goto efault;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent. We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    /* +2: NUL terminator plus trailing d_type byte. */
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        break;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        /* Records have the same layout on host and guest; only the
         * endianness of the fixed-width fields needs fixing up. */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        break;
#endif /* TARGET_NR_getdents64 */
9118 #if defined(TARGET_NR__newselect)
9119 case TARGET_NR__newselect:
9120 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9121 break;
9122 #endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        /* Shared entry for poll and ppoll: convert the pollfd array once,
         * then dispatch on 'num' for the timeout/sigmask differences. */
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            struct pollfd *pfd;
            unsigned int i;

            pfd = NULL;
            target_pfd = NULL;
            if (nfds) {
                target_pfd = lock_user(VERIFY_WRITE, arg1,
                                       sizeof(struct target_pollfd) * nfds, 1);
                if (!target_pfd) {
                    goto efault;
                }

                pfd = alloca(sizeof(struct pollfd) * nfds);
                for (i = 0; i < nfds; i++) {
                    pfd[i].fd = tswap32(target_pfd[i].fd);
                    pfd[i].events = tswap16(target_pfd[i].events);
                }
            }

            switch (num) {
# ifdef TARGET_NR_ppoll
            case TARGET_NR_ppoll:
            {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                /* NULL guest timeout means block indefinitely. */
                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                } else {
                    timeout_ts = NULL;
                }

                if (arg4) {
                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                           set, SIGSET_T_SIZE));

                /* The kernel may update the timeout with the remaining time. */
                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
                break;
            }
# endif
# ifdef TARGET_NR_poll
            case TARGET_NR_poll:
            {
                struct timespec ts, *pts;

                if (arg3 >= 0) {
                    /* Convert ms to secs, ns */
                    ts.tv_sec = arg3 / 1000;
                    ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                    pts = &ts;
                } else {
                    /* -ve poll() timeout means "infinite" */
                    pts = NULL;
                }
                ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
                break;
            }
# endif
            default:
                g_assert_not_reached();
            }

            if (!is_error(ret)) {
                for (i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        break;
#endif
9223 case TARGET_NR_flock:
9224 /* NOTE: the flock constant seems to be the same for every
9225 Linux platform */
9226 ret = get_errno(safe_flock(arg1, arg2));
9227 break;
9228 case TARGET_NR_readv:
9230 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9231 if (vec != NULL) {
9232 ret = get_errno(safe_readv(arg1, vec, arg3));
9233 unlock_iovec(vec, arg2, arg3, 1);
9234 } else {
9235 ret = -host_to_target_errno(errno);
9238 break;
9239 case TARGET_NR_writev:
9241 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9242 if (vec != NULL) {
9243 ret = get_errno(safe_writev(arg1, vec, arg3));
9244 unlock_iovec(vec, arg2, arg3, 0);
9245 } else {
9246 ret = -host_to_target_errno(errno);
9249 break;
9250 case TARGET_NR_getsid:
9251 ret = get_errno(getsid(arg1));
9252 break;
9253 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9254 case TARGET_NR_fdatasync:
9255 ret = get_errno(fdatasync(arg1));
9256 break;
9257 #endif
9258 #ifdef TARGET_NR__sysctl
9259 case TARGET_NR__sysctl:
9260 /* We don't implement this, but ENOTDIR is always a safe
9261 return value. */
9262 ret = -TARGET_ENOTDIR;
9263 break;
9264 #endif
9265 case TARGET_NR_sched_getaffinity:
9267 unsigned int mask_size;
9268 unsigned long *mask;
9271 * sched_getaffinity needs multiples of ulong, so need to take
9272 * care of mismatches between target ulong and host ulong sizes.
9274 if (arg2 & (sizeof(abi_ulong) - 1)) {
9275 ret = -TARGET_EINVAL;
9276 break;
9278 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9280 mask = alloca(mask_size);
9281 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9283 if (!is_error(ret)) {
9284 if (ret > arg2) {
9285 /* More data returned than the caller's buffer will fit.
9286 * This only happens if sizeof(abi_long) < sizeof(long)
9287 * and the caller passed us a buffer holding an odd number
9288 * of abi_longs. If the host kernel is actually using the
9289 * extra 4 bytes then fail EINVAL; otherwise we can just
9290 * ignore them and only copy the interesting part.
9292 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9293 if (numcpus > arg2 * 8) {
9294 ret = -TARGET_EINVAL;
9295 break;
9297 ret = arg2;
9300 if (copy_to_user(arg3, mask, ret)) {
9301 goto efault;
9305 break;
9306 case TARGET_NR_sched_setaffinity:
9308 unsigned int mask_size;
9309 unsigned long *mask;
9312 * sched_setaffinity needs multiples of ulong, so need to take
9313 * care of mismatches between target ulong and host ulong sizes.
9315 if (arg2 & (sizeof(abi_ulong) - 1)) {
9316 ret = -TARGET_EINVAL;
9317 break;
9319 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9321 mask = alloca(mask_size);
9322 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9323 goto efault;
9325 memcpy(mask, p, arg2);
9326 unlock_user_struct(p, arg2, 0);
9328 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9330 break;
9331 case TARGET_NR_sched_setparam:
9333 struct sched_param *target_schp;
9334 struct sched_param schp;
9336 if (arg2 == 0) {
9337 return -TARGET_EINVAL;
9339 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9340 goto efault;
9341 schp.sched_priority = tswap32(target_schp->sched_priority);
9342 unlock_user_struct(target_schp, arg2, 0);
9343 ret = get_errno(sched_setparam(arg1, &schp));
9345 break;
9346 case TARGET_NR_sched_getparam:
9348 struct sched_param *target_schp;
9349 struct sched_param schp;
9351 if (arg2 == 0) {
9352 return -TARGET_EINVAL;
9354 ret = get_errno(sched_getparam(arg1, &schp));
9355 if (!is_error(ret)) {
9356 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9357 goto efault;
9358 target_schp->sched_priority = tswap32(schp.sched_priority);
9359 unlock_user_struct(target_schp, arg2, 1);
9362 break;
9363 case TARGET_NR_sched_setscheduler:
9365 struct sched_param *target_schp;
9366 struct sched_param schp;
9367 if (arg3 == 0) {
9368 return -TARGET_EINVAL;
9370 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9371 goto efault;
9372 schp.sched_priority = tswap32(target_schp->sched_priority);
9373 unlock_user_struct(target_schp, arg3, 0);
9374 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
9376 break;
9377 case TARGET_NR_sched_getscheduler:
9378 ret = get_errno(sched_getscheduler(arg1));
9379 break;
9380 case TARGET_NR_sched_yield:
9381 ret = get_errno(sched_yield());
9382 break;
9383 case TARGET_NR_sched_get_priority_max:
9384 ret = get_errno(sched_get_priority_max(arg1));
9385 break;
9386 case TARGET_NR_sched_get_priority_min:
9387 ret = get_errno(sched_get_priority_min(arg1));
9388 break;
9389 case TARGET_NR_sched_rr_get_interval:
9391 struct timespec ts;
9392 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9393 if (!is_error(ret)) {
9394 ret = host_to_target_timespec(arg2, &ts);
9397 break;
9398 case TARGET_NR_nanosleep:
9400 struct timespec req, rem;
9401 target_to_host_timespec(&req, arg1);
9402 ret = get_errno(safe_nanosleep(&req, &rem));
9403 if (is_error(ret) && arg2) {
9404 host_to_target_timespec(arg2, &rem);
9407 break;
9408 #ifdef TARGET_NR_query_module
9409 case TARGET_NR_query_module:
9410 goto unimplemented;
9411 #endif
9412 #ifdef TARGET_NR_nfsservctl
9413 case TARGET_NR_nfsservctl:
9414 goto unimplemented;
9415 #endif
9416 case TARGET_NR_prctl:
9417 switch (arg1) {
9418 case PR_GET_PDEATHSIG:
9420 int deathsig;
9421 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9422 if (!is_error(ret) && arg2
9423 && put_user_ual(deathsig, arg2)) {
9424 goto efault;
9426 break;
9428 #ifdef PR_GET_NAME
9429 case PR_GET_NAME:
9431 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9432 if (!name) {
9433 goto efault;
9435 ret = get_errno(prctl(arg1, (unsigned long)name,
9436 arg3, arg4, arg5));
9437 unlock_user(name, arg2, 16);
9438 break;
9440 case PR_SET_NAME:
9442 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9443 if (!name) {
9444 goto efault;
9446 ret = get_errno(prctl(arg1, (unsigned long)name,
9447 arg3, arg4, arg5));
9448 unlock_user(name, arg2, 0);
9449 break;
9451 #endif
9452 default:
9453 /* Most prctl options have no pointer arguments */
9454 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9455 break;
9457 break;
9458 #ifdef TARGET_NR_arch_prctl
9459 case TARGET_NR_arch_prctl:
9460 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9461 ret = do_arch_prctl(cpu_env, arg1, arg2);
9462 break;
9463 #else
9464 goto unimplemented;
9465 #endif
9466 #endif
9467 #ifdef TARGET_NR_pread64
9468 case TARGET_NR_pread64:
9469 if (regpairs_aligned(cpu_env)) {
9470 arg4 = arg5;
9471 arg5 = arg6;
9473 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9474 goto efault;
9475 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9476 unlock_user(p, arg2, ret);
9477 break;
9478 case TARGET_NR_pwrite64:
9479 if (regpairs_aligned(cpu_env)) {
9480 arg4 = arg5;
9481 arg5 = arg6;
9483 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9484 goto efault;
9485 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9486 unlock_user(p, arg2, 0);
9487 break;
9488 #endif
9489 case TARGET_NR_getcwd:
9490 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9491 goto efault;
9492 ret = get_errno(sys_getcwd1(p, arg2));
9493 unlock_user(p, arg1, ret);
9494 break;
9495 case TARGET_NR_capget:
9496 case TARGET_NR_capset:
9498 struct target_user_cap_header *target_header;
9499 struct target_user_cap_data *target_data = NULL;
9500 struct __user_cap_header_struct header;
9501 struct __user_cap_data_struct data[2];
9502 struct __user_cap_data_struct *dataptr = NULL;
9503 int i, target_datalen;
9504 int data_items = 1;
9506 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9507 goto efault;
9509 header.version = tswap32(target_header->version);
9510 header.pid = tswap32(target_header->pid);
9512 if (header.version != _LINUX_CAPABILITY_VERSION) {
9513 /* Version 2 and up takes pointer to two user_data structs */
9514 data_items = 2;
9517 target_datalen = sizeof(*target_data) * data_items;
9519 if (arg2) {
9520 if (num == TARGET_NR_capget) {
9521 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9522 } else {
9523 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9525 if (!target_data) {
9526 unlock_user_struct(target_header, arg1, 0);
9527 goto efault;
9530 if (num == TARGET_NR_capset) {
9531 for (i = 0; i < data_items; i++) {
9532 data[i].effective = tswap32(target_data[i].effective);
9533 data[i].permitted = tswap32(target_data[i].permitted);
9534 data[i].inheritable = tswap32(target_data[i].inheritable);
9538 dataptr = data;
9541 if (num == TARGET_NR_capget) {
9542 ret = get_errno(capget(&header, dataptr));
9543 } else {
9544 ret = get_errno(capset(&header, dataptr));
9547 /* The kernel always updates version for both capget and capset */
9548 target_header->version = tswap32(header.version);
9549 unlock_user_struct(target_header, arg1, 1);
9551 if (arg2) {
9552 if (num == TARGET_NR_capget) {
9553 for (i = 0; i < data_items; i++) {
9554 target_data[i].effective = tswap32(data[i].effective);
9555 target_data[i].permitted = tswap32(data[i].permitted);
9556 target_data[i].inheritable = tswap32(data[i].inheritable);
9558 unlock_user(target_data, arg2, target_datalen);
9559 } else {
9560 unlock_user(target_data, arg2, 0);
9563 break;
9565 case TARGET_NR_sigaltstack:
9566 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
9567 break;
9569 #ifdef CONFIG_SENDFILE
9570 case TARGET_NR_sendfile:
9572 off_t *offp = NULL;
9573 off_t off;
9574 if (arg3) {
9575 ret = get_user_sal(off, arg3);
9576 if (is_error(ret)) {
9577 break;
9579 offp = &off;
9581 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9582 if (!is_error(ret) && arg3) {
9583 abi_long ret2 = put_user_sal(off, arg3);
9584 if (is_error(ret2)) {
9585 ret = ret2;
9588 break;
9590 #ifdef TARGET_NR_sendfile64
9591 case TARGET_NR_sendfile64:
9593 off_t *offp = NULL;
9594 off_t off;
9595 if (arg3) {
9596 ret = get_user_s64(off, arg3);
9597 if (is_error(ret)) {
9598 break;
9600 offp = &off;
9602 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9603 if (!is_error(ret) && arg3) {
9604 abi_long ret2 = put_user_s64(off, arg3);
9605 if (is_error(ret2)) {
9606 ret = ret2;
9609 break;
9611 #endif
9612 #else
9613 case TARGET_NR_sendfile:
9614 #ifdef TARGET_NR_sendfile64
9615 case TARGET_NR_sendfile64:
9616 #endif
9617 goto unimplemented;
9618 #endif
9620 #ifdef TARGET_NR_getpmsg
9621 case TARGET_NR_getpmsg:
9622 goto unimplemented;
9623 #endif
9624 #ifdef TARGET_NR_putpmsg
9625 case TARGET_NR_putpmsg:
9626 goto unimplemented;
9627 #endif
9628 #ifdef TARGET_NR_vfork
9629 case TARGET_NR_vfork:
9630 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
9631 0, 0, 0, 0));
9632 break;
9633 #endif
9634 #ifdef TARGET_NR_ugetrlimit
9635 case TARGET_NR_ugetrlimit:
9637 struct rlimit rlim;
9638 int resource = target_to_host_resource(arg1);
9639 ret = get_errno(getrlimit(resource, &rlim));
9640 if (!is_error(ret)) {
9641 struct target_rlimit *target_rlim;
9642 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9643 goto efault;
9644 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9645 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9646 unlock_user_struct(target_rlim, arg2, 1);
9648 break;
9650 #endif
9651 #ifdef TARGET_NR_truncate64
9652 case TARGET_NR_truncate64:
9653 if (!(p = lock_user_string(arg1)))
9654 goto efault;
9655 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
9656 unlock_user(p, arg1, 0);
9657 break;
9658 #endif
9659 #ifdef TARGET_NR_ftruncate64
9660 case TARGET_NR_ftruncate64:
9661 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
9662 break;
9663 #endif
9664 #ifdef TARGET_NR_stat64
9665 case TARGET_NR_stat64:
9666 if (!(p = lock_user_string(arg1)))
9667 goto efault;
9668 ret = get_errno(stat(path(p), &st));
9669 unlock_user(p, arg1, 0);
9670 if (!is_error(ret))
9671 ret = host_to_target_stat64(cpu_env, arg2, &st);
9672 break;
9673 #endif
9674 #ifdef TARGET_NR_lstat64
9675 case TARGET_NR_lstat64:
9676 if (!(p = lock_user_string(arg1)))
9677 goto efault;
9678 ret = get_errno(lstat(path(p), &st));
9679 unlock_user(p, arg1, 0);
9680 if (!is_error(ret))
9681 ret = host_to_target_stat64(cpu_env, arg2, &st);
9682 break;
9683 #endif
9684 #ifdef TARGET_NR_fstat64
9685 case TARGET_NR_fstat64:
9686 ret = get_errno(fstat(arg1, &st));
9687 if (!is_error(ret))
9688 ret = host_to_target_stat64(cpu_env, arg2, &st);
9689 break;
9690 #endif
9691 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
9692 #ifdef TARGET_NR_fstatat64
9693 case TARGET_NR_fstatat64:
9694 #endif
9695 #ifdef TARGET_NR_newfstatat
9696 case TARGET_NR_newfstatat:
9697 #endif
9698 if (!(p = lock_user_string(arg2)))
9699 goto efault;
9700 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
9701 if (!is_error(ret))
9702 ret = host_to_target_stat64(cpu_env, arg3, &st);
9703 break;
9704 #endif
9705 #ifdef TARGET_NR_lchown
9706 case TARGET_NR_lchown:
9707 if (!(p = lock_user_string(arg1)))
9708 goto efault;
9709 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
9710 unlock_user(p, arg1, 0);
9711 break;
9712 #endif
9713 #ifdef TARGET_NR_getuid
9714 case TARGET_NR_getuid:
9715 ret = get_errno(high2lowuid(getuid()));
9716 break;
9717 #endif
9718 #ifdef TARGET_NR_getgid
9719 case TARGET_NR_getgid:
9720 ret = get_errno(high2lowgid(getgid()));
9721 break;
9722 #endif
9723 #ifdef TARGET_NR_geteuid
9724 case TARGET_NR_geteuid:
9725 ret = get_errno(high2lowuid(geteuid()));
9726 break;
9727 #endif
9728 #ifdef TARGET_NR_getegid
9729 case TARGET_NR_getegid:
9730 ret = get_errno(high2lowgid(getegid()));
9731 break;
9732 #endif
9733 case TARGET_NR_setreuid:
9734 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
9735 break;
9736 case TARGET_NR_setregid:
9737 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
9738 break;
9739 case TARGET_NR_getgroups:
9741 int gidsetsize = arg1;
9742 target_id *target_grouplist;
9743 gid_t *grouplist;
9744 int i;
9746 grouplist = alloca(gidsetsize * sizeof(gid_t));
9747 ret = get_errno(getgroups(gidsetsize, grouplist));
9748 if (gidsetsize == 0)
9749 break;
9750 if (!is_error(ret)) {
9751 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
9752 if (!target_grouplist)
9753 goto efault;
9754 for(i = 0;i < ret; i++)
9755 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
9756 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
9759 break;
9760 case TARGET_NR_setgroups:
9762 int gidsetsize = arg1;
9763 target_id *target_grouplist;
9764 gid_t *grouplist = NULL;
9765 int i;
9766 if (gidsetsize) {
9767 grouplist = alloca(gidsetsize * sizeof(gid_t));
9768 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
9769 if (!target_grouplist) {
9770 ret = -TARGET_EFAULT;
9771 goto fail;
9773 for (i = 0; i < gidsetsize; i++) {
9774 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
9776 unlock_user(target_grouplist, arg2, 0);
9778 ret = get_errno(setgroups(gidsetsize, grouplist));
9780 break;
9781 case TARGET_NR_fchown:
9782 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
9783 break;
9784 #if defined(TARGET_NR_fchownat)
9785 case TARGET_NR_fchownat:
9786 if (!(p = lock_user_string(arg2)))
9787 goto efault;
9788 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
9789 low2highgid(arg4), arg5));
9790 unlock_user(p, arg2, 0);
9791 break;
9792 #endif
9793 #ifdef TARGET_NR_setresuid
9794 case TARGET_NR_setresuid:
9795 ret = get_errno(sys_setresuid(low2highuid(arg1),
9796 low2highuid(arg2),
9797 low2highuid(arg3)));
9798 break;
9799 #endif
9800 #ifdef TARGET_NR_getresuid
9801 case TARGET_NR_getresuid:
9803 uid_t ruid, euid, suid;
9804 ret = get_errno(getresuid(&ruid, &euid, &suid));
9805 if (!is_error(ret)) {
9806 if (put_user_id(high2lowuid(ruid), arg1)
9807 || put_user_id(high2lowuid(euid), arg2)
9808 || put_user_id(high2lowuid(suid), arg3))
9809 goto efault;
9812 break;
9813 #endif
9814 #ifdef TARGET_NR_getresgid
9815 case TARGET_NR_setresgid:
9816 ret = get_errno(sys_setresgid(low2highgid(arg1),
9817 low2highgid(arg2),
9818 low2highgid(arg3)));
9819 break;
9820 #endif
9821 #ifdef TARGET_NR_getresgid
9822 case TARGET_NR_getresgid:
9824 gid_t rgid, egid, sgid;
9825 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9826 if (!is_error(ret)) {
9827 if (put_user_id(high2lowgid(rgid), arg1)
9828 || put_user_id(high2lowgid(egid), arg2)
9829 || put_user_id(high2lowgid(sgid), arg3))
9830 goto efault;
9833 break;
9834 #endif
9835 #ifdef TARGET_NR_chown
9836 case TARGET_NR_chown:
9837 if (!(p = lock_user_string(arg1)))
9838 goto efault;
9839 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
9840 unlock_user(p, arg1, 0);
9841 break;
9842 #endif
9843 case TARGET_NR_setuid:
9844 ret = get_errno(sys_setuid(low2highuid(arg1)));
9845 break;
9846 case TARGET_NR_setgid:
9847 ret = get_errno(sys_setgid(low2highgid(arg1)));
9848 break;
9849 case TARGET_NR_setfsuid:
9850 ret = get_errno(setfsuid(arg1));
9851 break;
9852 case TARGET_NR_setfsgid:
9853 ret = get_errno(setfsgid(arg1));
9854 break;
9856 #ifdef TARGET_NR_lchown32
9857 case TARGET_NR_lchown32:
9858 if (!(p = lock_user_string(arg1)))
9859 goto efault;
9860 ret = get_errno(lchown(p, arg2, arg3));
9861 unlock_user(p, arg1, 0);
9862 break;
9863 #endif
9864 #ifdef TARGET_NR_getuid32
9865 case TARGET_NR_getuid32:
9866 ret = get_errno(getuid());
9867 break;
9868 #endif
9870 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
9871 /* Alpha specific */
9872 case TARGET_NR_getxuid:
9874 uid_t euid;
9875 euid=geteuid();
9876 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
9878 ret = get_errno(getuid());
9879 break;
9880 #endif
9881 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
9882 /* Alpha specific */
9883 case TARGET_NR_getxgid:
9885 uid_t egid;
9886 egid=getegid();
9887 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
9889 ret = get_errno(getgid());
9890 break;
9891 #endif
9892 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
9893 /* Alpha specific */
9894 case TARGET_NR_osf_getsysinfo:
9895 ret = -TARGET_EOPNOTSUPP;
9896 switch (arg1) {
9897 case TARGET_GSI_IEEE_FP_CONTROL:
9899 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
9901 /* Copied from linux ieee_fpcr_to_swcr. */
9902 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
9903 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
9904 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
9905 | SWCR_TRAP_ENABLE_DZE
9906 | SWCR_TRAP_ENABLE_OVF);
9907 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
9908 | SWCR_TRAP_ENABLE_INE);
9909 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
9910 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
9912 if (put_user_u64 (swcr, arg2))
9913 goto efault;
9914 ret = 0;
9916 break;
9918 /* case GSI_IEEE_STATE_AT_SIGNAL:
9919 -- Not implemented in linux kernel.
9920 case GSI_UACPROC:
9921 -- Retrieves current unaligned access state; not much used.
9922 case GSI_PROC_TYPE:
9923 -- Retrieves implver information; surely not used.
9924 case GSI_GET_HWRPB:
9925 -- Grabs a copy of the HWRPB; surely not used.
9928 break;
9929 #endif
9930 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9931 /* Alpha specific */
9932 case TARGET_NR_osf_setsysinfo:
9933 ret = -TARGET_EOPNOTSUPP;
9934 switch (arg1) {
9935 case TARGET_SSI_IEEE_FP_CONTROL:
9937 uint64_t swcr, fpcr, orig_fpcr;
9939 if (get_user_u64 (swcr, arg2)) {
9940 goto efault;
9942 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9943 fpcr = orig_fpcr & FPCR_DYN_MASK;
9945 /* Copied from linux ieee_swcr_to_fpcr. */
9946 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9947 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9948 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9949 | SWCR_TRAP_ENABLE_DZE
9950 | SWCR_TRAP_ENABLE_OVF)) << 48;
9951 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9952 | SWCR_TRAP_ENABLE_INE)) << 57;
9953 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9954 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9956 cpu_alpha_store_fpcr(cpu_env, fpcr);
9957 ret = 0;
9959 break;
9961 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9963 uint64_t exc, fpcr, orig_fpcr;
9964 int si_code;
9966 if (get_user_u64(exc, arg2)) {
9967 goto efault;
9970 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9972 /* We only add to the exception status here. */
9973 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9975 cpu_alpha_store_fpcr(cpu_env, fpcr);
9976 ret = 0;
9978 /* Old exceptions are not signaled. */
9979 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9981 /* If any exceptions set by this call,
9982 and are unmasked, send a signal. */
9983 si_code = 0;
9984 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9985 si_code = TARGET_FPE_FLTRES;
9987 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9988 si_code = TARGET_FPE_FLTUND;
9990 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9991 si_code = TARGET_FPE_FLTOVF;
9993 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9994 si_code = TARGET_FPE_FLTDIV;
9996 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9997 si_code = TARGET_FPE_FLTINV;
9999 if (si_code != 0) {
10000 target_siginfo_t info;
10001 info.si_signo = SIGFPE;
10002 info.si_errno = 0;
10003 info.si_code = si_code;
10004 info._sifields._sigfault._addr
10005 = ((CPUArchState *)cpu_env)->pc;
10006 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10009 break;
10011 /* case SSI_NVPAIRS:
10012 -- Used with SSIN_UACPROC to enable unaligned accesses.
10013 case SSI_IEEE_STATE_AT_SIGNAL:
10014 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10015 -- Not implemented in linux kernel
10018 break;
10019 #endif
10020 #ifdef TARGET_NR_osf_sigprocmask
10021 /* Alpha specific. */
10022 case TARGET_NR_osf_sigprocmask:
10024 abi_ulong mask;
10025 int how;
10026 sigset_t set, oldset;
10028 switch(arg1) {
10029 case TARGET_SIG_BLOCK:
10030 how = SIG_BLOCK;
10031 break;
10032 case TARGET_SIG_UNBLOCK:
10033 how = SIG_UNBLOCK;
10034 break;
10035 case TARGET_SIG_SETMASK:
10036 how = SIG_SETMASK;
10037 break;
10038 default:
10039 ret = -TARGET_EINVAL;
10040 goto fail;
10042 mask = arg2;
10043 target_to_host_old_sigset(&set, &mask);
10044 ret = do_sigprocmask(how, &set, &oldset);
10045 if (!ret) {
10046 host_to_target_old_sigset(&mask, &oldset);
10047 ret = mask;
10050 break;
10051 #endif
10053 #ifdef TARGET_NR_getgid32
10054 case TARGET_NR_getgid32:
10055 ret = get_errno(getgid());
10056 break;
10057 #endif
10058 #ifdef TARGET_NR_geteuid32
10059 case TARGET_NR_geteuid32:
10060 ret = get_errno(geteuid());
10061 break;
10062 #endif
10063 #ifdef TARGET_NR_getegid32
10064 case TARGET_NR_getegid32:
10065 ret = get_errno(getegid());
10066 break;
10067 #endif
10068 #ifdef TARGET_NR_setreuid32
10069 case TARGET_NR_setreuid32:
10070 ret = get_errno(setreuid(arg1, arg2));
10071 break;
10072 #endif
10073 #ifdef TARGET_NR_setregid32
10074 case TARGET_NR_setregid32:
10075 ret = get_errno(setregid(arg1, arg2));
10076 break;
10077 #endif
10078 #ifdef TARGET_NR_getgroups32
10079 case TARGET_NR_getgroups32:
10081 int gidsetsize = arg1;
10082 uint32_t *target_grouplist;
10083 gid_t *grouplist;
10084 int i;
10086 grouplist = alloca(gidsetsize * sizeof(gid_t));
10087 ret = get_errno(getgroups(gidsetsize, grouplist));
10088 if (gidsetsize == 0)
10089 break;
10090 if (!is_error(ret)) {
10091 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10092 if (!target_grouplist) {
10093 ret = -TARGET_EFAULT;
10094 goto fail;
10096 for(i = 0;i < ret; i++)
10097 target_grouplist[i] = tswap32(grouplist[i]);
10098 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10101 break;
10102 #endif
10103 #ifdef TARGET_NR_setgroups32
10104 case TARGET_NR_setgroups32:
10106 int gidsetsize = arg1;
10107 uint32_t *target_grouplist;
10108 gid_t *grouplist;
10109 int i;
10111 grouplist = alloca(gidsetsize * sizeof(gid_t));
10112 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10113 if (!target_grouplist) {
10114 ret = -TARGET_EFAULT;
10115 goto fail;
10117 for(i = 0;i < gidsetsize; i++)
10118 grouplist[i] = tswap32(target_grouplist[i]);
10119 unlock_user(target_grouplist, arg2, 0);
10120 ret = get_errno(setgroups(gidsetsize, grouplist));
10122 break;
10123 #endif
10124 #ifdef TARGET_NR_fchown32
10125 case TARGET_NR_fchown32:
10126 ret = get_errno(fchown(arg1, arg2, arg3));
10127 break;
10128 #endif
10129 #ifdef TARGET_NR_setresuid32
10130 case TARGET_NR_setresuid32:
10131 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10132 break;
10133 #endif
10134 #ifdef TARGET_NR_getresuid32
10135 case TARGET_NR_getresuid32:
10137 uid_t ruid, euid, suid;
10138 ret = get_errno(getresuid(&ruid, &euid, &suid));
10139 if (!is_error(ret)) {
10140 if (put_user_u32(ruid, arg1)
10141 || put_user_u32(euid, arg2)
10142 || put_user_u32(suid, arg3))
10143 goto efault;
10146 break;
10147 #endif
10148 #ifdef TARGET_NR_setresgid32
10149 case TARGET_NR_setresgid32:
10150 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10151 break;
10152 #endif
10153 #ifdef TARGET_NR_getresgid32
10154 case TARGET_NR_getresgid32:
10156 gid_t rgid, egid, sgid;
10157 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10158 if (!is_error(ret)) {
10159 if (put_user_u32(rgid, arg1)
10160 || put_user_u32(egid, arg2)
10161 || put_user_u32(sgid, arg3))
10162 goto efault;
10165 break;
10166 #endif
10167 #ifdef TARGET_NR_chown32
10168 case TARGET_NR_chown32:
10169 if (!(p = lock_user_string(arg1)))
10170 goto efault;
10171 ret = get_errno(chown(p, arg2, arg3));
10172 unlock_user(p, arg1, 0);
10173 break;
10174 #endif
10175 #ifdef TARGET_NR_setuid32
10176 case TARGET_NR_setuid32:
10177 ret = get_errno(sys_setuid(arg1));
10178 break;
10179 #endif
10180 #ifdef TARGET_NR_setgid32
10181 case TARGET_NR_setgid32:
10182 ret = get_errno(sys_setgid(arg1));
10183 break;
10184 #endif
10185 #ifdef TARGET_NR_setfsuid32
10186 case TARGET_NR_setfsuid32:
10187 ret = get_errno(setfsuid(arg1));
10188 break;
10189 #endif
10190 #ifdef TARGET_NR_setfsgid32
10191 case TARGET_NR_setfsgid32:
10192 ret = get_errno(setfsgid(arg1));
10193 break;
10194 #endif
10196 case TARGET_NR_pivot_root:
10197 goto unimplemented;
10198 #ifdef TARGET_NR_mincore
10199 case TARGET_NR_mincore:
10201 void *a;
10202 ret = -TARGET_EFAULT;
10203 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10204 goto efault;
10205 if (!(p = lock_user_string(arg3)))
10206 goto mincore_fail;
10207 ret = get_errno(mincore(a, arg2, p));
10208 unlock_user(p, arg3, ret);
10209 mincore_fail:
10210 unlock_user(a, arg1, 0);
10212 break;
10213 #endif
10214 #ifdef TARGET_NR_arm_fadvise64_64
10215 case TARGET_NR_arm_fadvise64_64:
10216 /* arm_fadvise64_64 looks like fadvise64_64 but
10217 * with different argument order: fd, advice, offset, len
10218 * rather than the usual fd, offset, len, advice.
10219 * Note that offset and len are both 64-bit so appear as
10220 * pairs of 32-bit registers.
10222 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10223 target_offset64(arg5, arg6), arg2);
10224 ret = -host_to_target_errno(ret);
10225 break;
10226 #endif
10228 #if TARGET_ABI_BITS == 32
10230 #ifdef TARGET_NR_fadvise64_64
10231 case TARGET_NR_fadvise64_64:
10232 /* 6 args: fd, offset (high, low), len (high, low), advice */
10233 if (regpairs_aligned(cpu_env)) {
10234 /* offset is in (3,4), len in (5,6) and advice in 7 */
10235 arg2 = arg3;
10236 arg3 = arg4;
10237 arg4 = arg5;
10238 arg5 = arg6;
10239 arg6 = arg7;
10241 ret = -host_to_target_errno(posix_fadvise(arg1,
10242 target_offset64(arg2, arg3),
10243 target_offset64(arg4, arg5),
10244 arg6));
10245 break;
10246 #endif
10248 #ifdef TARGET_NR_fadvise64
10249 case TARGET_NR_fadvise64:
10250 /* 5 args: fd, offset (high, low), len, advice */
10251 if (regpairs_aligned(cpu_env)) {
10252 /* offset is in (3,4), len in 5 and advice in 6 */
10253 arg2 = arg3;
10254 arg3 = arg4;
10255 arg4 = arg5;
10256 arg5 = arg6;
10258 ret = -host_to_target_errno(posix_fadvise(arg1,
10259 target_offset64(arg2, arg3),
10260 arg4, arg5));
10261 break;
10262 #endif
10264 #else /* not a 32-bit ABI */
10265 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10266 #ifdef TARGET_NR_fadvise64_64
10267 case TARGET_NR_fadvise64_64:
10268 #endif
10269 #ifdef TARGET_NR_fadvise64
10270 case TARGET_NR_fadvise64:
10271 #endif
10272 #ifdef TARGET_S390X
10273 switch (arg4) {
10274 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10275 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10276 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10277 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10278 default: break;
10280 #endif
10281 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10282 break;
10283 #endif
10284 #endif /* end of 64-bit ABI fadvise handling */
10286 #ifdef TARGET_NR_madvise
10287 case TARGET_NR_madvise:
10288 /* A straight passthrough may not be safe because qemu sometimes
10289 turns private file-backed mappings into anonymous mappings.
10290 This will break MADV_DONTNEED.
10291 This is a hint, so ignoring and returning success is ok. */
10292 ret = get_errno(0);
10293 break;
10294 #endif
10295 #if TARGET_ABI_BITS == 32
10296 case TARGET_NR_fcntl64:
10298 int cmd;
10299 struct flock64 fl;
10300 from_flock64_fn *copyfrom = copy_from_user_flock64;
10301 to_flock64_fn *copyto = copy_to_user_flock64;
10303 #ifdef TARGET_ARM
10304 if (((CPUARMState *)cpu_env)->eabi) {
10305 copyfrom = copy_from_user_eabi_flock64;
10306 copyto = copy_to_user_eabi_flock64;
10308 #endif
10310 cmd = target_to_host_fcntl_cmd(arg2);
10311 if (cmd == -TARGET_EINVAL) {
10312 ret = cmd;
10313 break;
10316 switch(arg2) {
10317 case TARGET_F_GETLK64:
10318 ret = copyfrom(&fl, arg3);
10319 if (ret) {
10320 break;
10322 ret = get_errno(fcntl(arg1, cmd, &fl));
10323 if (ret == 0) {
10324 ret = copyto(arg3, &fl);
10326 break;
10328 case TARGET_F_SETLK64:
10329 case TARGET_F_SETLKW64:
10330 ret = copyfrom(&fl, arg3);
10331 if (ret) {
10332 break;
10334 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10335 break;
10336 default:
10337 ret = do_fcntl(arg1, arg2, arg3);
10338 break;
10340 break;
10342 #endif
10343 #ifdef TARGET_NR_cacheflush
10344 case TARGET_NR_cacheflush:
10345 /* self-modifying code is handled automatically, so nothing needed */
10346 ret = 0;
10347 break;
10348 #endif
10349 #ifdef TARGET_NR_security
10350 case TARGET_NR_security:
10351 goto unimplemented;
10352 #endif
10353 #ifdef TARGET_NR_getpagesize
10354 case TARGET_NR_getpagesize:
10355 ret = TARGET_PAGE_SIZE;
10356 break;
10357 #endif
10358 case TARGET_NR_gettid:
10359 ret = get_errno(gettid());
10360 break;
10361 #ifdef TARGET_NR_readahead
10362 case TARGET_NR_readahead:
10363 #if TARGET_ABI_BITS == 32
10364 if (regpairs_aligned(cpu_env)) {
10365 arg2 = arg3;
10366 arg3 = arg4;
10367 arg4 = arg5;
10369 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
10370 #else
10371 ret = get_errno(readahead(arg1, arg2, arg3));
10372 #endif
10373 break;
10374 #endif
10375 #ifdef CONFIG_ATTR
10376 #ifdef TARGET_NR_setxattr
10377 case TARGET_NR_listxattr:
10378 case TARGET_NR_llistxattr:
10380 void *p, *b = 0;
10381 if (arg2) {
10382 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10383 if (!b) {
10384 ret = -TARGET_EFAULT;
10385 break;
10388 p = lock_user_string(arg1);
10389 if (p) {
10390 if (num == TARGET_NR_listxattr) {
10391 ret = get_errno(listxattr(p, b, arg3));
10392 } else {
10393 ret = get_errno(llistxattr(p, b, arg3));
10395 } else {
10396 ret = -TARGET_EFAULT;
10398 unlock_user(p, arg1, 0);
10399 unlock_user(b, arg2, arg3);
10400 break;
10402 case TARGET_NR_flistxattr:
10404 void *b = 0;
10405 if (arg2) {
10406 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10407 if (!b) {
10408 ret = -TARGET_EFAULT;
10409 break;
10412 ret = get_errno(flistxattr(arg1, b, arg3));
10413 unlock_user(b, arg2, arg3);
10414 break;
10416 case TARGET_NR_setxattr:
10417 case TARGET_NR_lsetxattr:
10419 void *p, *n, *v = 0;
10420 if (arg3) {
10421 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10422 if (!v) {
10423 ret = -TARGET_EFAULT;
10424 break;
10427 p = lock_user_string(arg1);
10428 n = lock_user_string(arg2);
10429 if (p && n) {
10430 if (num == TARGET_NR_setxattr) {
10431 ret = get_errno(setxattr(p, n, v, arg4, arg5));
10432 } else {
10433 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
10435 } else {
10436 ret = -TARGET_EFAULT;
10438 unlock_user(p, arg1, 0);
10439 unlock_user(n, arg2, 0);
10440 unlock_user(v, arg3, 0);
10442 break;
10443 case TARGET_NR_fsetxattr:
10445 void *n, *v = 0;
10446 if (arg3) {
10447 v = lock_user(VERIFY_READ, arg3, arg4, 1);
10448 if (!v) {
10449 ret = -TARGET_EFAULT;
10450 break;
10453 n = lock_user_string(arg2);
10454 if (n) {
10455 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
10456 } else {
10457 ret = -TARGET_EFAULT;
10459 unlock_user(n, arg2, 0);
10460 unlock_user(v, arg3, 0);
10462 break;
10463 case TARGET_NR_getxattr:
10464 case TARGET_NR_lgetxattr:
10466 void *p, *n, *v = 0;
10467 if (arg3) {
10468 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10469 if (!v) {
10470 ret = -TARGET_EFAULT;
10471 break;
10474 p = lock_user_string(arg1);
10475 n = lock_user_string(arg2);
10476 if (p && n) {
10477 if (num == TARGET_NR_getxattr) {
10478 ret = get_errno(getxattr(p, n, v, arg4));
10479 } else {
10480 ret = get_errno(lgetxattr(p, n, v, arg4));
10482 } else {
10483 ret = -TARGET_EFAULT;
10485 unlock_user(p, arg1, 0);
10486 unlock_user(n, arg2, 0);
10487 unlock_user(v, arg3, arg4);
10489 break;
10490 case TARGET_NR_fgetxattr:
10492 void *n, *v = 0;
10493 if (arg3) {
10494 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10495 if (!v) {
10496 ret = -TARGET_EFAULT;
10497 break;
10500 n = lock_user_string(arg2);
10501 if (n) {
10502 ret = get_errno(fgetxattr(arg1, n, v, arg4));
10503 } else {
10504 ret = -TARGET_EFAULT;
10506 unlock_user(n, arg2, 0);
10507 unlock_user(v, arg3, arg4);
10509 break;
10510 case TARGET_NR_removexattr:
10511 case TARGET_NR_lremovexattr:
10513 void *p, *n;
10514 p = lock_user_string(arg1);
10515 n = lock_user_string(arg2);
10516 if (p && n) {
10517 if (num == TARGET_NR_removexattr) {
10518 ret = get_errno(removexattr(p, n));
10519 } else {
10520 ret = get_errno(lremovexattr(p, n));
10522 } else {
10523 ret = -TARGET_EFAULT;
10525 unlock_user(p, arg1, 0);
10526 unlock_user(n, arg2, 0);
10528 break;
10529 case TARGET_NR_fremovexattr:
10531 void *n;
10532 n = lock_user_string(arg2);
10533 if (n) {
10534 ret = get_errno(fremovexattr(arg1, n));
10535 } else {
10536 ret = -TARGET_EFAULT;
10538 unlock_user(n, arg2, 0);
10540 break;
10541 #endif
10542 #endif /* CONFIG_ATTR */
10543 #ifdef TARGET_NR_set_thread_area
10544 case TARGET_NR_set_thread_area:
10545 #if defined(TARGET_MIPS)
10546 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
10547 ret = 0;
10548 break;
10549 #elif defined(TARGET_CRIS)
10550 if (arg1 & 0xff)
10551 ret = -TARGET_EINVAL;
10552 else {
10553 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
10554 ret = 0;
10556 break;
10557 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
10558 ret = do_set_thread_area(cpu_env, arg1);
10559 break;
10560 #elif defined(TARGET_M68K)
10562 TaskState *ts = cpu->opaque;
10563 ts->tp_value = arg1;
10564 ret = 0;
10565 break;
10567 #else
10568 goto unimplemented_nowarn;
10569 #endif
10570 #endif
10571 #ifdef TARGET_NR_get_thread_area
10572 case TARGET_NR_get_thread_area:
10573 #if defined(TARGET_I386) && defined(TARGET_ABI32)
10574 ret = do_get_thread_area(cpu_env, arg1);
10575 break;
10576 #elif defined(TARGET_M68K)
10578 TaskState *ts = cpu->opaque;
10579 ret = ts->tp_value;
10580 break;
10582 #else
10583 goto unimplemented_nowarn;
10584 #endif
10585 #endif
10586 #ifdef TARGET_NR_getdomainname
10587 case TARGET_NR_getdomainname:
10588 goto unimplemented_nowarn;
10589 #endif
10591 #ifdef TARGET_NR_clock_gettime
10592 case TARGET_NR_clock_gettime:
10594 struct timespec ts;
10595 ret = get_errno(clock_gettime(arg1, &ts));
10596 if (!is_error(ret)) {
10597 host_to_target_timespec(arg2, &ts);
10599 break;
10601 #endif
10602 #ifdef TARGET_NR_clock_getres
10603 case TARGET_NR_clock_getres:
10605 struct timespec ts;
10606 ret = get_errno(clock_getres(arg1, &ts));
10607 if (!is_error(ret)) {
10608 host_to_target_timespec(arg2, &ts);
10610 break;
10612 #endif
10613 #ifdef TARGET_NR_clock_nanosleep
10614 case TARGET_NR_clock_nanosleep:
10616 struct timespec ts;
10617 target_to_host_timespec(&ts, arg3);
10618 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
10619 &ts, arg4 ? &ts : NULL));
10620 if (arg4)
10621 host_to_target_timespec(arg4, &ts);
10623 #if defined(TARGET_PPC)
10624 /* clock_nanosleep is odd in that it returns positive errno values.
10625 * On PPC, CR0 bit 3 should be set in such a situation. */
10626 if (ret && ret != -TARGET_ERESTARTSYS) {
10627 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
10629 #endif
10630 break;
10632 #endif
10634 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10635 case TARGET_NR_set_tid_address:
10636 ret = get_errno(set_tid_address((int *)g2h(arg1)));
10637 break;
10638 #endif
10640 case TARGET_NR_tkill:
10641 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10642 break;
10644 case TARGET_NR_tgkill:
10645 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
10646 target_to_host_signal(arg3)));
10647 break;
10649 #ifdef TARGET_NR_set_robust_list
10650 case TARGET_NR_set_robust_list:
10651 case TARGET_NR_get_robust_list:
10652 /* The ABI for supporting robust futexes has userspace pass
10653 * the kernel a pointer to a linked list which is updated by
10654 * userspace after the syscall; the list is walked by the kernel
10655 * when the thread exits. Since the linked list in QEMU guest
10656 * memory isn't a valid linked list for the host and we have
10657 * no way to reliably intercept the thread-death event, we can't
10658 * support these. Silently return ENOSYS so that guest userspace
10659 * falls back to a non-robust futex implementation (which should
10660 * be OK except in the corner case of the guest crashing while
10661 * holding a mutex that is shared with another process via
10662 * shared memory).
10664 goto unimplemented_nowarn;
10665 #endif
10667 #if defined(TARGET_NR_utimensat)
10668 case TARGET_NR_utimensat:
10670 struct timespec *tsp, ts[2];
10671 if (!arg3) {
10672 tsp = NULL;
10673 } else {
10674 target_to_host_timespec(ts, arg3);
10675 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10676 tsp = ts;
10678 if (!arg2)
10679 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10680 else {
10681 if (!(p = lock_user_string(arg2))) {
10682 ret = -TARGET_EFAULT;
10683 goto fail;
10685 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10686 unlock_user(p, arg2, 0);
10689 break;
10690 #endif
10691 case TARGET_NR_futex:
10692 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10693 break;
10694 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10695 case TARGET_NR_inotify_init:
10696 ret = get_errno(sys_inotify_init());
10697 break;
10698 #endif
10699 #ifdef CONFIG_INOTIFY1
10700 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
10701 case TARGET_NR_inotify_init1:
10702 ret = get_errno(sys_inotify_init1(arg1));
10703 break;
10704 #endif
10705 #endif
10706 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
10707 case TARGET_NR_inotify_add_watch:
10708 p = lock_user_string(arg2);
10709 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
10710 unlock_user(p, arg2, 0);
10711 break;
10712 #endif
10713 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
10714 case TARGET_NR_inotify_rm_watch:
10715 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
10716 break;
10717 #endif
10719 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
10720 case TARGET_NR_mq_open:
10722 struct mq_attr posix_mq_attr, *attrp;
10724 p = lock_user_string(arg1 - 1);
10725 if (arg4 != 0) {
10726 copy_from_user_mq_attr (&posix_mq_attr, arg4);
10727 attrp = &posix_mq_attr;
10728 } else {
10729 attrp = 0;
10731 ret = get_errno(mq_open(p, arg2, arg3, attrp));
10732 unlock_user (p, arg1, 0);
10734 break;
10736 case TARGET_NR_mq_unlink:
10737 p = lock_user_string(arg1 - 1);
10738 ret = get_errno(mq_unlink(p));
10739 unlock_user (p, arg1, 0);
10740 break;
10742 case TARGET_NR_mq_timedsend:
10744 struct timespec ts;
10746 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10747 if (arg5 != 0) {
10748 target_to_host_timespec(&ts, arg5);
10749 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
10750 host_to_target_timespec(arg5, &ts);
10751 } else {
10752 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
10754 unlock_user (p, arg2, arg3);
10756 break;
10758 case TARGET_NR_mq_timedreceive:
10760 struct timespec ts;
10761 unsigned int prio;
10763 p = lock_user (VERIFY_READ, arg2, arg3, 1);
10764 if (arg5 != 0) {
10765 target_to_host_timespec(&ts, arg5);
10766 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10767 &prio, &ts));
10768 host_to_target_timespec(arg5, &ts);
10769 } else {
10770 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
10771 &prio, NULL));
10773 unlock_user (p, arg2, arg3);
10774 if (arg4 != 0)
10775 put_user_u32(prio, arg4);
10777 break;
10779 /* Not implemented for now... */
10780 /* case TARGET_NR_mq_notify: */
10781 /* break; */
10783 case TARGET_NR_mq_getsetattr:
10785 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
10786 ret = 0;
10787 if (arg3 != 0) {
10788 ret = mq_getattr(arg1, &posix_mq_attr_out);
10789 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
10791 if (arg2 != 0) {
10792 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
10793 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
10797 break;
10798 #endif
10800 #ifdef CONFIG_SPLICE
10801 #ifdef TARGET_NR_tee
10802 case TARGET_NR_tee:
10804 ret = get_errno(tee(arg1,arg2,arg3,arg4));
10806 break;
10807 #endif
10808 #ifdef TARGET_NR_splice
10809 case TARGET_NR_splice:
10811 loff_t loff_in, loff_out;
10812 loff_t *ploff_in = NULL, *ploff_out = NULL;
10813 if (arg2) {
10814 if (get_user_u64(loff_in, arg2)) {
10815 goto efault;
10817 ploff_in = &loff_in;
10819 if (arg4) {
10820 if (get_user_u64(loff_out, arg4)) {
10821 goto efault;
10823 ploff_out = &loff_out;
10825 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
10826 if (arg2) {
10827 if (put_user_u64(loff_in, arg2)) {
10828 goto efault;
10831 if (arg4) {
10832 if (put_user_u64(loff_out, arg4)) {
10833 goto efault;
10837 break;
10838 #endif
10839 #ifdef TARGET_NR_vmsplice
10840 case TARGET_NR_vmsplice:
10842 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10843 if (vec != NULL) {
10844 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
10845 unlock_iovec(vec, arg2, arg3, 0);
10846 } else {
10847 ret = -host_to_target_errno(errno);
10850 break;
10851 #endif
10852 #endif /* CONFIG_SPLICE */
10853 #ifdef CONFIG_EVENTFD
10854 #if defined(TARGET_NR_eventfd)
10855 case TARGET_NR_eventfd:
10856 ret = get_errno(eventfd(arg1, 0));
10857 fd_trans_unregister(ret);
10858 break;
10859 #endif
10860 #if defined(TARGET_NR_eventfd2)
10861 case TARGET_NR_eventfd2:
10863 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
10864 if (arg2 & TARGET_O_NONBLOCK) {
10865 host_flags |= O_NONBLOCK;
10867 if (arg2 & TARGET_O_CLOEXEC) {
10868 host_flags |= O_CLOEXEC;
10870 ret = get_errno(eventfd(arg1, host_flags));
10871 fd_trans_unregister(ret);
10872 break;
10874 #endif
10875 #endif /* CONFIG_EVENTFD */
10876 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
10877 case TARGET_NR_fallocate:
10878 #if TARGET_ABI_BITS == 32
10879 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
10880 target_offset64(arg5, arg6)));
10881 #else
10882 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
10883 #endif
10884 break;
10885 #endif
10886 #if defined(CONFIG_SYNC_FILE_RANGE)
10887 #if defined(TARGET_NR_sync_file_range)
10888 case TARGET_NR_sync_file_range:
10889 #if TARGET_ABI_BITS == 32
10890 #if defined(TARGET_MIPS)
10891 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10892 target_offset64(arg5, arg6), arg7));
10893 #else
10894 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
10895 target_offset64(arg4, arg5), arg6));
10896 #endif /* !TARGET_MIPS */
10897 #else
10898 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
10899 #endif
10900 break;
10901 #endif
10902 #if defined(TARGET_NR_sync_file_range2)
10903 case TARGET_NR_sync_file_range2:
10904 /* This is like sync_file_range but the arguments are reordered */
10905 #if TARGET_ABI_BITS == 32
10906 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
10907 target_offset64(arg5, arg6), arg2));
10908 #else
10909 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
10910 #endif
10911 break;
10912 #endif
10913 #endif
10914 #if defined(TARGET_NR_signalfd4)
10915 case TARGET_NR_signalfd4:
10916 ret = do_signalfd4(arg1, arg2, arg4);
10917 break;
10918 #endif
10919 #if defined(TARGET_NR_signalfd)
10920 case TARGET_NR_signalfd:
10921 ret = do_signalfd4(arg1, arg2, 0);
10922 break;
10923 #endif
10924 #if defined(CONFIG_EPOLL)
10925 #if defined(TARGET_NR_epoll_create)
10926 case TARGET_NR_epoll_create:
10927 ret = get_errno(epoll_create(arg1));
10928 break;
10929 #endif
10930 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10931 case TARGET_NR_epoll_create1:
10932 ret = get_errno(epoll_create1(arg1));
10933 break;
10934 #endif
10935 #if defined(TARGET_NR_epoll_ctl)
10936 case TARGET_NR_epoll_ctl:
10938 struct epoll_event ep;
10939 struct epoll_event *epp = 0;
10940 if (arg4) {
10941 struct target_epoll_event *target_ep;
10942 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10943 goto efault;
10945 ep.events = tswap32(target_ep->events);
10946 /* The epoll_data_t union is just opaque data to the kernel,
10947 * so we transfer all 64 bits across and need not worry what
10948 * actual data type it is.
10950 ep.data.u64 = tswap64(target_ep->data.u64);
10951 unlock_user_struct(target_ep, arg4, 0);
10952 epp = &ep;
10954 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10955 break;
10957 #endif
10959 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
10960 #if defined(TARGET_NR_epoll_wait)
10961 case TARGET_NR_epoll_wait:
10962 #endif
10963 #if defined(TARGET_NR_epoll_pwait)
10964 case TARGET_NR_epoll_pwait:
10965 #endif
10967 struct target_epoll_event *target_ep;
10968 struct epoll_event *ep;
10969 int epfd = arg1;
10970 int maxevents = arg3;
10971 int timeout = arg4;
10973 target_ep = lock_user(VERIFY_WRITE, arg2,
10974 maxevents * sizeof(struct target_epoll_event), 1);
10975 if (!target_ep) {
10976 goto efault;
10979 ep = alloca(maxevents * sizeof(struct epoll_event));
10981 switch (num) {
10982 #if defined(TARGET_NR_epoll_pwait)
10983 case TARGET_NR_epoll_pwait:
10985 target_sigset_t *target_set;
10986 sigset_t _set, *set = &_set;
10988 if (arg5) {
10989 target_set = lock_user(VERIFY_READ, arg5,
10990 sizeof(target_sigset_t), 1);
10991 if (!target_set) {
10992 unlock_user(target_ep, arg2, 0);
10993 goto efault;
10995 target_to_host_sigset(set, target_set);
10996 unlock_user(target_set, arg5, 0);
10997 } else {
10998 set = NULL;
11001 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11002 set, SIGSET_T_SIZE));
11003 break;
11005 #endif
11006 #if defined(TARGET_NR_epoll_wait)
11007 case TARGET_NR_epoll_wait:
11008 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11009 NULL, 0));
11010 break;
11011 #endif
11012 default:
11013 ret = -TARGET_ENOSYS;
11015 if (!is_error(ret)) {
11016 int i;
11017 for (i = 0; i < ret; i++) {
11018 target_ep[i].events = tswap32(ep[i].events);
11019 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11022 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11023 break;
11025 #endif
11026 #endif
11027 #ifdef TARGET_NR_prlimit64
11028 case TARGET_NR_prlimit64:
11030 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11031 struct target_rlimit64 *target_rnew, *target_rold;
11032 struct host_rlimit64 rnew, rold, *rnewp = 0;
11033 int resource = target_to_host_resource(arg2);
11034 if (arg3) {
11035 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11036 goto efault;
11038 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11039 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11040 unlock_user_struct(target_rnew, arg3, 0);
11041 rnewp = &rnew;
11044 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11045 if (!is_error(ret) && arg4) {
11046 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11047 goto efault;
11049 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11050 target_rold->rlim_max = tswap64(rold.rlim_max);
11051 unlock_user_struct(target_rold, arg4, 1);
11053 break;
11055 #endif
11056 #ifdef TARGET_NR_gethostname
11057 case TARGET_NR_gethostname:
11059 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11060 if (name) {
11061 ret = get_errno(gethostname(name, arg2));
11062 unlock_user(name, arg1, arg2);
11063 } else {
11064 ret = -TARGET_EFAULT;
11066 break;
11068 #endif
11069 #ifdef TARGET_NR_atomic_cmpxchg_32
11070 case TARGET_NR_atomic_cmpxchg_32:
11072 /* should use start_exclusive from main.c */
11073 abi_ulong mem_value;
11074 if (get_user_u32(mem_value, arg6)) {
11075 target_siginfo_t info;
11076 info.si_signo = SIGSEGV;
11077 info.si_errno = 0;
11078 info.si_code = TARGET_SEGV_MAPERR;
11079 info._sifields._sigfault._addr = arg6;
11080 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
11081 ret = 0xdeadbeef;
11084 if (mem_value == arg2)
11085 put_user_u32(arg1, arg6);
11086 ret = mem_value;
11087 break;
11089 #endif
11090 #ifdef TARGET_NR_atomic_barrier
11091 case TARGET_NR_atomic_barrier:
11093 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11094 ret = 0;
11095 break;
11097 #endif
11099 #ifdef TARGET_NR_timer_create
11100 case TARGET_NR_timer_create:
11102 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11104 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11106 int clkid = arg1;
11107 int timer_index = next_free_host_timer();
11109 if (timer_index < 0) {
11110 ret = -TARGET_EAGAIN;
11111 } else {
11112 timer_t *phtimer = g_posix_timers + timer_index;
11114 if (arg2) {
11115 phost_sevp = &host_sevp;
11116 ret = target_to_host_sigevent(phost_sevp, arg2);
11117 if (ret != 0) {
11118 break;
11122 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11123 if (ret) {
11124 phtimer = NULL;
11125 } else {
11126 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11127 goto efault;
11131 break;
11133 #endif
11135 #ifdef TARGET_NR_timer_settime
11136 case TARGET_NR_timer_settime:
11138 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11139 * struct itimerspec * old_value */
11140 target_timer_t timerid = get_timer_id(arg1);
11142 if (timerid < 0) {
11143 ret = timerid;
11144 } else if (arg3 == 0) {
11145 ret = -TARGET_EINVAL;
11146 } else {
11147 timer_t htimer = g_posix_timers[timerid];
11148 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11150 target_to_host_itimerspec(&hspec_new, arg3);
11151 ret = get_errno(
11152 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11153 host_to_target_itimerspec(arg2, &hspec_old);
11155 break;
11157 #endif
11159 #ifdef TARGET_NR_timer_gettime
11160 case TARGET_NR_timer_gettime:
11162 /* args: timer_t timerid, struct itimerspec *curr_value */
11163 target_timer_t timerid = get_timer_id(arg1);
11165 if (timerid < 0) {
11166 ret = timerid;
11167 } else if (!arg2) {
11168 ret = -TARGET_EFAULT;
11169 } else {
11170 timer_t htimer = g_posix_timers[timerid];
11171 struct itimerspec hspec;
11172 ret = get_errno(timer_gettime(htimer, &hspec));
11174 if (host_to_target_itimerspec(arg2, &hspec)) {
11175 ret = -TARGET_EFAULT;
11178 break;
11180 #endif
11182 #ifdef TARGET_NR_timer_getoverrun
11183 case TARGET_NR_timer_getoverrun:
11185 /* args: timer_t timerid */
11186 target_timer_t timerid = get_timer_id(arg1);
11188 if (timerid < 0) {
11189 ret = timerid;
11190 } else {
11191 timer_t htimer = g_posix_timers[timerid];
11192 ret = get_errno(timer_getoverrun(htimer));
11194 fd_trans_unregister(ret);
11195 break;
11197 #endif
11199 #ifdef TARGET_NR_timer_delete
11200 case TARGET_NR_timer_delete:
11202 /* args: timer_t timerid */
11203 target_timer_t timerid = get_timer_id(arg1);
11205 if (timerid < 0) {
11206 ret = timerid;
11207 } else {
11208 timer_t htimer = g_posix_timers[timerid];
11209 ret = get_errno(timer_delete(htimer));
11210 g_posix_timers[timerid] = 0;
11212 break;
11214 #endif
11216 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11217 case TARGET_NR_timerfd_create:
11218 ret = get_errno(timerfd_create(arg1,
11219 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11220 break;
11221 #endif
11223 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11224 case TARGET_NR_timerfd_gettime:
11226 struct itimerspec its_curr;
11228 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11230 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11231 goto efault;
11234 break;
11235 #endif
11237 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11238 case TARGET_NR_timerfd_settime:
11240 struct itimerspec its_new, its_old, *p_new;
11242 if (arg3) {
11243 if (target_to_host_itimerspec(&its_new, arg3)) {
11244 goto efault;
11246 p_new = &its_new;
11247 } else {
11248 p_new = NULL;
11251 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11253 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11254 goto efault;
11257 break;
11258 #endif
11260 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11261 case TARGET_NR_ioprio_get:
11262 ret = get_errno(ioprio_get(arg1, arg2));
11263 break;
11264 #endif
11266 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11267 case TARGET_NR_ioprio_set:
11268 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11269 break;
11270 #endif
11272 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11273 case TARGET_NR_setns:
11274 ret = get_errno(setns(arg1, arg2));
11275 break;
11276 #endif
11277 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11278 case TARGET_NR_unshare:
11279 ret = get_errno(unshare(arg1));
11280 break;
11281 #endif
11283 default:
11284 unimplemented:
11285 gemu_log("qemu: Unsupported syscall: %d\n", num);
11286 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11287 unimplemented_nowarn:
11288 #endif
11289 ret = -TARGET_ENOSYS;
11290 break;
11292 fail:
11293 #ifdef DEBUG
11294 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11295 #endif
11296 if(do_strace)
11297 print_syscall_ret(num, ret);
11298 trace_guest_user_syscall_ret(cpu, num, ret);
11299 return ret;
11300 efault:
11301 ret = -TARGET_EFAULT;
11302 goto fail;