hw/char: QOM'ify escc.c
[qemu/ar7.git] / linux-user / syscall.c
blobdf70255e5f2483a3a005849f8667ed08c3afb72b
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/mman.h>
36 #include <sys/swap.h>
37 #include <linux/capability.h>
38 #include <sched.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <sys/poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include "linux_loop.h"
105 #include "uname.h"
107 #include "qemu.h"
/* Clone flags handled by the NPTL thread emulation path. */
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

/* Drop any host-libc definitions so we can define our own raw
 * syscall stub macros below. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
/* Macros that define a static host-syscall stub named 'name',
 * invoked directly via syscall(2) with __NR_##name. */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Aliases so the sys_* stub names defined below map to the real
 * host syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
211 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
212 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
213 #endif
214 #if !defined(__NR_getdents) || \
215 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
220 loff_t *, res, uint, wh);
221 #endif
222 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
223 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
226 #endif
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill,int,tid,int,sig)
229 #endif
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group,int,error_code)
232 #endif
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address,int *,tidptr)
235 #endif
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
238 const struct timespec *,timeout,int *,uaddr2,int,val3)
239 #endif
240 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
241 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
244 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
247 void *, arg);
248 _syscall2(int, capget, struct __user_cap_header_struct *, header,
249 struct __user_cap_data_struct *, data);
250 _syscall2(int, capset, struct __user_cap_header_struct *, header,
251 struct __user_cap_data_struct *, data);
252 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
253 _syscall2(int, ioprio_get, int, which, int, who)
254 #endif
255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
256 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
257 #endif
258 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
259 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
260 #endif
262 static bitmask_transtbl fcntl_flags_tbl[] = {
263 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
264 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
265 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
266 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
267 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
268 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
269 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
270 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
271 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
272 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
273 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
274 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
275 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
278 #endif
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
281 #endif
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
284 #endif
285 #if defined(O_PATH)
286 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
287 #endif
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
291 #endif
292 { 0, 0, 0, 0 }
295 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
296 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
297 typedef struct TargetFdTrans {
298 TargetFdDataFunc host_to_target_data;
299 TargetFdDataFunc target_to_host_data;
300 TargetFdAddrFunc target_to_host_addr;
301 } TargetFdTrans;
303 static TargetFdTrans **target_fd_trans;
305 static unsigned int target_fd_max;
307 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
309 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
310 return target_fd_trans[fd]->host_to_target_data;
312 return NULL;
315 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
317 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
318 return target_fd_trans[fd]->target_to_host_addr;
320 return NULL;
323 static void fd_trans_register(int fd, TargetFdTrans *trans)
325 unsigned int oldmax;
327 if (fd >= target_fd_max) {
328 oldmax = target_fd_max;
329 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
330 target_fd_trans = g_renew(TargetFdTrans *,
331 target_fd_trans, target_fd_max);
332 memset((void *)(target_fd_trans + oldmax), 0,
333 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
335 target_fd_trans[fd] = trans;
338 static void fd_trans_unregister(int fd)
340 if (fd >= 0 && fd < target_fd_max) {
341 target_fd_trans[fd] = NULL;
345 static void fd_trans_dup(int oldfd, int newfd)
347 fd_trans_unregister(newfd);
348 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
349 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd(2)-like helper: fill buf and return the string length
 * including the NUL terminator, or -1 with errno set on failure. */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Prefer the libc wrappers: NULL pathname means "the fd itself". */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers and return its index,
 * or -1 if all 32 slots are in use. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
483 #define ERRNO_TABLE_SIZE 1200
485 /* target_to_host_errno_table[] is initialized from
486 * host_to_target_errno_table[] in syscall_init(). */
487 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
491 * This list is the union of errno values overridden in asm-<arch>/errno.h
492 * minus the errnos that are not actually generic to all archs.
494 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
495 [EAGAIN] = TARGET_EAGAIN,
496 [EIDRM] = TARGET_EIDRM,
497 [ECHRNG] = TARGET_ECHRNG,
498 [EL2NSYNC] = TARGET_EL2NSYNC,
499 [EL3HLT] = TARGET_EL3HLT,
500 [EL3RST] = TARGET_EL3RST,
501 [ELNRNG] = TARGET_ELNRNG,
502 [EUNATCH] = TARGET_EUNATCH,
503 [ENOCSI] = TARGET_ENOCSI,
504 [EL2HLT] = TARGET_EL2HLT,
505 [EDEADLK] = TARGET_EDEADLK,
506 [ENOLCK] = TARGET_ENOLCK,
507 [EBADE] = TARGET_EBADE,
508 [EBADR] = TARGET_EBADR,
509 [EXFULL] = TARGET_EXFULL,
510 [ENOANO] = TARGET_ENOANO,
511 [EBADRQC] = TARGET_EBADRQC,
512 [EBADSLT] = TARGET_EBADSLT,
513 [EBFONT] = TARGET_EBFONT,
514 [ENOSTR] = TARGET_ENOSTR,
515 [ENODATA] = TARGET_ENODATA,
516 [ETIME] = TARGET_ETIME,
517 [ENOSR] = TARGET_ENOSR,
518 [ENONET] = TARGET_ENONET,
519 [ENOPKG] = TARGET_ENOPKG,
520 [EREMOTE] = TARGET_EREMOTE,
521 [ENOLINK] = TARGET_ENOLINK,
522 [EADV] = TARGET_EADV,
523 [ESRMNT] = TARGET_ESRMNT,
524 [ECOMM] = TARGET_ECOMM,
525 [EPROTO] = TARGET_EPROTO,
526 [EDOTDOT] = TARGET_EDOTDOT,
527 [EMULTIHOP] = TARGET_EMULTIHOP,
528 [EBADMSG] = TARGET_EBADMSG,
529 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
530 [EOVERFLOW] = TARGET_EOVERFLOW,
531 [ENOTUNIQ] = TARGET_ENOTUNIQ,
532 [EBADFD] = TARGET_EBADFD,
533 [EREMCHG] = TARGET_EREMCHG,
534 [ELIBACC] = TARGET_ELIBACC,
535 [ELIBBAD] = TARGET_ELIBBAD,
536 [ELIBSCN] = TARGET_ELIBSCN,
537 [ELIBMAX] = TARGET_ELIBMAX,
538 [ELIBEXEC] = TARGET_ELIBEXEC,
539 [EILSEQ] = TARGET_EILSEQ,
540 [ENOSYS] = TARGET_ENOSYS,
541 [ELOOP] = TARGET_ELOOP,
542 [ERESTART] = TARGET_ERESTART,
543 [ESTRPIPE] = TARGET_ESTRPIPE,
544 [ENOTEMPTY] = TARGET_ENOTEMPTY,
545 [EUSERS] = TARGET_EUSERS,
546 [ENOTSOCK] = TARGET_ENOTSOCK,
547 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
548 [EMSGSIZE] = TARGET_EMSGSIZE,
549 [EPROTOTYPE] = TARGET_EPROTOTYPE,
550 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
551 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
552 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
553 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
554 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
555 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
556 [EADDRINUSE] = TARGET_EADDRINUSE,
557 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
558 [ENETDOWN] = TARGET_ENETDOWN,
559 [ENETUNREACH] = TARGET_ENETUNREACH,
560 [ENETRESET] = TARGET_ENETRESET,
561 [ECONNABORTED] = TARGET_ECONNABORTED,
562 [ECONNRESET] = TARGET_ECONNRESET,
563 [ENOBUFS] = TARGET_ENOBUFS,
564 [EISCONN] = TARGET_EISCONN,
565 [ENOTCONN] = TARGET_ENOTCONN,
566 [EUCLEAN] = TARGET_EUCLEAN,
567 [ENOTNAM] = TARGET_ENOTNAM,
568 [ENAVAIL] = TARGET_ENAVAIL,
569 [EISNAM] = TARGET_EISNAM,
570 [EREMOTEIO] = TARGET_EREMOTEIO,
571 [ESHUTDOWN] = TARGET_ESHUTDOWN,
572 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
573 [ETIMEDOUT] = TARGET_ETIMEDOUT,
574 [ECONNREFUSED] = TARGET_ECONNREFUSED,
575 [EHOSTDOWN] = TARGET_EHOSTDOWN,
576 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
577 [EALREADY] = TARGET_EALREADY,
578 [EINPROGRESS] = TARGET_EINPROGRESS,
579 [ESTALE] = TARGET_ESTALE,
580 [ECANCELED] = TARGET_ECANCELED,
581 [ENOMEDIUM] = TARGET_ENOMEDIUM,
582 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
583 #ifdef ENOKEY
584 [ENOKEY] = TARGET_ENOKEY,
585 #endif
586 #ifdef EKEYEXPIRED
587 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
588 #endif
589 #ifdef EKEYREVOKED
590 [EKEYREVOKED] = TARGET_EKEYREVOKED,
591 #endif
592 #ifdef EKEYREJECTED
593 [EKEYREJECTED] = TARGET_EKEYREJECTED,
594 #endif
595 #ifdef EOWNERDEAD
596 [EOWNERDEAD] = TARGET_EOWNERDEAD,
597 #endif
598 #ifdef ENOTRECOVERABLE
599 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
600 #endif
603 static inline int host_to_target_errno(int err)
605 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
606 host_to_target_errno_table[err]) {
607 return host_to_target_errno_table[err];
609 return err;
612 static inline int target_to_host_errno(int err)
614 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
615 target_to_host_errno_table[err]) {
616 return target_to_host_errno_table[err];
618 return err;
621 static inline abi_long get_errno(abi_long ret)
623 if (ret == -1)
624 return -host_to_target_errno(errno);
625 else
626 return ret;
629 static inline int is_error(abi_long ret)
631 return (abi_ulong)ret >= (abi_ulong)(-4096);
634 char *target_strerror(int err)
636 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
637 return NULL;
639 return strerror(target_to_host_errno(err));
/* Macros defining safe_<name>() stubs that route through safe_syscall(),
 * which guarantees correct interaction with guest signal delivery. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
689 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
690 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
691 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
692 int, flags, mode_t, mode)
693 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
694 struct rusage *, rusage)
695 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
696 int, options, struct rusage *, rusage)
697 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
698 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
699 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
700 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
701 const struct timespec *,timeout,int *,uaddr2,int,val3)
703 static inline int host_to_target_sock_type(int host_type)
705 int target_type;
707 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
708 case SOCK_DGRAM:
709 target_type = TARGET_SOCK_DGRAM;
710 break;
711 case SOCK_STREAM:
712 target_type = TARGET_SOCK_STREAM;
713 break;
714 default:
715 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
716 break;
719 #if defined(SOCK_CLOEXEC)
720 if (host_type & SOCK_CLOEXEC) {
721 target_type |= TARGET_SOCK_CLOEXEC;
723 #endif
725 #if defined(SOCK_NONBLOCK)
726 if (host_type & SOCK_NONBLOCK) {
727 target_type |= TARGET_SOCK_NONBLOCK;
729 #endif
731 return target_type;
734 static abi_ulong target_brk;
735 static abi_ulong target_original_brk;
736 static abi_ulong brk_page;
738 void target_set_brk(abi_ulong new_brk)
740 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
741 brk_page = HOST_PAGE_ALIGN(target_brk);
744 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
745 #define DEBUGF_BRK(message, args...)
747 /* do_brk() must return target values and target errnos. */
748 abi_long do_brk(abi_ulong new_brk)
750 abi_long mapped_addr;
751 int new_alloc_size;
753 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
755 if (!new_brk) {
756 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
757 return target_brk;
759 if (new_brk < target_original_brk) {
760 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
761 target_brk);
762 return target_brk;
765 /* If the new brk is less than the highest page reserved to the
766 * target heap allocation, set it and we're almost done... */
767 if (new_brk <= brk_page) {
768 /* Heap contents are initialized to zero, as for anonymous
769 * mapped pages. */
770 if (new_brk > target_brk) {
771 memset(g2h(target_brk), 0, new_brk - target_brk);
773 target_brk = new_brk;
774 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
775 return target_brk;
778 /* We need to allocate more memory after the brk... Note that
779 * we don't use MAP_FIXED because that will map over the top of
780 * any existing mapping (like the one with the host libc or qemu
781 * itself); instead we treat "mapped but at wrong address" as
782 * a failure and unmap again.
784 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
785 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
786 PROT_READ|PROT_WRITE,
787 MAP_ANON|MAP_PRIVATE, 0, 0));
789 if (mapped_addr == brk_page) {
790 /* Heap contents are initialized to zero, as for anonymous
791 * mapped pages. Technically the new pages are already
792 * initialized to zero since they *are* anonymous mapped
793 * pages, however we have to take care with the contents that
794 * come from the remaining part of the previous page: it may
795 * contains garbage data due to a previous heap usage (grown
796 * then shrunken). */
797 memset(g2h(target_brk), 0, brk_page - target_brk);
799 target_brk = new_brk;
800 brk_page = HOST_PAGE_ALIGN(target_brk);
801 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
802 target_brk);
803 return target_brk;
804 } else if (mapped_addr != -1) {
805 /* Mapped but at wrong address, meaning there wasn't actually
806 * enough space for this brk.
808 target_munmap(mapped_addr, new_alloc_size);
809 mapped_addr = -1;
810 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
812 else {
813 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
816 #if defined(TARGET_ALPHA)
817 /* We (partially) emulate OSF/1 on Alpha, which requires we
818 return a proper errno, not an unchanged brk value. */
819 return -TARGET_ENOMEM;
820 #endif
821 /* For everything else, return the previous break. */
822 return target_brk;
825 static inline abi_long copy_from_user_fdset(fd_set *fds,
826 abi_ulong target_fds_addr,
827 int n)
829 int i, nw, j, k;
830 abi_ulong b, *target_fds;
832 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
833 if (!(target_fds = lock_user(VERIFY_READ,
834 target_fds_addr,
835 sizeof(abi_ulong) * nw,
836 1)))
837 return -TARGET_EFAULT;
839 FD_ZERO(fds);
840 k = 0;
841 for (i = 0; i < nw; i++) {
842 /* grab the abi_ulong */
843 __get_user(b, &target_fds[i]);
844 for (j = 0; j < TARGET_ABI_BITS; j++) {
845 /* check the bit inside the abi_ulong */
846 if ((b >> j) & 1)
847 FD_SET(k, fds);
848 k++;
852 unlock_user(target_fds, target_fds_addr, 0);
854 return 0;
857 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
858 abi_ulong target_fds_addr,
859 int n)
861 if (target_fds_addr) {
862 if (copy_from_user_fdset(fds, target_fds_addr, n))
863 return -TARGET_EFAULT;
864 *fds_ptr = fds;
865 } else {
866 *fds_ptr = NULL;
868 return 0;
871 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
872 const fd_set *fds,
873 int n)
875 int i, nw, j, k;
876 abi_long v;
877 abi_ulong *target_fds;
879 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
880 if (!(target_fds = lock_user(VERIFY_WRITE,
881 target_fds_addr,
882 sizeof(abi_ulong) * nw,
883 0)))
884 return -TARGET_EFAULT;
886 k = 0;
887 for (i = 0; i < nw; i++) {
888 v = 0;
889 for (j = 0; j < TARGET_ABI_BITS; j++) {
890 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
891 k++;
893 __put_user(v, &target_fds[i]);
896 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
898 return 0;
901 #if defined(__alpha__)
902 #define HOST_HZ 1024
903 #else
904 #define HOST_HZ 100
905 #endif
907 static inline abi_long host_to_target_clock_t(long ticks)
909 #if HOST_HZ == TARGET_HZ
910 return ticks;
911 #else
912 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
913 #endif
916 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
917 const struct rusage *rusage)
919 struct target_rusage *target_rusage;
921 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
922 return -TARGET_EFAULT;
923 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
924 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
925 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
926 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
927 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
928 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
929 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
930 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
931 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
932 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
933 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
934 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
935 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
936 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
937 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
938 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
939 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
940 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
941 unlock_user_struct(target_rusage, target_addr, 1);
943 return 0;
946 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
948 abi_ulong target_rlim_swap;
949 rlim_t result;
951 target_rlim_swap = tswapal(target_rlim);
952 if (target_rlim_swap == TARGET_RLIM_INFINITY)
953 return RLIM_INFINITY;
955 result = target_rlim_swap;
956 if (target_rlim_swap != (rlim_t)result)
957 return RLIM_INFINITY;
959 return result;
962 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
964 abi_ulong target_rlim_swap;
965 abi_ulong result;
967 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
968 target_rlim_swap = TARGET_RLIM_INFINITY;
969 else
970 target_rlim_swap = rlim;
971 result = tswapal(target_rlim_swap);
973 return result;
976 static inline int target_to_host_resource(int code)
978 switch (code) {
979 case TARGET_RLIMIT_AS:
980 return RLIMIT_AS;
981 case TARGET_RLIMIT_CORE:
982 return RLIMIT_CORE;
983 case TARGET_RLIMIT_CPU:
984 return RLIMIT_CPU;
985 case TARGET_RLIMIT_DATA:
986 return RLIMIT_DATA;
987 case TARGET_RLIMIT_FSIZE:
988 return RLIMIT_FSIZE;
989 case TARGET_RLIMIT_LOCKS:
990 return RLIMIT_LOCKS;
991 case TARGET_RLIMIT_MEMLOCK:
992 return RLIMIT_MEMLOCK;
993 case TARGET_RLIMIT_MSGQUEUE:
994 return RLIMIT_MSGQUEUE;
995 case TARGET_RLIMIT_NICE:
996 return RLIMIT_NICE;
997 case TARGET_RLIMIT_NOFILE:
998 return RLIMIT_NOFILE;
999 case TARGET_RLIMIT_NPROC:
1000 return RLIMIT_NPROC;
1001 case TARGET_RLIMIT_RSS:
1002 return RLIMIT_RSS;
1003 case TARGET_RLIMIT_RTPRIO:
1004 return RLIMIT_RTPRIO;
1005 case TARGET_RLIMIT_SIGPENDING:
1006 return RLIMIT_SIGPENDING;
1007 case TARGET_RLIMIT_STACK:
1008 return RLIMIT_STACK;
1009 default:
1010 return code;
1014 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1015 abi_ulong target_tv_addr)
1017 struct target_timeval *target_tv;
1019 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1020 return -TARGET_EFAULT;
1022 __get_user(tv->tv_sec, &target_tv->tv_sec);
1023 __get_user(tv->tv_usec, &target_tv->tv_usec);
1025 unlock_user_struct(target_tv, target_tv_addr, 0);
1027 return 0;
1030 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1031 const struct timeval *tv)
1033 struct target_timeval *target_tv;
1035 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1036 return -TARGET_EFAULT;
1038 __put_user(tv->tv_sec, &target_tv->tv_sec);
1039 __put_user(tv->tv_usec, &target_tv->tv_usec);
1041 unlock_user_struct(target_tv, target_tv_addr, 1);
1043 return 0;
1046 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1047 abi_ulong target_tz_addr)
1049 struct target_timezone *target_tz;
1051 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1052 return -TARGET_EFAULT;
1055 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1056 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1058 unlock_user_struct(target_tz, target_tz_addr, 0);
1060 return 0;
1063 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1064 #include <mqueue.h>
1066 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1067 abi_ulong target_mq_attr_addr)
1069 struct target_mq_attr *target_mq_attr;
1071 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1072 target_mq_attr_addr, 1))
1073 return -TARGET_EFAULT;
1075 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1076 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1077 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1078 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1080 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1082 return 0;
1085 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1086 const struct mq_attr *attr)
1088 struct target_mq_attr *target_mq_attr;
1090 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1091 target_mq_attr_addr, 0))
1092 return -TARGET_EFAULT;
1094 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1095 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1096 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1097 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1099 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1101 return 0;
1103 #endif
1105 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1106 /* do_select() must return target values and target errnos. */
/* Emulate guest select(): copy the three fd_sets and the optional
 * timeout in from guest memory, run the host pselect6, then copy the
 * (possibly modified) sets and remaining time back out.  Returns
 * target values and target errnos. */
1107 static abi_long do_select(int n,
1108 abi_ulong rfd_addr, abi_ulong wfd_addr,
1109 abi_ulong efd_addr, abi_ulong target_tv_addr)
1111 fd_set rfds, wfds, efds;
1112 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1113 struct timeval tv;
1114 struct timespec ts, *ts_ptr;
1115 abi_long ret;
/* A zero guest address produces a NULL fd_set pointer (set absent). */
1117 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1118 if (ret) {
1119 return ret;
1121 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1122 if (ret) {
1123 return ret;
1125 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1126 if (ret) {
1127 return ret;
/* select() passes a timeval but pselect6 takes a timespec. */
1130 if (target_tv_addr) {
1131 if (copy_from_user_timeval(&tv, target_tv_addr))
1132 return -TARGET_EFAULT;
1133 ts.tv_sec = tv.tv_sec;
1134 ts.tv_nsec = tv.tv_usec * 1000;
1135 ts_ptr = &ts;
1136 } else {
1137 ts_ptr = NULL;
1140 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1141 ts_ptr, NULL));
1143 if (!is_error(ret)) {
1144 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1145 return -TARGET_EFAULT;
1146 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1147 return -TARGET_EFAULT;
1148 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1149 return -TARGET_EFAULT;
/* Write back the remaining timeout, as Linux select() does.
 * NOTE(review): assumes the raw pselect6 syscall updates ts in
 * place -- confirm against the safe_pselect6 wrapper. */
1151 if (target_tv_addr) {
1152 tv.tv_sec = ts.tv_sec;
1153 tv.tv_usec = ts.tv_nsec / 1000;
1154 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1155 return -TARGET_EFAULT;
1160 return ret;
1162 #endif
1164 static abi_long do_pipe2(int host_pipe[], int flags)
1166 #ifdef CONFIG_PIPE2
1167 return pipe2(host_pipe, flags);
1168 #else
1169 return -ENOSYS;
1170 #endif
/* Implement guest pipe()/pipe2().  @flags selects pipe2 behaviour;
 * @is_pipe2 distinguishes which guest syscall was used, because some
 * targets return the second fd in a CPU register for plain pipe().
 * Returns target values and target errnos.
 * NOTE(review): when CONFIG_PIPE2 is absent do_pipe2 returns host
 * -ENOSYS, which is passed through get_errno() unconverted -- verify
 * this matches the target errno numbering on all targets. */
1173 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1174 int flags, int is_pipe2)
1176 int host_pipe[2];
1177 abi_long ret;
1178 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1180 if (is_error(ret))
1181 return get_errno(ret);
1183 /* Several targets have special calling conventions for the original
1184 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1185 if (!is_pipe2) {
/* These targets return fd[0] as the syscall result and place fd[1]
 * in a second result register instead of writing to guest memory. */
1186 #if defined(TARGET_ALPHA)
1187 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1188 return host_pipe[0];
1189 #elif defined(TARGET_MIPS)
1190 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1191 return host_pipe[0];
1192 #elif defined(TARGET_SH4)
1193 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1194 return host_pipe[0];
1195 #elif defined(TARGET_SPARC)
1196 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1197 return host_pipe[0];
1198 #endif
/* Default path: store both fds into the guest int[2] array. */
1201 if (put_user_s32(host_pipe[0], pipedes)
1202 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1203 return -TARGET_EFAULT;
1204 return get_errno(ret);
1207 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1208 abi_ulong target_addr,
1209 socklen_t len)
1211 struct target_ip_mreqn *target_smreqn;
1213 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1214 if (!target_smreqn)
1215 return -TARGET_EFAULT;
1216 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1217 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1218 if (len == sizeof(struct target_ip_mreqn))
1219 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1220 unlock_user(target_smreqn, target_addr, 0);
1222 return 0;
/* Convert a guest sockaddr at @target_addr (@len bytes) into the host
 * buffer @addr, with per-family fixups (AF_UNIX path termination,
 * AF_PACKET field byteswaps).  A registered per-fd translator takes
 * precedence.  Returns 0 or -TARGET_EFAULT. */
1225 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1226 abi_ulong target_addr,
1227 socklen_t len)
1229 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1230 sa_family_t sa_family;
1231 struct target_sockaddr *target_saddr;
/* A per-fd address translator (e.g. for SOCK_PACKET) overrides the
 * generic conversion entirely. */
1233 if (fd_trans_target_to_host_addr(fd)) {
1234 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1237 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1238 if (!target_saddr)
1239 return -TARGET_EFAULT;
1241 sa_family = tswap16(target_saddr->sa_family);
1243 /* Oops. The caller might send a incomplete sun_path; sun_path
1244 * must be terminated by \0 (see the manual page), but
1245 * unfortunately it is quite common to specify sockaddr_un
1246 * length as "strlen(x->sun_path)" while it should be
1247 * "strlen(...) + 1". We'll fix that here if needed.
1248 * Linux kernel has a similar feature.
1251 if (sa_family == AF_UNIX) {
1252 if (len < unix_maxlen && len > 0) {
1253 char *cp = (char*)target_saddr;
/* NOTE(review): cp[len] reads one byte past the locked length;
 * presumably safe because callers lock len+1 bytes -- confirm. */
1255 if ( cp[len-1] && !cp[len] )
1256 len++;
1258 if (len > unix_maxlen)
1259 len = unix_maxlen;
1262 memcpy(addr, target_saddr, len);
1263 addr->sa_family = sa_family;
/* AF_PACKET carries extra multi-byte fields needing byteswaps. */
1264 if (sa_family == AF_PACKET) {
1265 struct target_sockaddr_ll *lladdr;
1267 lladdr = (struct target_sockaddr_ll *)addr;
1268 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1269 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1271 unlock_user(target_saddr, target_addr, 0);
1273 return 0;
1276 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1277 struct sockaddr *addr,
1278 socklen_t len)
1280 struct target_sockaddr *target_saddr;
1282 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1283 if (!target_saddr)
1284 return -TARGET_EFAULT;
1285 memcpy(target_saddr, addr, len);
1286 target_saddr->sa_family = tswap16(addr->sa_family);
1287 unlock_user(target_saddr, target_addr, len);
1289 return 0;
/* Convert the ancillary data (control messages) of a guest msghdr into
 * the host msghdr for sendmsg().  Walks the two cmsg chains in
 * lockstep, converting SCM_RIGHTS fds and SCM_CREDENTIALS; other
 * payloads are copied verbatim with a warning.  Returns 0 or
 * -TARGET_EFAULT. */
1292 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1293 struct target_msghdr *target_msgh)
1295 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1296 abi_long msg_controllen;
1297 abi_ulong target_cmsg_addr;
1298 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1299 socklen_t space = 0;
1301 msg_controllen = tswapal(target_msgh->msg_controllen);
1302 if (msg_controllen < sizeof (struct target_cmsghdr))
1303 goto the_end;
1304 target_cmsg_addr = tswapal(target_msgh->msg_control);
1305 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1306 target_cmsg_start = target_cmsg;
1307 if (!target_cmsg)
1308 return -TARGET_EFAULT;
1310 while (cmsg && target_cmsg) {
1311 void *data = CMSG_DATA(cmsg);
1312 void *target_data = TARGET_CMSG_DATA(target_cmsg);
/* Payload length = guest cmsg_len minus the guest header size. */
1314 int len = tswapal(target_cmsg->cmsg_len)
1315 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1317 space += CMSG_SPACE(len);
1318 if (space > msgh->msg_controllen) {
1319 space -= CMSG_SPACE(len);
1320 /* This is a QEMU bug, since we allocated the payload
1321 * area ourselves (unlike overflow in host-to-target
1322 * conversion, which is just the guest giving us a buffer
1323 * that's too small). It can't happen for the payload types
1324 * we currently support; if it becomes an issue in future
1325 * we would need to improve our allocation strategy to
1326 * something more intelligent than "twice the size of the
1327 * target buffer we're reading from".
1329 gemu_log("Host cmsg overflow\n");
1330 break;
/* SOL_SOCKET may differ numerically between guest and host. */
1333 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1334 cmsg->cmsg_level = SOL_SOCKET;
1335 } else {
1336 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1338 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1339 cmsg->cmsg_len = CMSG_LEN(len);
/* File descriptors are plain ints but need per-element swaps. */
1341 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1342 int *fd = (int *)data;
1343 int *target_fd = (int *)target_data;
1344 int i, numfds = len / sizeof(int);
1346 for (i = 0; i < numfds; i++) {
1347 __get_user(fd[i], target_fd + i);
1349 } else if (cmsg->cmsg_level == SOL_SOCKET
1350 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1351 struct ucred *cred = (struct ucred *)data;
1352 struct target_ucred *target_cred =
1353 (struct target_ucred *)target_data;
1355 __get_user(cred->pid, &target_cred->pid);
1356 __get_user(cred->uid, &target_cred->uid);
1357 __get_user(cred->gid, &target_cred->gid);
1358 } else {
/* Unknown type: best-effort byte copy, no byteswapping. */
1359 gemu_log("Unsupported ancillary data: %d/%d\n",
1360 cmsg->cmsg_level, cmsg->cmsg_type);
1361 memcpy(data, target_data, len);
1364 cmsg = CMSG_NXTHDR(msgh, cmsg);
1365 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1366 target_cmsg_start);
1368 unlock_user(target_cmsg, target_cmsg_addr, 0);
1369 the_end:
1370 msgh->msg_controllen = space;
1371 return 0;
/* Convert the ancillary data returned by a host recvmsg() into the
 * guest msghdr.  Mirrors Linux put_cmsg() truncation behaviour: a
 * half-header is never written, half-payloads may be, and truncation
 * is reported to the guest via MSG_CTRUNC.  Returns 0 or
 * -TARGET_EFAULT. */
1374 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1375 struct msghdr *msgh)
1377 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1378 abi_long msg_controllen;
1379 abi_ulong target_cmsg_addr;
1380 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1381 socklen_t space = 0;
1383 msg_controllen = tswapal(target_msgh->msg_controllen);
1384 if (msg_controllen < sizeof (struct target_cmsghdr))
1385 goto the_end;
1386 target_cmsg_addr = tswapal(target_msgh->msg_control);
1387 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1388 target_cmsg_start = target_cmsg;
1389 if (!target_cmsg)
1390 return -TARGET_EFAULT;
1392 while (cmsg && target_cmsg) {
1393 void *data = CMSG_DATA(cmsg);
1394 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1396 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1397 int tgt_len, tgt_space;
1399 /* We never copy a half-header but may copy half-data;
1400 * this is Linux's behaviour in put_cmsg(). Note that
1401 * truncation here is a guest problem (which we report
1402 * to the guest via the CTRUNC bit), unlike truncation
1403 * in target_to_host_cmsg, which is a QEMU bug.
1405 if (msg_controllen < sizeof(struct cmsghdr)) {
1406 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1407 break;
1410 if (cmsg->cmsg_level == SOL_SOCKET) {
1411 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1412 } else {
1413 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1415 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1417 tgt_len = TARGET_CMSG_LEN(len);
1419 /* Payload types which need a different size of payload on
1420 * the target must adjust tgt_len here.
1422 switch (cmsg->cmsg_level) {
1423 case SOL_SOCKET:
1424 switch (cmsg->cmsg_type) {
1425 case SO_TIMESTAMP:
1426 tgt_len = sizeof(struct target_timeval);
1427 break;
1428 default:
1429 break;
1431 default:
1432 break;
/* Clamp to the space the guest actually provided. */
1435 if (msg_controllen < tgt_len) {
1436 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1437 tgt_len = msg_controllen;
1440 /* We must now copy-and-convert len bytes of payload
1441 * into tgt_len bytes of destination space. Bear in mind
1442 * that in both source and destination we may be dealing
1443 * with a truncated value!
1445 switch (cmsg->cmsg_level) {
1446 case SOL_SOCKET:
1447 switch (cmsg->cmsg_type) {
1448 case SCM_RIGHTS:
1450 int *fd = (int *)data;
1451 int *target_fd = (int *)target_data;
1452 int i, numfds = tgt_len / sizeof(int);
1454 for (i = 0; i < numfds; i++) {
1455 __put_user(fd[i], target_fd + i);
1457 break;
1459 case SO_TIMESTAMP:
1461 struct timeval *tv = (struct timeval *)data;
1462 struct target_timeval *target_tv =
1463 (struct target_timeval *)target_data;
/* Only the full, untruncated conversion is supported. */
1465 if (len != sizeof(struct timeval) ||
1466 tgt_len != sizeof(struct target_timeval)) {
1467 goto unimplemented;
1470 /* copy struct timeval to target */
1471 __put_user(tv->tv_sec, &target_tv->tv_sec);
1472 __put_user(tv->tv_usec, &target_tv->tv_usec);
1473 break;
1475 case SCM_CREDENTIALS:
1477 struct ucred *cred = (struct ucred *)data;
1478 struct target_ucred *target_cred =
1479 (struct target_ucred *)target_data;
1481 __put_user(cred->pid, &target_cred->pid);
1482 __put_user(cred->uid, &target_cred->uid);
1483 __put_user(cred->gid, &target_cred->gid);
1484 break;
1486 default:
1487 goto unimplemented;
1489 break;
1491 default:
1492 unimplemented:
/* Unknown type: raw byte copy, zero-padding any extra space. */
1493 gemu_log("Unsupported ancillary data: %d/%d\n",
1494 cmsg->cmsg_level, cmsg->cmsg_type);
1495 memcpy(target_data, data, MIN(len, tgt_len));
1496 if (tgt_len > len) {
1497 memset(target_data + len, 0, tgt_len - len);
1501 target_cmsg->cmsg_len = tswapal(tgt_len);
1502 tgt_space = TARGET_CMSG_SPACE(len);
1503 if (msg_controllen < tgt_space) {
1504 tgt_space = msg_controllen;
1506 msg_controllen -= tgt_space;
1507 space += tgt_space;
1508 cmsg = CMSG_NXTHDR(msgh, cmsg);
1509 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1510 target_cmsg_start);
1512 unlock_user(target_cmsg, target_cmsg_addr, space);
1513 the_end:
1514 target_msgh->msg_controllen = tswapal(space);
1515 return 0;
1518 /* do_setsockopt() Must return target values and target errnos. */
/* Translate a guest setsockopt(): maps option level/name constants,
 * converts the option payload to host layout, and forwards to the
 * host setsockopt().  Unsupported combinations log and return
 * -TARGET_ENOPROTOOPT. */
1519 static abi_long do_setsockopt(int sockfd, int level, int optname,
1520 abi_ulong optval_addr, socklen_t optlen)
1522 abi_long ret;
1523 int val;
1524 struct ip_mreqn *ip_mreq;
1525 struct ip_mreq_source *ip_mreq_source;
1527 switch(level) {
1528 case SOL_TCP:
1529 /* TCP options all take an 'int' value. */
1530 if (optlen < sizeof(uint32_t))
1531 return -TARGET_EINVAL;
1533 if (get_user_u32(val, optval_addr))
1534 return -TARGET_EFAULT;
1535 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1536 break;
1537 case SOL_IP:
1538 switch(optname) {
1539 case IP_TOS:
1540 case IP_TTL:
1541 case IP_HDRINCL:
1542 case IP_ROUTER_ALERT:
1543 case IP_RECVOPTS:
1544 case IP_RETOPTS:
1545 case IP_PKTINFO:
1546 case IP_MTU_DISCOVER:
1547 case IP_RECVERR:
1548 case IP_RECVTOS:
1549 #ifdef IP_FREEBIND
1550 case IP_FREEBIND:
1551 #endif
1552 case IP_MULTICAST_TTL:
1553 case IP_MULTICAST_LOOP:
/* These accept either a full int or a single byte from the guest. */
1554 val = 0;
1555 if (optlen >= sizeof(uint32_t)) {
1556 if (get_user_u32(val, optval_addr))
1557 return -TARGET_EFAULT;
1558 } else if (optlen >= 1) {
1559 if (get_user_u8(val, optval_addr))
1560 return -TARGET_EFAULT;
1562 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1563 break;
1564 case IP_ADD_MEMBERSHIP:
1565 case IP_DROP_MEMBERSHIP:
1566 if (optlen < sizeof (struct target_ip_mreq) ||
1567 optlen > sizeof (struct target_ip_mreqn))
1568 return -TARGET_EINVAL;
/* NOTE(review): the return value of target_to_host_ip_mreq is
 * ignored; an EFAULT there proceeds with uninitialized ip_mreq. */
1570 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1571 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1572 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1573 break;
1575 case IP_BLOCK_SOURCE:
1576 case IP_UNBLOCK_SOURCE:
1577 case IP_ADD_SOURCE_MEMBERSHIP:
1578 case IP_DROP_SOURCE_MEMBERSHIP:
1579 if (optlen != sizeof (struct target_ip_mreq_source))
1580 return -TARGET_EINVAL;
/* NOTE(review): lock_user may return NULL (bad guest address);
 * that NULL is passed straight to setsockopt instead of returning
 * -TARGET_EFAULT. */
1582 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1583 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1584 unlock_user (ip_mreq_source, optval_addr, 0);
1585 break;
1587 default:
1588 goto unimplemented;
1590 break;
1591 case SOL_IPV6:
1592 switch (optname) {
1593 case IPV6_MTU_DISCOVER:
1594 case IPV6_MTU:
1595 case IPV6_V6ONLY:
1596 case IPV6_RECVPKTINFO:
1597 val = 0;
1598 if (optlen < sizeof(uint32_t)) {
1599 return -TARGET_EINVAL;
1601 if (get_user_u32(val, optval_addr)) {
1602 return -TARGET_EFAULT;
1604 ret = get_errno(setsockopt(sockfd, level, optname,
1605 &val, sizeof(val)));
1606 break;
1607 default:
1608 goto unimplemented;
1610 break;
1611 case SOL_RAW:
1612 switch (optname) {
1613 case ICMP_FILTER:
1614 /* struct icmp_filter takes an u32 value */
1615 if (optlen < sizeof(uint32_t)) {
1616 return -TARGET_EINVAL;
1619 if (get_user_u32(val, optval_addr)) {
1620 return -TARGET_EFAULT;
1622 ret = get_errno(setsockopt(sockfd, level, optname,
1623 &val, sizeof(val)));
1624 break;
1626 default:
1627 goto unimplemented;
1629 break;
1630 case TARGET_SOL_SOCKET:
1631 switch (optname) {
1632 case TARGET_SO_RCVTIMEO:
1634 struct timeval tv;
1636 optname = SO_RCVTIMEO;
/* Shared tail for both RCVTIMEO and SNDTIMEO (see goto below). */
1638 set_timeout:
1639 if (optlen != sizeof(struct target_timeval)) {
1640 return -TARGET_EINVAL;
1643 if (copy_from_user_timeval(&tv, optval_addr)) {
1644 return -TARGET_EFAULT;
1647 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1648 &tv, sizeof(tv)));
1649 return ret;
1651 case TARGET_SO_SNDTIMEO:
1652 optname = SO_SNDTIMEO;
1653 goto set_timeout;
1654 case TARGET_SO_ATTACH_FILTER:
1656 struct target_sock_fprog *tfprog;
1657 struct target_sock_filter *tfilter;
1658 struct sock_fprog fprog;
1659 struct sock_filter *filter;
1660 int i;
1662 if (optlen != sizeof(*tfprog)) {
1663 return -TARGET_EINVAL;
1665 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1666 return -TARGET_EFAULT;
1668 if (!lock_user_struct(VERIFY_READ, tfilter,
1669 tswapal(tfprog->filter), 0)) {
1670 unlock_user_struct(tfprog, optval_addr, 1);
1671 return -TARGET_EFAULT;
1674 fprog.len = tswap16(tfprog->len);
1675 filter = g_try_new(struct sock_filter, fprog.len);
1676 if (filter == NULL) {
1677 unlock_user_struct(tfilter, tfprog->filter, 1);
1678 unlock_user_struct(tfprog, optval_addr, 1);
1679 return -TARGET_ENOMEM;
/* Byteswap each BPF instruction; jt/jf are single bytes. */
1681 for (i = 0; i < fprog.len; i++) {
1682 filter[i].code = tswap16(tfilter[i].code);
1683 filter[i].jt = tfilter[i].jt;
1684 filter[i].jf = tfilter[i].jf;
1685 filter[i].k = tswap32(tfilter[i].k);
1687 fprog.filter = filter;
1689 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1690 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1691 g_free(filter);
1693 unlock_user_struct(tfilter, tfprog->filter, 1);
1694 unlock_user_struct(tfprog, optval_addr, 1);
1695 return ret;
1697 case TARGET_SO_BINDTODEVICE:
1699 char *dev_ifname, *addr_ifname;
1701 if (optlen > IFNAMSIZ - 1) {
1702 optlen = IFNAMSIZ - 1;
1704 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1705 if (!dev_ifname) {
1706 return -TARGET_EFAULT;
1708 optname = SO_BINDTODEVICE;
/* Copy into a NUL-terminated scratch buffer of full IFNAMSIZ. */
1709 addr_ifname = alloca(IFNAMSIZ);
1710 memcpy(addr_ifname, dev_ifname, optlen);
1711 addr_ifname[optlen] = 0;
1712 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1713 addr_ifname, optlen));
1714 unlock_user (dev_ifname, optval_addr, 0);
1715 return ret;
1717 /* Options with 'int' argument. */
/* The cases below only map the constant; the shared int-fetch code
 * after the switch performs the actual setsockopt call. */
1718 case TARGET_SO_DEBUG:
1719 optname = SO_DEBUG;
1720 break;
1721 case TARGET_SO_REUSEADDR:
1722 optname = SO_REUSEADDR;
1723 break;
1724 case TARGET_SO_TYPE:
1725 optname = SO_TYPE;
1726 break;
1727 case TARGET_SO_ERROR:
1728 optname = SO_ERROR;
1729 break;
1730 case TARGET_SO_DONTROUTE:
1731 optname = SO_DONTROUTE;
1732 break;
1733 case TARGET_SO_BROADCAST:
1734 optname = SO_BROADCAST;
1735 break;
1736 case TARGET_SO_SNDBUF:
1737 optname = SO_SNDBUF;
1738 break;
1739 case TARGET_SO_SNDBUFFORCE:
1740 optname = SO_SNDBUFFORCE;
1741 break;
1742 case TARGET_SO_RCVBUF:
1743 optname = SO_RCVBUF;
1744 break;
1745 case TARGET_SO_RCVBUFFORCE:
1746 optname = SO_RCVBUFFORCE;
1747 break;
1748 case TARGET_SO_KEEPALIVE:
1749 optname = SO_KEEPALIVE;
1750 break;
1751 case TARGET_SO_OOBINLINE:
1752 optname = SO_OOBINLINE;
1753 break;
1754 case TARGET_SO_NO_CHECK:
1755 optname = SO_NO_CHECK;
1756 break;
1757 case TARGET_SO_PRIORITY:
1758 optname = SO_PRIORITY;
1759 break;
1760 #ifdef SO_BSDCOMPAT
1761 case TARGET_SO_BSDCOMPAT:
1762 optname = SO_BSDCOMPAT;
1763 break;
1764 #endif
1765 case TARGET_SO_PASSCRED:
1766 optname = SO_PASSCRED;
1767 break;
1768 case TARGET_SO_PASSSEC:
1769 optname = SO_PASSSEC;
1770 break;
1771 case TARGET_SO_TIMESTAMP:
1772 optname = SO_TIMESTAMP;
1773 break;
1774 case TARGET_SO_RCVLOWAT:
1775 optname = SO_RCVLOWAT;
1776 break;
/* NOTE(review): the following break is unreachable dead code (a
 * duplicate left after the TARGET_SO_RCVLOWAT case above). */
1777 break;
1778 default:
1779 goto unimplemented;
1781 if (optlen < sizeof(uint32_t))
1782 return -TARGET_EINVAL;
1784 if (get_user_u32(val, optval_addr))
1785 return -TARGET_EFAULT;
1786 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1787 break;
1788 default:
1789 unimplemented:
1790 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1791 ret = -TARGET_ENOPROTOOPT;
1793 return ret;
1796 /* do_getsockopt() Must return target values and target errnos. */
/* Translate a guest getsockopt(): maps level/name constants, calls the
 * host getsockopt(), and converts the result back to guest layout,
 * honouring the guest-supplied optlen. */
1797 static abi_long do_getsockopt(int sockfd, int level, int optname,
1798 abi_ulong optval_addr, abi_ulong optlen)
1800 abi_long ret;
1801 int len, val;
1802 socklen_t lv;
1804 switch(level) {
1805 case TARGET_SOL_SOCKET:
1806 level = SOL_SOCKET;
1807 switch (optname) {
1808 /* These don't just return a single integer */
1809 case TARGET_SO_LINGER:
1810 case TARGET_SO_RCVTIMEO:
1811 case TARGET_SO_SNDTIMEO:
1812 case TARGET_SO_PEERNAME:
1813 goto unimplemented;
1814 case TARGET_SO_PEERCRED: {
1815 struct ucred cr;
1816 socklen_t crlen;
1817 struct target_ucred *tcr;
1819 if (get_user_u32(len, optlen)) {
1820 return -TARGET_EFAULT;
1822 if (len < 0) {
1823 return -TARGET_EINVAL;
1826 crlen = sizeof(cr);
1827 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1828 &cr, &crlen));
1829 if (ret < 0) {
1830 return ret;
/* Clamp the reported length to what the kernel produced. */
1832 if (len > crlen) {
1833 len = crlen;
1835 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1836 return -TARGET_EFAULT;
1838 __put_user(cr.pid, &tcr->pid);
1839 __put_user(cr.uid, &tcr->uid);
1840 __put_user(cr.gid, &tcr->gid);
1841 unlock_user_struct(tcr, optval_addr, 1);
1842 if (put_user_u32(len, optlen)) {
1843 return -TARGET_EFAULT;
1845 break;
1847 /* Options with 'int' argument. */
/* These cases only map the constant and fall into int_case below. */
1848 case TARGET_SO_DEBUG:
1849 optname = SO_DEBUG;
1850 goto int_case;
1851 case TARGET_SO_REUSEADDR:
1852 optname = SO_REUSEADDR;
1853 goto int_case;
1854 case TARGET_SO_TYPE:
1855 optname = SO_TYPE;
1856 goto int_case;
1857 case TARGET_SO_ERROR:
1858 optname = SO_ERROR;
1859 goto int_case;
1860 case TARGET_SO_DONTROUTE:
1861 optname = SO_DONTROUTE;
1862 goto int_case;
1863 case TARGET_SO_BROADCAST:
1864 optname = SO_BROADCAST;
1865 goto int_case;
1866 case TARGET_SO_SNDBUF:
1867 optname = SO_SNDBUF;
1868 goto int_case;
1869 case TARGET_SO_RCVBUF:
1870 optname = SO_RCVBUF;
1871 goto int_case;
1872 case TARGET_SO_KEEPALIVE:
1873 optname = SO_KEEPALIVE;
1874 goto int_case;
1875 case TARGET_SO_OOBINLINE:
1876 optname = SO_OOBINLINE;
1877 goto int_case;
1878 case TARGET_SO_NO_CHECK:
1879 optname = SO_NO_CHECK;
1880 goto int_case;
1881 case TARGET_SO_PRIORITY:
1882 optname = SO_PRIORITY;
1883 goto int_case;
1884 #ifdef SO_BSDCOMPAT
1885 case TARGET_SO_BSDCOMPAT:
1886 optname = SO_BSDCOMPAT;
1887 goto int_case;
1888 #endif
1889 case TARGET_SO_PASSCRED:
1890 optname = SO_PASSCRED;
1891 goto int_case;
1892 case TARGET_SO_TIMESTAMP:
1893 optname = SO_TIMESTAMP;
1894 goto int_case;
1895 case TARGET_SO_RCVLOWAT:
1896 optname = SO_RCVLOWAT;
1897 goto int_case;
1898 case TARGET_SO_ACCEPTCONN:
1899 optname = SO_ACCEPTCONN;
1900 goto int_case;
1901 default:
1902 goto int_case;
1904 break;
1905 case SOL_TCP:
1906 /* TCP options all take an 'int' value. */
/* Shared path for every option whose value is a single int. */
1907 int_case:
1908 if (get_user_u32(len, optlen))
1909 return -TARGET_EFAULT;
1910 if (len < 0)
1911 return -TARGET_EINVAL;
/* NOTE(review): lv = sizeof(lv) rather than sizeof(val); works only
 * while sizeof(socklen_t) == sizeof(int) -- confirm intent. */
1912 lv = sizeof(lv);
1913 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1914 if (ret < 0)
1915 return ret;
1916 if (optname == SO_TYPE) {
1917 val = host_to_target_sock_type(val);
1919 if (len > lv)
1920 len = lv;
/* The guest may ask for a 4-byte or a 1-byte result. */
1921 if (len == 4) {
1922 if (put_user_u32(val, optval_addr))
1923 return -TARGET_EFAULT;
1924 } else {
1925 if (put_user_u8(val, optval_addr))
1926 return -TARGET_EFAULT;
1928 if (put_user_u32(len, optlen))
1929 return -TARGET_EFAULT;
1930 break;
1931 case SOL_IP:
1932 switch(optname) {
1933 case IP_TOS:
1934 case IP_TTL:
1935 case IP_HDRINCL:
1936 case IP_ROUTER_ALERT:
1937 case IP_RECVOPTS:
1938 case IP_RETOPTS:
1939 case IP_PKTINFO:
1940 case IP_MTU_DISCOVER:
1941 case IP_RECVERR:
1942 case IP_RECVTOS:
1943 #ifdef IP_FREEBIND
1944 case IP_FREEBIND:
1945 #endif
1946 case IP_MULTICAST_TTL:
1947 case IP_MULTICAST_LOOP:
1948 if (get_user_u32(len, optlen))
1949 return -TARGET_EFAULT;
1950 if (len < 0)
1951 return -TARGET_EINVAL;
1952 lv = sizeof(lv);
1953 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1954 if (ret < 0)
1955 return ret;
/* A short buffer gets a single byte if the value fits. */
1956 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1957 len = 1;
1958 if (put_user_u32(len, optlen)
1959 || put_user_u8(val, optval_addr))
1960 return -TARGET_EFAULT;
1961 } else {
1962 if (len > sizeof(int))
1963 len = sizeof(int);
1964 if (put_user_u32(len, optlen)
1965 || put_user_u32(val, optval_addr))
1966 return -TARGET_EFAULT;
1968 break;
1969 default:
1970 ret = -TARGET_ENOPROTOOPT;
1971 break;
1973 break;
1974 default:
1975 unimplemented:
1976 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1977 level, optname);
1978 ret = -TARGET_EOPNOTSUPP;
1979 break;
1981 return ret;
/* Build a host iovec array from a guest iovec array at @target_addr.
 * Locks each guest buffer into host memory (@type/@copy as for
 * lock_user).  On success returns the malloc'd vector (freed by
 * unlock_iovec); on failure returns NULL with errno set.  count==0
 * returns NULL with errno==0, which callers must treat as success. */
1984 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1985 int count, int copy)
1987 struct target_iovec *target_vec;
1988 struct iovec *vec;
1989 abi_ulong total_len, max_len;
1990 int i;
1991 int err = 0;
1992 bool bad_address = false;
1994 if (count == 0) {
1995 errno = 0;
1996 return NULL;
1998 if (count < 0 || count > IOV_MAX) {
1999 errno = EINVAL;
2000 return NULL;
2003 vec = g_try_new0(struct iovec, count);
2004 if (vec == NULL) {
2005 errno = ENOMEM;
2006 return NULL;
2009 target_vec = lock_user(VERIFY_READ, target_addr,
2010 count * sizeof(struct target_iovec), 1);
2011 if (target_vec == NULL) {
2012 err = EFAULT;
2013 goto fail2;
2016 /* ??? If host page size > target page size, this will result in a
2017 value larger than what we can actually support. */
2018 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2019 total_len = 0;
2021 for (i = 0; i < count; i++) {
2022 abi_ulong base = tswapal(target_vec[i].iov_base);
2023 abi_long len = tswapal(target_vec[i].iov_len);
2025 if (len < 0) {
2026 err = EINVAL;
2027 goto fail;
2028 } else if (len == 0) {
2029 /* Zero length pointer is ignored. */
2030 vec[i].iov_base = 0;
2031 } else {
2032 vec[i].iov_base = lock_user(type, base, len, copy);
2033 /* If the first buffer pointer is bad, this is a fault. But
2034 * subsequent bad buffers will result in a partial write; this
2035 * is realized by filling the vector with null pointers and
2036 * zero lengths. */
2037 if (!vec[i].iov_base) {
2038 if (i == 0) {
2039 err = EFAULT;
2040 goto fail;
2041 } else {
2042 bad_address = true;
2045 if (bad_address) {
2046 len = 0;
/* Clamp the cumulative length so the total fits in max_len. */
2048 if (len > max_len - total_len) {
2049 len = max_len - total_len;
2052 vec[i].iov_len = len;
2053 total_len += len;
2056 unlock_user(target_vec, target_addr, 0);
2057 return vec;
/* Unwind: unlock every buffer locked so far, then free the vector. */
2059 fail:
2060 while (--i >= 0) {
2061 if (tswapal(target_vec[i].iov_len) > 0) {
2062 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2065 unlock_user(target_vec, target_addr, 0);
2066 fail2:
2067 g_free(vec);
2068 errno = err;
2069 return NULL;
2072 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2073 int count, int copy)
2075 struct target_iovec *target_vec;
2076 int i;
2078 target_vec = lock_user(VERIFY_READ, target_addr,
2079 count * sizeof(struct target_iovec), 1);
2080 if (target_vec) {
2081 for (i = 0; i < count; i++) {
2082 abi_ulong base = tswapal(target_vec[i].iov_base);
2083 abi_long len = tswapal(target_vec[i].iov_len);
2084 if (len < 0) {
2085 break;
2087 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2089 unlock_user(target_vec, target_addr, 0);
2092 g_free(vec);
2095 static inline int target_to_host_sock_type(int *type)
2097 int host_type = 0;
2098 int target_type = *type;
2100 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2101 case TARGET_SOCK_DGRAM:
2102 host_type = SOCK_DGRAM;
2103 break;
2104 case TARGET_SOCK_STREAM:
2105 host_type = SOCK_STREAM;
2106 break;
2107 default:
2108 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2109 break;
2111 if (target_type & TARGET_SOCK_CLOEXEC) {
2112 #if defined(SOCK_CLOEXEC)
2113 host_type |= SOCK_CLOEXEC;
2114 #else
2115 return -TARGET_EINVAL;
2116 #endif
2118 if (target_type & TARGET_SOCK_NONBLOCK) {
2119 #if defined(SOCK_NONBLOCK)
2120 host_type |= SOCK_NONBLOCK;
2121 #elif !defined(O_NONBLOCK)
2122 return -TARGET_EINVAL;
2123 #endif
2125 *type = host_type;
2126 return 0;
/* Try to emulate socket type flags after socket creation.
 * On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl to the
 * freshly created @fd when the guest asked for TARGET_SOCK_NONBLOCK.
 * Returns @fd on success; on any fcntl failure closes @fd and returns
 * -TARGET_EINVAL.
 * Fix: the F_GETFL result was previously used unchecked, so a failing
 * F_GETFL (-1) would have been OR'd into F_SETFL. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2144 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2145 abi_ulong target_addr,
2146 socklen_t len)
2148 struct sockaddr *addr = host_addr;
2149 struct target_sockaddr *target_saddr;
2151 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2152 if (!target_saddr) {
2153 return -TARGET_EFAULT;
2156 memcpy(addr, target_saddr, len);
2157 addr->sa_family = tswap16(target_saddr->sa_family);
2158 /* spkt_protocol is big-endian */
2160 unlock_user(target_saddr, target_addr, 0);
2161 return 0;
/* fd translation hooks installed on SOCK_PACKET sockets so their
 * guest sockaddrs go through packet_target_to_host_sockaddr. */
2164 static TargetFdTrans target_packet_trans = {
2165 .target_to_host_addr = packet_target_to_host_sockaddr,
2168 /* do_socket() Must return target values and target errnos. */
/* Create a socket for the guest: convert the type flags, reject
 * unsupported PF_NETLINK, and register the SOCK_PACKET address
 * translator when needed. */
2169 static abi_long do_socket(int domain, int type, int protocol)
2171 int target_type = type;
2172 int ret;
2174 ret = target_to_host_sock_type(&type);
2175 if (ret) {
2176 return ret;
2179 if (domain == PF_NETLINK)
2180 return -TARGET_EAFNOSUPPORT;
/* Packet sockets take the protocol in network byte order.
 * NOTE(review): tswap16 here assumes the guest passed it in its own
 * byte order -- confirm against the AF_PACKET ABI. */
2182 if (domain == AF_PACKET ||
2183 (domain == AF_INET && type == SOCK_PACKET)) {
2184 protocol = tswap16(protocol);
2187 ret = get_errno(socket(domain, type, protocol));
2188 if (ret >= 0) {
/* Emulate SOCK_NONBLOCK via fcntl on hosts lacking it. */
2189 ret = sock_flags_fixup(ret, target_type);
2190 if (type == SOCK_PACKET) {
2191 /* Manage an obsolete case :
2192 * if socket type is SOCK_PACKET, bind by name
2194 fd_trans_register(ret, &target_packet_trans);
2197 return ret;
2200 /* do_bind() Must return target values and target errnos. */
2201 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2202 socklen_t addrlen)
2204 void *addr;
2205 abi_long ret;
2207 if ((int)addrlen < 0) {
2208 return -TARGET_EINVAL;
2211 addr = alloca(addrlen+1);
2213 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2214 if (ret)
2215 return ret;
2217 return get_errno(bind(sockfd, addr, addrlen));
2220 /* do_connect() Must return target values and target errnos. */
2221 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2222 socklen_t addrlen)
2224 void *addr;
2225 abi_long ret;
2227 if ((int)addrlen < 0) {
2228 return -TARGET_EINVAL;
2231 addr = alloca(addrlen+1);
2233 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2234 if (ret)
2235 return ret;
2237 return get_errno(connect(sockfd, addr, addrlen));
2240 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Core of sendmsg/recvmsg emulation: converts the already-locked guest
 * msghdr (@msgp) to a host msghdr, performs the host call, and for
 * receives converts name/control data back.  @send selects direction. */
2241 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2242 int flags, int send)
2244 abi_long ret, len;
2245 struct msghdr msg;
2246 int count;
2247 struct iovec *vec;
2248 abi_ulong target_vec;
2250 if (msgp->msg_name) {
2251 msg.msg_namelen = tswap32(msgp->msg_namelen);
/* +1 byte for the AF_UNIX NUL fixup in target_to_host_sockaddr. */
2252 msg.msg_name = alloca(msg.msg_namelen+1);
2253 ret = target_to_host_sockaddr(fd, msg.msg_name,
2254 tswapal(msgp->msg_name),
2255 msg.msg_namelen);
2256 if (ret) {
2257 goto out2;
2259 } else {
2260 msg.msg_name = NULL;
2261 msg.msg_namelen = 0;
/* Double the control area: host cmsgs may be larger than the
 * guest's (see the overflow note in target_to_host_cmsg). */
2263 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2264 msg.msg_control = alloca(msg.msg_controllen);
2265 msg.msg_flags = tswap32(msgp->msg_flags);
2267 count = tswapal(msgp->msg_iovlen);
2268 target_vec = tswapal(msgp->msg_iov);
2269 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2270 target_vec, count, send);
2271 if (vec == NULL) {
2272 ret = -host_to_target_errno(errno);
2273 goto out2;
2275 msg.msg_iovlen = count;
2276 msg.msg_iov = vec;
2278 if (send) {
2279 ret = target_to_host_cmsg(&msg, msgp);
2280 if (ret == 0)
2281 ret = get_errno(sendmsg(fd, &msg, flags));
2282 } else {
2283 ret = get_errno(recvmsg(fd, &msg, flags));
2284 if (!is_error(ret)) {
/* Preserve the byte count; the cmsg/name conversions below
 * overwrite ret with their own status. */
2285 len = ret;
2286 ret = host_to_target_cmsg(msgp, &msg);
2287 if (!is_error(ret)) {
2288 msgp->msg_namelen = tswap32(msg.msg_namelen);
2289 if (msg.msg_name != NULL) {
2290 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2291 msg.msg_name, msg.msg_namelen);
2292 if (ret) {
2293 goto out;
2297 ret = len;
2302 out:
2303 unlock_iovec(vec, target_vec, count, !send);
2304 out2:
2305 return ret;
2308 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2309 int flags, int send)
2311 abi_long ret;
2312 struct target_msghdr *msgp;
2314 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2315 msgp,
2316 target_msg,
2317 send ? 1 : 0)) {
2318 return -TARGET_EFAULT;
2320 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2321 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2322 return ret;
2325 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2326 * so it might not have this *mmsg-specific flag either.
2328 #ifndef MSG_WAITFORONE
2329 #define MSG_WAITFORONE 0x10000
2330 #endif
2332 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2333 unsigned int vlen, unsigned int flags,
2334 int send)
2336 struct target_mmsghdr *mmsgp;
2337 abi_long ret = 0;
2338 int i;
2340 if (vlen > UIO_MAXIOV) {
2341 vlen = UIO_MAXIOV;
2344 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2345 if (!mmsgp) {
2346 return -TARGET_EFAULT;
2349 for (i = 0; i < vlen; i++) {
2350 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2351 if (is_error(ret)) {
2352 break;
2354 mmsgp[i].msg_len = tswap32(ret);
2355 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2356 if (flags & MSG_WAITFORONE) {
2357 flags |= MSG_DONTWAIT;
2361 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2363 /* Return number of datagrams sent if we sent any at all;
2364 * otherwise return the error.
2366 if (i) {
2367 return i;
2369 return ret;
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Callers guarantee flags == 0 when this fallback is compiled in. */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
2385 /* do_accept4() Must return target values and target errnos. */
2386 static abi_long do_accept4(int fd, abi_ulong target_addr,
2387 abi_ulong target_addrlen_addr, int flags)
2389 socklen_t addrlen;
2390 void *addr;
2391 abi_long ret;
2392 int host_flags;
2394 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2396 if (target_addr == 0) {
2397 return get_errno(accept4(fd, NULL, NULL, host_flags));
2400 /* linux returns EINVAL if addrlen pointer is invalid */
2401 if (get_user_u32(addrlen, target_addrlen_addr))
2402 return -TARGET_EINVAL;
2404 if ((int)addrlen < 0) {
2405 return -TARGET_EINVAL;
2408 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2409 return -TARGET_EINVAL;
2411 addr = alloca(addrlen);
2413 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2414 if (!is_error(ret)) {
2415 host_to_target_sockaddr(target_addr, addr, addrlen);
2416 if (put_user_u32(addrlen, target_addrlen_addr))
2417 ret = -TARGET_EFAULT;
2419 return ret;
2422 /* do_getpeername() Must return target values and target errnos. */
2423 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2424 abi_ulong target_addrlen_addr)
2426 socklen_t addrlen;
2427 void *addr;
2428 abi_long ret;
2430 if (get_user_u32(addrlen, target_addrlen_addr))
2431 return -TARGET_EFAULT;
2433 if ((int)addrlen < 0) {
2434 return -TARGET_EINVAL;
2437 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2438 return -TARGET_EFAULT;
2440 addr = alloca(addrlen);
2442 ret = get_errno(getpeername(fd, addr, &addrlen));
2443 if (!is_error(ret)) {
2444 host_to_target_sockaddr(target_addr, addr, addrlen);
2445 if (put_user_u32(addrlen, target_addrlen_addr))
2446 ret = -TARGET_EFAULT;
2448 return ret;
2451 /* do_getsockname() Must return target values and target errnos. */
2452 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2453 abi_ulong target_addrlen_addr)
2455 socklen_t addrlen;
2456 void *addr;
2457 abi_long ret;
2459 if (get_user_u32(addrlen, target_addrlen_addr))
2460 return -TARGET_EFAULT;
2462 if ((int)addrlen < 0) {
2463 return -TARGET_EINVAL;
2466 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2467 return -TARGET_EFAULT;
2469 addr = alloca(addrlen);
2471 ret = get_errno(getsockname(fd, addr, &addrlen));
2472 if (!is_error(ret)) {
2473 host_to_target_sockaddr(target_addr, addr, addrlen);
2474 if (put_user_u32(addrlen, target_addrlen_addr))
2475 ret = -TARGET_EFAULT;
2477 return ret;
2480 /* do_socketpair() Must return target values and target errnos. */
2481 static abi_long do_socketpair(int domain, int type, int protocol,
2482 abi_ulong target_tab_addr)
2484 int tab[2];
2485 abi_long ret;
2487 target_to_host_sock_type(&type);
2489 ret = get_errno(socketpair(domain, type, protocol, tab));
2490 if (!is_error(ret)) {
2491 if (put_user_s32(tab[0], target_tab_addr)
2492 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2493 ret = -TARGET_EFAULT;
2495 return ret;
2498 /* do_sendto() Must return target values and target errnos. */
2499 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2500 abi_ulong target_addr, socklen_t addrlen)
2502 void *addr;
2503 void *host_msg;
2504 abi_long ret;
2506 if ((int)addrlen < 0) {
2507 return -TARGET_EINVAL;
2510 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2511 if (!host_msg)
2512 return -TARGET_EFAULT;
2513 if (target_addr) {
2514 addr = alloca(addrlen+1);
2515 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2516 if (ret) {
2517 unlock_user(host_msg, msg, 0);
2518 return ret;
2520 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2521 } else {
2522 ret = get_errno(send(fd, host_msg, len, flags));
2524 unlock_user(host_msg, msg, 0);
2525 return ret;
2528 /* do_recvfrom() Must return target values and target errnos. */
2529 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2530 abi_ulong target_addr,
2531 abi_ulong target_addrlen)
2533 socklen_t addrlen;
2534 void *addr;
2535 void *host_msg;
2536 abi_long ret;
2538 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2539 if (!host_msg)
2540 return -TARGET_EFAULT;
2541 if (target_addr) {
2542 if (get_user_u32(addrlen, target_addrlen)) {
2543 ret = -TARGET_EFAULT;
2544 goto fail;
2546 if ((int)addrlen < 0) {
2547 ret = -TARGET_EINVAL;
2548 goto fail;
2550 addr = alloca(addrlen);
2551 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2552 } else {
2553 addr = NULL; /* To keep compiler quiet. */
2554 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2556 if (!is_error(ret)) {
2557 if (target_addr) {
2558 host_to_target_sockaddr(target_addr, addr, addrlen);
2559 if (put_user_u32(addrlen, target_addrlen)) {
2560 ret = -TARGET_EFAULT;
2561 goto fail;
2564 unlock_user(host_msg, msg, len);
2565 } else {
2566 fail:
2567 unlock_user(host_msg, msg, 0);
2569 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 * The socketcall multiplexer passes a call number plus a pointer to a
 * guest array of arguments; fetch the right number of words, then
 * dispatch to the individual do_* helpers.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
2660 #define N_SHM_REGIONS 32
2662 static struct shm_region {
2663 abi_ulong start;
2664 abi_ulong size;
2665 bool in_use;
2666 } shm_regions[N_SHM_REGIONS];
2668 struct target_semid_ds
2670 struct target_ipc_perm sem_perm;
2671 abi_ulong sem_otime;
2672 #if !defined(TARGET_PPC64)
2673 abi_ulong __unused1;
2674 #endif
2675 abi_ulong sem_ctime;
2676 #if !defined(TARGET_PPC64)
2677 abi_ulong __unused2;
2678 #endif
2679 abi_ulong sem_nsems;
2680 abi_ulong __unused3;
2681 abi_ulong __unused4;
2684 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2685 abi_ulong target_addr)
2687 struct target_ipc_perm *target_ip;
2688 struct target_semid_ds *target_sd;
2690 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2691 return -TARGET_EFAULT;
2692 target_ip = &(target_sd->sem_perm);
2693 host_ip->__key = tswap32(target_ip->__key);
2694 host_ip->uid = tswap32(target_ip->uid);
2695 host_ip->gid = tswap32(target_ip->gid);
2696 host_ip->cuid = tswap32(target_ip->cuid);
2697 host_ip->cgid = tswap32(target_ip->cgid);
2698 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2699 host_ip->mode = tswap32(target_ip->mode);
2700 #else
2701 host_ip->mode = tswap16(target_ip->mode);
2702 #endif
2703 #if defined(TARGET_PPC)
2704 host_ip->__seq = tswap32(target_ip->__seq);
2705 #else
2706 host_ip->__seq = tswap16(target_ip->__seq);
2707 #endif
2708 unlock_user_struct(target_sd, target_addr, 0);
2709 return 0;
2712 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2713 struct ipc_perm *host_ip)
2715 struct target_ipc_perm *target_ip;
2716 struct target_semid_ds *target_sd;
2718 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2719 return -TARGET_EFAULT;
2720 target_ip = &(target_sd->sem_perm);
2721 target_ip->__key = tswap32(host_ip->__key);
2722 target_ip->uid = tswap32(host_ip->uid);
2723 target_ip->gid = tswap32(host_ip->gid);
2724 target_ip->cuid = tswap32(host_ip->cuid);
2725 target_ip->cgid = tswap32(host_ip->cgid);
2726 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2727 target_ip->mode = tswap32(host_ip->mode);
2728 #else
2729 target_ip->mode = tswap16(host_ip->mode);
2730 #endif
2731 #if defined(TARGET_PPC)
2732 target_ip->__seq = tswap32(host_ip->__seq);
2733 #else
2734 target_ip->__seq = tswap16(host_ip->__seq);
2735 #endif
2736 unlock_user_struct(target_sd, target_addr, 1);
2737 return 0;
2740 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2741 abi_ulong target_addr)
2743 struct target_semid_ds *target_sd;
2745 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2746 return -TARGET_EFAULT;
2747 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2748 return -TARGET_EFAULT;
2749 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2750 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2751 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2752 unlock_user_struct(target_sd, target_addr, 0);
2753 return 0;
2756 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2757 struct semid_ds *host_sd)
2759 struct target_semid_ds *target_sd;
2761 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2762 return -TARGET_EFAULT;
2763 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2764 return -TARGET_EFAULT;
2765 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2766 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2767 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2768 unlock_user_struct(target_sd, target_addr, 1);
2769 return 0;
/* Guest-layout seminfo, mirrors the host struct seminfo field-for-field. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
2785 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2786 struct seminfo *host_seminfo)
2788 struct target_seminfo *target_seminfo;
2789 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2790 return -TARGET_EFAULT;
2791 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2792 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2793 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2794 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2795 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2796 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2797 __put_user(host_seminfo->semume, &target_seminfo->semume);
2798 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2799 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2800 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2801 unlock_user_struct(target_seminfo, target_addr, 1);
2802 return 0;
2805 union semun {
2806 int val;
2807 struct semid_ds *buf;
2808 unsigned short *array;
2809 struct seminfo *__buf;
2812 union target_semun {
2813 int val;
2814 abi_ulong buf;
2815 abi_ulong array;
2816 abi_ulong __buf;
2819 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2820 abi_ulong target_addr)
2822 int nsems;
2823 unsigned short *array;
2824 union semun semun;
2825 struct semid_ds semid_ds;
2826 int i, ret;
2828 semun.buf = &semid_ds;
2830 ret = semctl(semid, 0, IPC_STAT, semun);
2831 if (ret == -1)
2832 return get_errno(ret);
2834 nsems = semid_ds.sem_nsems;
2836 *host_array = g_try_new(unsigned short, nsems);
2837 if (!*host_array) {
2838 return -TARGET_ENOMEM;
2840 array = lock_user(VERIFY_READ, target_addr,
2841 nsems*sizeof(unsigned short), 1);
2842 if (!array) {
2843 g_free(*host_array);
2844 return -TARGET_EFAULT;
2847 for(i=0; i<nsems; i++) {
2848 __get_user((*host_array)[i], &array[i]);
2850 unlock_user(array, target_addr, 0);
2852 return 0;
2855 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2856 unsigned short **host_array)
2858 int nsems;
2859 unsigned short *array;
2860 union semun semun;
2861 struct semid_ds semid_ds;
2862 int i, ret;
2864 semun.buf = &semid_ds;
2866 ret = semctl(semid, 0, IPC_STAT, semun);
2867 if (ret == -1)
2868 return get_errno(ret);
2870 nsems = semid_ds.sem_nsems;
2872 array = lock_user(VERIFY_WRITE, target_addr,
2873 nsems*sizeof(unsigned short), 0);
2874 if (!array)
2875 return -TARGET_EFAULT;
2877 for(i=0; i<nsems; i++) {
2878 __put_user((*host_array)[i], &array[i]);
2880 g_free(*host_array);
2881 unlock_user(array, target_addr, 1);
2883 return 0;
2886 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2887 abi_ulong target_arg)
2889 union target_semun target_su = { .buf = target_arg };
2890 union semun arg;
2891 struct semid_ds dsarg;
2892 unsigned short *array = NULL;
2893 struct seminfo seminfo;
2894 abi_long ret = -TARGET_EINVAL;
2895 abi_long err;
2896 cmd &= 0xff;
2898 switch( cmd ) {
2899 case GETVAL:
2900 case SETVAL:
2901 /* In 64 bit cross-endian situations, we will erroneously pick up
2902 * the wrong half of the union for the "val" element. To rectify
2903 * this, the entire 8-byte structure is byteswapped, followed by
2904 * a swap of the 4 byte val field. In other cases, the data is
2905 * already in proper host byte order. */
2906 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2907 target_su.buf = tswapal(target_su.buf);
2908 arg.val = tswap32(target_su.val);
2909 } else {
2910 arg.val = target_su.val;
2912 ret = get_errno(semctl(semid, semnum, cmd, arg));
2913 break;
2914 case GETALL:
2915 case SETALL:
2916 err = target_to_host_semarray(semid, &array, target_su.array);
2917 if (err)
2918 return err;
2919 arg.array = array;
2920 ret = get_errno(semctl(semid, semnum, cmd, arg));
2921 err = host_to_target_semarray(semid, target_su.array, &array);
2922 if (err)
2923 return err;
2924 break;
2925 case IPC_STAT:
2926 case IPC_SET:
2927 case SEM_STAT:
2928 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2929 if (err)
2930 return err;
2931 arg.buf = &dsarg;
2932 ret = get_errno(semctl(semid, semnum, cmd, arg));
2933 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2934 if (err)
2935 return err;
2936 break;
2937 case IPC_INFO:
2938 case SEM_INFO:
2939 arg.__buf = &seminfo;
2940 ret = get_errno(semctl(semid, semnum, cmd, arg));
2941 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2942 if (err)
2943 return err;
2944 break;
2945 case IPC_RMID:
2946 case GETPID:
2947 case GETNCNT:
2948 case GETZCNT:
2949 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2950 break;
2953 return ret;
/* Guest-layout sembuf, same field order as the host struct sembuf. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
2962 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2963 abi_ulong target_addr,
2964 unsigned nsops)
2966 struct target_sembuf *target_sembuf;
2967 int i;
2969 target_sembuf = lock_user(VERIFY_READ, target_addr,
2970 nsops*sizeof(struct target_sembuf), 1);
2971 if (!target_sembuf)
2972 return -TARGET_EFAULT;
2974 for(i=0; i<nsops; i++) {
2975 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2976 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2977 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2980 unlock_user(target_sembuf, target_addr, 0);
2982 return 0;
2985 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2987 struct sembuf sops[nsops];
2989 if (target_to_host_sembuf(sops, ptr, nsops))
2990 return -TARGET_EFAULT;
2992 return get_errno(semop(semid, sops, nsops));
2995 struct target_msqid_ds
2997 struct target_ipc_perm msg_perm;
2998 abi_ulong msg_stime;
2999 #if TARGET_ABI_BITS == 32
3000 abi_ulong __unused1;
3001 #endif
3002 abi_ulong msg_rtime;
3003 #if TARGET_ABI_BITS == 32
3004 abi_ulong __unused2;
3005 #endif
3006 abi_ulong msg_ctime;
3007 #if TARGET_ABI_BITS == 32
3008 abi_ulong __unused3;
3009 #endif
3010 abi_ulong __msg_cbytes;
3011 abi_ulong msg_qnum;
3012 abi_ulong msg_qbytes;
3013 abi_ulong msg_lspid;
3014 abi_ulong msg_lrpid;
3015 abi_ulong __unused4;
3016 abi_ulong __unused5;
3019 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3020 abi_ulong target_addr)
3022 struct target_msqid_ds *target_md;
3024 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3025 return -TARGET_EFAULT;
3026 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3027 return -TARGET_EFAULT;
3028 host_md->msg_stime = tswapal(target_md->msg_stime);
3029 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3030 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3031 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3032 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3033 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3034 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3035 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3036 unlock_user_struct(target_md, target_addr, 0);
3037 return 0;
3040 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3041 struct msqid_ds *host_md)
3043 struct target_msqid_ds *target_md;
3045 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3046 return -TARGET_EFAULT;
3047 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3048 return -TARGET_EFAULT;
3049 target_md->msg_stime = tswapal(host_md->msg_stime);
3050 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3051 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3052 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3053 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3054 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3055 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3056 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3057 unlock_user_struct(target_md, target_addr, 1);
3058 return 0;
/* Guest-layout msginfo, mirrors the host struct msginfo. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3072 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3073 struct msginfo *host_msginfo)
3075 struct target_msginfo *target_msginfo;
3076 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3077 return -TARGET_EFAULT;
3078 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3079 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3080 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3081 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3082 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3083 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3084 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3085 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3086 unlock_user_struct(target_msginfo, target_addr, 1);
3087 return 0;
3090 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3092 struct msqid_ds dsarg;
3093 struct msginfo msginfo;
3094 abi_long ret = -TARGET_EINVAL;
3096 cmd &= 0xff;
3098 switch (cmd) {
3099 case IPC_STAT:
3100 case IPC_SET:
3101 case MSG_STAT:
3102 if (target_to_host_msqid_ds(&dsarg,ptr))
3103 return -TARGET_EFAULT;
3104 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3105 if (host_to_target_msqid_ds(ptr,&dsarg))
3106 return -TARGET_EFAULT;
3107 break;
3108 case IPC_RMID:
3109 ret = get_errno(msgctl(msgid, cmd, NULL));
3110 break;
3111 case IPC_INFO:
3112 case MSG_INFO:
3113 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3114 if (host_to_target_msginfo(ptr, &msginfo))
3115 return -TARGET_EFAULT;
3116 break;
3119 return ret;
3122 struct target_msgbuf {
3123 abi_long mtype;
3124 char mtext[1];
3127 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3128 ssize_t msgsz, int msgflg)
3130 struct target_msgbuf *target_mb;
3131 struct msgbuf *host_mb;
3132 abi_long ret = 0;
3134 if (msgsz < 0) {
3135 return -TARGET_EINVAL;
3138 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3139 return -TARGET_EFAULT;
3140 host_mb = g_try_malloc(msgsz + sizeof(long));
3141 if (!host_mb) {
3142 unlock_user_struct(target_mb, msgp, 0);
3143 return -TARGET_ENOMEM;
3145 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3146 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3147 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
3148 g_free(host_mb);
3149 unlock_user_struct(target_mb, msgp, 0);
3151 return ret;
3154 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3155 ssize_t msgsz, abi_long msgtyp,
3156 int msgflg)
3158 struct target_msgbuf *target_mb;
3159 char *target_mtext;
3160 struct msgbuf *host_mb;
3161 abi_long ret = 0;
3163 if (msgsz < 0) {
3164 return -TARGET_EINVAL;
3167 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3168 return -TARGET_EFAULT;
3170 host_mb = g_try_malloc(msgsz + sizeof(long));
3171 if (!host_mb) {
3172 ret = -TARGET_ENOMEM;
3173 goto end;
3175 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3177 if (ret > 0) {
3178 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3179 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3180 if (!target_mtext) {
3181 ret = -TARGET_EFAULT;
3182 goto end;
3184 memcpy(target_mb->mtext, host_mb->mtext, ret);
3185 unlock_user(target_mtext, target_mtext_addr, ret);
3188 target_mb->mtype = tswapal(host_mb->mtype);
3190 end:
3191 if (target_mb)
3192 unlock_user_struct(target_mb, msgp, 1);
3193 g_free(host_mb);
3194 return ret;
3197 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3198 abi_ulong target_addr)
3200 struct target_shmid_ds *target_sd;
3202 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3203 return -TARGET_EFAULT;
3204 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3205 return -TARGET_EFAULT;
3206 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3207 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3208 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3209 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3210 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3211 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3212 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3213 unlock_user_struct(target_sd, target_addr, 0);
3214 return 0;
3217 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3218 struct shmid_ds *host_sd)
3220 struct target_shmid_ds *target_sd;
3222 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3223 return -TARGET_EFAULT;
3224 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3225 return -TARGET_EFAULT;
3226 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3227 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3228 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3229 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3230 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3231 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3232 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3233 unlock_user_struct(target_sd, target_addr, 1);
3234 return 0;
3237 struct target_shminfo {
3238 abi_ulong shmmax;
3239 abi_ulong shmmin;
3240 abi_ulong shmmni;
3241 abi_ulong shmseg;
3242 abi_ulong shmall;
3245 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3246 struct shminfo *host_shminfo)
3248 struct target_shminfo *target_shminfo;
3249 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3250 return -TARGET_EFAULT;
3251 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3252 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3253 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3254 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3255 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3256 unlock_user_struct(target_shminfo, target_addr, 1);
3257 return 0;
3260 struct target_shm_info {
3261 int used_ids;
3262 abi_ulong shm_tot;
3263 abi_ulong shm_rss;
3264 abi_ulong shm_swp;
3265 abi_ulong swap_attempts;
3266 abi_ulong swap_successes;
3269 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3270 struct shm_info *host_shm_info)
3272 struct target_shm_info *target_shm_info;
3273 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3274 return -TARGET_EFAULT;
3275 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3276 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3277 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3278 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3279 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3280 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3281 unlock_user_struct(target_shm_info, target_addr, 1);
3282 return 0;
3285 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3287 struct shmid_ds dsarg;
3288 struct shminfo shminfo;
3289 struct shm_info shm_info;
3290 abi_long ret = -TARGET_EINVAL;
3292 cmd &= 0xff;
3294 switch(cmd) {
3295 case IPC_STAT:
3296 case IPC_SET:
3297 case SHM_STAT:
3298 if (target_to_host_shmid_ds(&dsarg, buf))
3299 return -TARGET_EFAULT;
3300 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3301 if (host_to_target_shmid_ds(buf, &dsarg))
3302 return -TARGET_EFAULT;
3303 break;
3304 case IPC_INFO:
3305 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3306 if (host_to_target_shminfo(buf, &shminfo))
3307 return -TARGET_EFAULT;
3308 break;
3309 case SHM_INFO:
3310 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3311 if (host_to_target_shm_info(buf, &shm_info))
3312 return -TARGET_EFAULT;
3313 break;
3314 case IPC_RMID:
3315 case SHM_LOCK:
3316 case SHM_UNLOCK:
3317 ret = get_errno(shmctl(shmid, cmd, NULL));
3318 break;
3321 return ret;
/*
 * Emulate shmat(2): attach SysV shared memory segment 'shmid' into the
 * guest address space. If 'shmaddr' is 0 we pick a free guest region
 * ourselves. Returns the guest attach address, or a target errno.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Hold the mmap lock across find-vma + shmat so another thread
     * cannot grab the chosen region in between. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma may have left a placeholder
             * mapping that shmat must be allowed to replace. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid so the softmmu/page tracking sees them. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Record the mapping so do_shmdt() can find its size later.
     * NOTE(review): if the table is full the attach succeeds but is not
     * tracked, so a later shmdt will not clear the page flags — confirm
     * whether that is acceptable. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
3378 static inline abi_long do_shmdt(abi_ulong shmaddr)
3380 int i;
3382 for (i = 0; i < N_SHM_REGIONS; ++i) {
3383 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3384 shm_regions[i].in_use = false;
3385 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3386 break;
3390 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy ipc(2) syscall: 'call' selects the SysV
 * IPC operation (IPCOP_*) in its low 16 bits and carries an ABI version
 * number in the top 16 bits. Most cases forward to dedicated helpers.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: ptr points at a kludge struct that bundles
                 * the message pointer and message type together. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned to the guest via *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* version 1 (iBCS2 emulator entry) is not supported */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First inclusion of syscall_types.h: generate an enum with one
 * STRUCT_<name> identifier per described kernel structure. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second inclusion: generate a thunk field-type description array
 * (struct_<name>_def) for every regular structure. "Special" structures
 * are converted by hand-written code instead, so they expand to nothing
 * here. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Handler hook for ioctls that need custom argument marshalling
 * beyond what the generic thunk machinery can express. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One entry of the generated ioctl translation table (see ioctls.h). */
struct IOCTLEntry {
    int target_cmd;             /* ioctl number in the guest ABI */
    unsigned int host_cmd;      /* corresponding host ioctl number */
    const char *name;           /* command name, for logging */
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* custom handler, NULL for generic path */
    const argtype arg_type[5];  /* thunk description of the argument */
};

/* Data-flow direction of the ioctl argument, from the guest's view:
 * IOC_R = kernel writes result out, IOC_W = kernel reads input. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the on-stack scratch buffer used for argument conversion. */
#define MAX_STRUCT_SIZE 4096
3536 #ifdef CONFIG_FIEMAP
3537 /* So fiemap access checks don't overflow on 32 bit systems.
3538 * This is very slightly smaller than the limit imposed by
3539 * the underlying kernel.
3541 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3542 / sizeof(struct fiemap_extent))
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;    /* set when fm was heap-allocated and must be freed */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    /* Convert the guest's fixed fiemap header into buf_temp. */
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so the outbufsz computation below cannot
     * overflow a 32-bit size. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
3625 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3626 int fd, int cmd, abi_long arg)
3628 const argtype *arg_type = ie->arg_type;
3629 int target_size;
3630 void *argptr;
3631 int ret;
3632 struct ifconf *host_ifconf;
3633 uint32_t outbufsz;
3634 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3635 int target_ifreq_size;
3636 int nb_ifreq;
3637 int free_buf = 0;
3638 int i;
3639 int target_ifc_len;
3640 abi_long target_ifc_buf;
3641 int host_ifc_len;
3642 char *host_ifc_buf;
3644 assert(arg_type[0] == TYPE_PTR);
3645 assert(ie->access == IOC_RW);
3647 arg_type++;
3648 target_size = thunk_type_size(arg_type, 0);
3650 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3651 if (!argptr)
3652 return -TARGET_EFAULT;
3653 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3654 unlock_user(argptr, arg, 0);
3656 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3657 target_ifc_len = host_ifconf->ifc_len;
3658 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3660 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3661 nb_ifreq = target_ifc_len / target_ifreq_size;
3662 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3664 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3665 if (outbufsz > MAX_STRUCT_SIZE) {
3666 /* We can't fit all the extents into the fixed size buffer.
3667 * Allocate one that is large enough and use it instead.
3669 host_ifconf = malloc(outbufsz);
3670 if (!host_ifconf) {
3671 return -TARGET_ENOMEM;
3673 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3674 free_buf = 1;
3676 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3678 host_ifconf->ifc_len = host_ifc_len;
3679 host_ifconf->ifc_buf = host_ifc_buf;
3681 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3682 if (!is_error(ret)) {
3683 /* convert host ifc_len to target ifc_len */
3685 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3686 target_ifc_len = nb_ifreq * target_ifreq_size;
3687 host_ifconf->ifc_len = target_ifc_len;
3689 /* restore target ifc_buf */
3691 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3693 /* copy struct ifconf to target user */
3695 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3696 if (!argptr)
3697 return -TARGET_EFAULT;
3698 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3699 unlock_user(argptr, arg, target_size);
3701 /* copy ifreq[] to target user */
3703 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3704 for (i = 0; i < nb_ifreq ; i++) {
3705 thunk_convert(argptr + i * target_ifreq_size,
3706 host_ifc_buf + i * sizeof(struct ifreq),
3707 ifreq_arg_type, THUNK_TARGET);
3709 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3712 if (free_buf) {
3713 free(host_ifconf);
3716 return ret;
/*
 * Handle the device-mapper ioctls (DM_*): the argument is a fixed
 * struct dm_ioctl header followed, at offset data_start, by a
 * command-specific variable-format payload, so the generic thunk
 * machinery cannot convert it. Input payloads are converted before the
 * host ioctl, output payloads after.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert just the fixed dm_ioctl header into buf_temp first. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        /* NOTE(review): host errno constant, not -TARGET_EINVAL — confirm
         * whether this should be a target errno like the other paths. */
        ret = -EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* Convert the command-specific *input* payload, if any.
     * NOTE(review): this lock_user result is not NULL-checked before the
     * memcpy paths below — confirm a bad guest pointer cannot reach them. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* The payload starts with a 64-bit sector number that needs
         * byte-swapping; the rest is a string. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload is a sequence of dm_target_spec structs, each
         * followed by a parameter string; 'next' links them. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* Convert the command-specific *output* payload back out. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Linked list of dm_name_list entries, each followed by a
             * name string; re-link with target-format offsets. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Sequence of dm_target_spec structs plus status strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* uint32 count (+4 bytes padding), then count uint64 devs. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Linked list of dm_target_versions plus name strings. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Write the (possibly updated) dm_ioctl header back out. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
3944 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3945 int cmd, abi_long arg)
3947 void *argptr;
3948 int target_size;
3949 const argtype *arg_type = ie->arg_type;
3950 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3951 abi_long ret;
3953 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3954 struct blkpg_partition host_part;
3956 /* Read and convert blkpg */
3957 arg_type++;
3958 target_size = thunk_type_size(arg_type, 0);
3959 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3960 if (!argptr) {
3961 ret = -TARGET_EFAULT;
3962 goto out;
3964 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3965 unlock_user(argptr, arg, 0);
3967 switch (host_blkpg->op) {
3968 case BLKPG_ADD_PARTITION:
3969 case BLKPG_DEL_PARTITION:
3970 /* payload is struct blkpg_partition */
3971 break;
3972 default:
3973 /* Unknown opcode */
3974 ret = -TARGET_EINVAL;
3975 goto out;
3978 /* Read and convert blkpg->data */
3979 arg = (abi_long)(uintptr_t)host_blkpg->data;
3980 target_size = thunk_type_size(part_arg_type, 0);
3981 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3982 if (!argptr) {
3983 ret = -TARGET_EFAULT;
3984 goto out;
3986 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3987 unlock_user(argptr, arg, 0);
3989 /* Swizzle the data pointer to our local copy and call! */
3990 host_blkpg->data = &host_part;
3991 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3993 out:
3994 return ret;
3997 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3998 int fd, int cmd, abi_long arg)
4000 const argtype *arg_type = ie->arg_type;
4001 const StructEntry *se;
4002 const argtype *field_types;
4003 const int *dst_offsets, *src_offsets;
4004 int target_size;
4005 void *argptr;
4006 abi_ulong *target_rt_dev_ptr;
4007 unsigned long *host_rt_dev_ptr;
4008 abi_long ret;
4009 int i;
4011 assert(ie->access == IOC_W);
4012 assert(*arg_type == TYPE_PTR);
4013 arg_type++;
4014 assert(*arg_type == TYPE_STRUCT);
4015 target_size = thunk_type_size(arg_type, 0);
4016 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4017 if (!argptr) {
4018 return -TARGET_EFAULT;
4020 arg_type++;
4021 assert(*arg_type == (int)STRUCT_rtentry);
4022 se = struct_entries + *arg_type++;
4023 assert(se->convert[0] == NULL);
4024 /* convert struct here to be able to catch rt_dev string */
4025 field_types = se->field_types;
4026 dst_offsets = se->field_offsets[THUNK_HOST];
4027 src_offsets = se->field_offsets[THUNK_TARGET];
4028 for (i = 0; i < se->nb_fields; i++) {
4029 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4030 assert(*field_types == TYPE_PTRVOID);
4031 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4032 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4033 if (*target_rt_dev_ptr != 0) {
4034 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4035 tswapal(*target_rt_dev_ptr));
4036 if (!*host_rt_dev_ptr) {
4037 unlock_user(argptr, arg, 0);
4038 return -TARGET_EFAULT;
4040 } else {
4041 *host_rt_dev_ptr = 0;
4043 field_types++;
4044 continue;
4046 field_types = thunk_convert(buf_temp + dst_offsets[i],
4047 argptr + src_offsets[i],
4048 field_types, THUNK_HOST);
4050 unlock_user(argptr, arg, 0);
4052 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4053 if (*host_rt_dev_ptr != 0) {
4054 unlock_user((void *)*host_rt_dev_ptr,
4055 *target_rt_dev_ptr, 0);
4057 return ret;
4060 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4061 int fd, int cmd, abi_long arg)
4063 int sig = target_to_host_signal(arg);
4064 return get_errno(ioctl(fd, ie->host_cmd, sig));
/* Translation table for all supported ioctls, expanded from ioctls.h.
 * Terminated by an all-zero sentinel entry (target_cmd == 0). */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher: look the command up in ioctl_entries, then
 * either delegate to a custom handler or marshal the argument with the
 * generic thunk machinery according to its declared type and access mode.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan; the table ends with an all-zero sentinel entry. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Entries with a custom handler bypass the generic thunking below. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Scalar (or opaque pointer) argument: pass through unchanged. */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes only: call with scratch buffer, then
             * convert the result out to the guest. */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads only: convert the guest struct in first. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read/write (also the fallback for unknown access modes):
             * convert in, call, convert back out on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Bitmask translation table for termios c_iflag (input modes):
 * { target_mask, target_bits, host_mask, host_bits }, zero-terminated. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* Bitmask translation table for termios c_oflag (output modes).
 * Multi-bit delay fields (NLDLY, CRDLY, ...) get one row per value. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* Bitmask translation table for termios c_cflag (control modes).
 * The CBAUD rows translate each encoded baud rate value; CSIZE rows
 * translate the character-size field. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* Bitmask translation table for termios c_lflag (local modes). */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/*
 * Convert a guest struct target_termios (src) to a host struct
 * host_termios (dst). Flag words are translated bit-by-bit through the
 * *_tbl tables above; control characters are copied individually since
 * the V* indices differ between guest and host ABIs.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Zero first: host slots with no guest counterpart stay cleared. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Convert a host struct host_termios (src) to a guest struct
 * target_termios (dst). Mirror image of target_to_host_termios().
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Zero first: guest slots with no host counterpart stay cleared. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: uses the hand-written converters
 * above instead of a generated field list. convert[0] is host->target,
 * convert[1] is target->host. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
/* Bitmask translation table for mmap(2) flags between target and host. */
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
	{ TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
	  MAP_NORESERVE },
	{ 0, 0, 0, 0 }
};
4353 #if defined(TARGET_I386)
4355 /* NOTE: there is really one LDT for all the threads */
4356 static uint8_t *ldt_table;
4358 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4360 int size;
4361 void *p;
4363 if (!ldt_table)
4364 return 0;
4365 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4366 if (size > bytecount)
4367 size = bytecount;
4368 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4369 if (!p)
4370 return -TARGET_EFAULT;
4371 /* ??? Should this by byteswapped? */
4372 memcpy(p, ldt_table, size);
4373 unlock_user(p, ptr, size);
4374 return size;
/* XXX: add locking support */
/*
 * modify_ldt(2) write path: validate a guest-supplied LDT descriptor,
 * lazily allocate the emulated LDT on first use, then pack the entry
 * into x86 descriptor format and install it.
 *
 * @env:       CPU whose LDT is being modified
 * @ptr:       guest address of a struct target_modify_ldt_ldt_s
 * @bytecount: must equal sizeof(that struct)
 * @oldmode:   non-zero for the legacy (func==1) encoding, which forbids
 *             "contents == 3" entries and ignores the 'useable' bit
 *
 * Returns 0, -TARGET_EINVAL, -TARGET_EFAULT or -TARGET_ENOMEM.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the guest descriptor into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the Linux user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two 32-bit halves of an x86
     * segment descriptor (low word: base 15..0 | limit 15..0). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
4469 /* specific and weird i386 syscalls */
4470 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4471 unsigned long bytecount)
4473 abi_long ret;
4475 switch (func) {
4476 case 0:
4477 ret = read_ldt(ptr, bytecount);
4478 break;
4479 case 1:
4480 ret = write_ldt(env, ptr, bytecount, 1);
4481 break;
4482 case 0x11:
4483 ret = write_ldt(env, ptr, bytecount, 0);
4484 break;
4485 default:
4486 ret = -TARGET_ENOSYS;
4487 break;
4489 return ret;
4492 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor in the emulated GDT.
 * If the guest passes entry_number == -1 we pick the first free TLS
 * slot and write the chosen index back into the guest struct.
 * Largely mirrors write_ldt() above, but targets the GDT TLS range and
 * always honours the 'useable' bit.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: we may write the auto-selected entry number back. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot in the GDT. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as the Linux user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into x86 descriptor format. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * get_thread_area(2): read a TLS descriptor back out of the emulated
 * GDT, unpacking the x86 descriptor bits into the user_desc-style
 * base/limit/flags layout expected by the guest.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Split the 64-bit descriptor into its two 32-bit halves. */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Reverse of the packing done in do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
4624 #endif /* TARGET_I386 && TARGET_ABI32 */
4626 #ifndef TARGET_ABI32
4627 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4629 abi_long ret = 0;
4630 abi_ulong val;
4631 int idx;
4633 switch(code) {
4634 case TARGET_ARCH_SET_GS:
4635 case TARGET_ARCH_SET_FS:
4636 if (code == TARGET_ARCH_SET_GS)
4637 idx = R_GS;
4638 else
4639 idx = R_FS;
4640 cpu_x86_load_seg(env, idx, 0);
4641 env->segs[idx].base = addr;
4642 break;
4643 case TARGET_ARCH_GET_GS:
4644 case TARGET_ARCH_GET_FS:
4645 if (code == TARGET_ARCH_GET_GS)
4646 idx = R_GS;
4647 else
4648 idx = R_FS;
4649 val = env->segs[idx].base;
4650 if (put_user(val, addr, abi_ulong))
4651 ret = -TARGET_EFAULT;
4652 break;
4653 default:
4654 ret = -TARGET_EINVAL;
4655 break;
4657 return ret;
4659 #endif
4661 #endif /* defined(TARGET_I386) */
4663 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation: the parent holds this across clone setup so
 * the child (see clone_func) cannot run its CPU loop until the parent has
 * finished initializing, e.g. the TLS state. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Parameter block passed from do_fork() to the child's clone_func(). */
typedef struct {
    CPUArchState *env;          /* CPU state for the new thread */
    pthread_mutex_t mutex;      /* protects cond below */
    pthread_cond_t cond;        /* child signals this once it is ready */
    pthread_t thread;
    uint32_t tid;               /* host TID, filled in by the child */
    abi_ulong child_tidptr;     /* guest addrs for CLONE_*_SETTID, or 0 */
    abi_ulong parent_tidptr;
    sigset_t sigmask;           /* parent's signal mask, restored in child */
} new_thread_info;
/*
 * Entry point of a thread created for an emulated clone(CLONE_VM).
 * Publishes the TID where requested, restores the signal mask, wakes the
 * parent waiting in do_fork(), then blocks on clone_lock until the parent
 * has finished setup before entering the guest CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID if requested. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  With CLONE_VM we create a host
 * pthread sharing this process's address space; without it we fork() a
 * whole new host process.  vfork is degraded to fork.  Returns the new
 * TID/PID on success, -1 or a target errno on failure.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        /* Keep the NPTL flags for our own bookkeeping but strip them
         * before anything that interprets 'flags' as plain clone bits. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the return values of the pthread_attr_* calls
         * below are assigned to 'ret' but never checked; only the
         * pthread_create() result is acted upon. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        /* Releasing clone_lock lets the child proceed into cpu_loop(). */
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
4827 /* warning : doesn't handle linux specific flags... */
4828 static int target_to_host_fcntl_cmd(int cmd)
4830 switch(cmd) {
4831 case TARGET_F_DUPFD:
4832 case TARGET_F_GETFD:
4833 case TARGET_F_SETFD:
4834 case TARGET_F_GETFL:
4835 case TARGET_F_SETFL:
4836 return cmd;
4837 case TARGET_F_GETLK:
4838 return F_GETLK;
4839 case TARGET_F_SETLK:
4840 return F_SETLK;
4841 case TARGET_F_SETLKW:
4842 return F_SETLKW;
4843 case TARGET_F_GETOWN:
4844 return F_GETOWN;
4845 case TARGET_F_SETOWN:
4846 return F_SETOWN;
4847 case TARGET_F_GETSIG:
4848 return F_GETSIG;
4849 case TARGET_F_SETSIG:
4850 return F_SETSIG;
4851 #if TARGET_ABI_BITS == 32
4852 case TARGET_F_GETLK64:
4853 return F_GETLK64;
4854 case TARGET_F_SETLK64:
4855 return F_SETLK64;
4856 case TARGET_F_SETLKW64:
4857 return F_SETLKW64;
4858 #endif
4859 case TARGET_F_SETLEASE:
4860 return F_SETLEASE;
4861 case TARGET_F_GETLEASE:
4862 return F_GETLEASE;
4863 #ifdef F_DUPFD_CLOEXEC
4864 case TARGET_F_DUPFD_CLOEXEC:
4865 return F_DUPFD_CLOEXEC;
4866 #endif
4867 case TARGET_F_NOTIFY:
4868 return F_NOTIFY;
4869 #ifdef F_GETOWN_EX
4870 case TARGET_F_GETOWN_EX:
4871 return F_GETOWN_EX;
4872 #endif
4873 #ifdef F_SETOWN_EX
4874 case TARGET_F_SETOWN_EX:
4875 return F_SETOWN_EX;
4876 #endif
4877 default:
4878 return -TARGET_EINVAL;
4880 return -TARGET_EINVAL;
/* Build a bitmask_transtbl row for a flock l_type value.  A mask of -1
 * means "match the whole value", so these behave as exact-value
 * translations rather than bit translations. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/*
 * Emulate fcntl(2).  Commands that take a struct (flock, flock64,
 * f_owner_ex) are marshalled between target and host layouts; flag
 * commands are translated through fcntl_flags_tbl; everything else is
 * passed through with the translated command number.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy the guest flock in, query, then copy the result back. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                          host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
                  target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        /* NOTE(review): the '>> 1' on l_type here mirrors the historic
         * handling of the 64-bit flock layout in this file — confirm
         * against target_flock64 before touching it. */
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                   host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
           target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Translate the returned open-file flags back to target bits. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Integer-argument commands need no marshalling. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* With 16-bit UID targets, clamp 32-bit host IDs into the 16-bit range;
 * IDs above 65535 become the overflow ID 65534, matching the kernel's
 * legacy uid16 behaviour. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit guest ID; 0xffff is the "-1" sentinel and must stay -1
 * rather than becoming 65535. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

/* Byte-swap an ID at the width the target ABI uses for IDs. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit UID targets: all the conversions are identity functions. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread syscall wrappers built with the _syscall* macros. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialization for the syscall layer: register all thunk
 * struct descriptors, build the target->host errno table, and patch
 * ioctl command numbers whose size field must be computed from the
 * argument's thunk type at runtime.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit syscall argument.
 * Which half holds the high word depends on the guest's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the value in a single register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64(2): reassemble the 64-bit length from its two register
 * halves, skipping a padding slot on ABIs that align register pairs. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long w0 = arg2;
    abi_long w1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        /* The pair was shifted up by one argument slot. */
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(w0, w1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same register-pair handling as target_truncate64(). */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long w0 = arg2;
    abi_long w1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        /* The pair was shifted up by one argument slot. */
        w0 = arg3;
        w1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(w0, w1)));
}
#endif
5232 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5233 abi_ulong target_addr)
5235 struct target_timespec *target_ts;
5237 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5238 return -TARGET_EFAULT;
5239 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
5240 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5241 unlock_user_struct(target_ts, target_addr, 0);
5242 return 0;
5245 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5246 struct timespec *host_ts)
5248 struct target_timespec *target_ts;
5250 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5251 return -TARGET_EFAULT;
5252 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
5253 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
5254 unlock_user_struct(target_ts, target_addr, 1);
5255 return 0;
5258 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5259 abi_ulong target_addr)
5261 struct target_itimerspec *target_itspec;
5263 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5264 return -TARGET_EFAULT;
5267 host_itspec->it_interval.tv_sec =
5268 tswapal(target_itspec->it_interval.tv_sec);
5269 host_itspec->it_interval.tv_nsec =
5270 tswapal(target_itspec->it_interval.tv_nsec);
5271 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5272 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5274 unlock_user_struct(target_itspec, target_addr, 1);
5275 return 0;
5278 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5279 struct itimerspec *host_its)
5281 struct target_itimerspec *target_itspec;
5283 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5284 return -TARGET_EFAULT;
5287 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5288 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5290 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5291 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5293 unlock_user_struct(target_itspec, target_addr, 0);
5294 return 0;
5297 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5298 abi_ulong target_addr)
5300 struct target_sigevent *target_sevp;
5302 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5303 return -TARGET_EFAULT;
5306 /* This union is awkward on 64 bit systems because it has a 32 bit
5307 * integer and a pointer in it; we follow the conversion approach
5308 * used for handling sigval types in signal.c so the guest should get
5309 * the correct value back even if we did a 64 bit byteswap and it's
5310 * using the 32 bit integer.
5312 host_sevp->sigev_value.sival_ptr =
5313 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5314 host_sevp->sigev_signo =
5315 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5316 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5317 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5319 unlock_user_struct(target_sevp, target_addr, 1);
5320 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall(2) flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
/*
 * Convert a host struct stat into the guest's 64-bit stat layout at
 * @target_addr.  On ARM EABI32 guests a distinct EABI-specific layout
 * is used; otherwise the target_stat64 (or plain target_stat, if the
 * target has no stat64) layout applies.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some layouts carry the inode in a second, differently-sized
         * field as well. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
5401 /* ??? Using host futex calls even when target atomic operations
5402 are not really atomic probably breaks things. However implementing
5403 futexes locally would make futexes shared between multiple processes
5404 tricky. However they're probably useless because guest atomic
5405 operations won't work either. */
5406 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5407 target_ulong uaddr2, int val3)
5409 struct timespec ts, *pts;
5410 int base_op;
5412 /* ??? We assume FUTEX_* constants are the same on both host
5413 and target. */
5414 #ifdef FUTEX_CMD_MASK
5415 base_op = op & FUTEX_CMD_MASK;
5416 #else
5417 base_op = op;
5418 #endif
5419 switch (base_op) {
5420 case FUTEX_WAIT:
5421 case FUTEX_WAIT_BITSET:
5422 if (timeout) {
5423 pts = &ts;
5424 target_to_host_timespec(pts, timeout);
5425 } else {
5426 pts = NULL;
5428 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
5429 pts, NULL, val3));
5430 case FUTEX_WAKE:
5431 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5432 case FUTEX_FD:
5433 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5434 case FUTEX_REQUEUE:
5435 case FUTEX_CMP_REQUEUE:
5436 case FUTEX_WAKE_OP:
5437 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5438 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5439 But the prototype takes a `struct timespec *'; insert casts
5440 to satisfy the compiler. We do not need to tswap TIMEOUT
5441 since it's not compared to guest memory. */
5442 pts = (struct timespec *)(uintptr_t) timeout;
5443 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
5444 g2h(uaddr2),
5445 (base_op == FUTEX_CMP_REQUEUE
5446 ? tswap32(val3)
5447 : val3)));
5448 default:
5449 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): translate the guest pathname and the
 * guest struct file_handle, call the host syscall, then copy the
 * (opaque) handle data and the mount ID back to guest memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest struct is handle_bytes: the size of the
     * buffer the guest set aside for the opaque handle data. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Build the host-side handle in a scratch buffer so the host kernel
     * never writes directly into guest memory. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Copy everything, then byte-swap the two defined header fields. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest file_handle into a
 * host-order duplicate, then hand it to the host syscall. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    unsigned int fh_bytes, total;
    struct file_handle *guest_fh;
    struct file_handle *host_fh;
    abi_long ret;

    /* handle_bytes is the first field of the guest structure. */
    if (get_user_s32(fh_bytes, handle)) {
        return -TARGET_EFAULT;
    }

    total = sizeof(struct file_handle) + fh_bytes;
    guest_fh = lock_user(VERIFY_READ, handle, total, 1);
    if (!guest_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the whole record; the opaque payload is copied verbatim
     * and only the two defined header fields are byte-swapped. */
    host_fh = g_memdup(guest_fh, total);
    host_fh->handle_bytes = fh_bytes;
    host_fh->handle_type = tswap32(guest_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, host_fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(host_fh);
    unlock_user(guest_fh, handle, total);

    return ret;
}
#endif
5540 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5542 /* signalfd siginfo conversion */
/* Byte-swap one signalfd_siginfo record from host to guest order and
 * translate the signal number.  The caller may pass tinfo == info to
 * convert in place. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Read the source record (INFO), not the destination: the original
     * code tested TINFO, which only worked because the caller happens
     * to convert in place. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* was tswap32(tinfo->ssi_errno): swapped the uninitialized
     * destination field instead of the source */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
5582 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
5584 int i;
5586 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
5587 host_to_target_signalfd_siginfo(buf + i, buf + i);
5590 return len;
/* fd translator hook: byte-swap signalfd records read from the host fd
 * before they are handed back to the guest. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Emulate signalfd4(2): convert the guest sigset and flags, create the
 * host signalfd, and register a data translator on the new fd. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Reject any flag bits other than NONBLOCK and CLOEXEC. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* reads from this fd must byte-swap the siginfo records */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
5624 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* terminating signal lives in the low 7 bits */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8-15 */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }

    /* exited normally: nothing to translate */
    return status;
}
/* Fill FD with the contents of /proc/self/cmdline, minus the first
 * argument (the qemu binary path), so the guest sees its own command
 * line.  Returns 0 on success, -1 with errno set on failure. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Search only the bytes actually read: scanning the whole
             * buffer (the old sizeof(buf) bound) could match an
             * uninitialized byte past nb_read and drive nb_read
             * negative below. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5690 static int open_self_maps(void *cpu_env, int fd)
5692 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5693 TaskState *ts = cpu->opaque;
5694 FILE *fp;
5695 char *line = NULL;
5696 size_t len = 0;
5697 ssize_t read;
5699 fp = fopen("/proc/self/maps", "r");
5700 if (fp == NULL) {
5701 return -1;
5704 while ((read = getline(&line, &len, fp)) != -1) {
5705 int fields, dev_maj, dev_min, inode;
5706 uint64_t min, max, offset;
5707 char flag_r, flag_w, flag_x, flag_p;
5708 char path[512] = "";
5709 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5710 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5711 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5713 if ((fields < 10) || (fields > 11)) {
5714 continue;
5716 if (h2g_valid(min)) {
5717 int flags = page_get_flags(h2g(min));
5718 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5719 if (page_check_range(h2g(min), max - min, flags) == -1) {
5720 continue;
5722 if (h2g(min) == ts->info->stack_limit) {
5723 pstrcpy(path, sizeof(path), " [stack]");
5725 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5726 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5727 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5728 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5729 path[0] ? " " : "", path);
5733 free(line);
5734 fclose(fp);
5736 return 0;
/* Fill FD with a minimal /proc/self/stat for the guest: real pid,
 * guest argv[0] as the command name, the guest stack start in field 28,
 * and zeroes for every other field. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* every remaining field is faked as zero; field 44 ends
             * the line */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Fill FD with the guest's auxiliary vector, which the guest ELF
 * loader saved on the target stack, then rewind FD for the reader. */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                /* short write: give up, caller still gets partial data */
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced/consumed by the loop,
         * so unlock_user sees the advanced pointer and residual length.
         * Presumably harmless for a VERIFY_READ mapping (no write-back)
         * — confirm against lock_user()'s semantics. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory, via either "/proc/self/ENTRY" or "/proc/<mypid>/ENTRY". */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* numeric form must match our own pid exactly */
        char pid_prefix[80];
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
5830 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator for whole-path fake /proc entries. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
5836 static int open_net_route(void *cpu_env, int fd)
5838 FILE *fp;
5839 char *line = NULL;
5840 size_t len = 0;
5841 ssize_t read;
5843 fp = fopen("/proc/net/route", "r");
5844 if (fp == NULL) {
5845 return -1;
5848 /* read header */
5850 read = getline(&line, &len, fp);
5851 dprintf(fd, "%s", line);
5853 /* read routes */
5855 while ((read = getline(&line, &len, fp)) != -1) {
5856 char iface[16];
5857 uint32_t dest, gw, mask;
5858 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5859 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5860 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5861 &mask, &mtu, &window, &irtt);
5862 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5863 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5864 metric, tswap32(mask), mtu, window, irtt);
5867 free(line);
5868 fclose(fp);
5870 return 0;
5872 #endif
/* openat(2) core: intercept the /proc entries QEMU must fake for the
 * guest (maps/stat/auxv/cmdline/exe, and /proc/net/route when host and
 * guest endianness differ); everything else goes to the host. */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);          /* writes fake contents into fd */
        int (*cmp)(const char *s1, const char *s2);  /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must resolve to the guest binary, not to QEMU. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the open fd keeps the file alive */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* rewind so the guest reads from the start */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* guest-visible timer IDs carry TIMER_MAGIC in the upper 16 bits;
     * reject anything that doesn't */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* index must fall inside the static timer table */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
5955 /* do_syscall() should always have a single exit point at the end so
5956 that actions, such as logging of syscall results, can be performed.
5957 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5958 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5959 abi_long arg2, abi_long arg3, abi_long arg4,
5960 abi_long arg5, abi_long arg6, abi_long arg7,
5961 abi_long arg8)
5963 CPUState *cpu = ENV_GET_CPU(cpu_env);
5964 abi_long ret;
5965 struct stat st;
5966 struct statfs stfs;
5967 void *p;
5969 #if defined(DEBUG_ERESTARTSYS)
5970 /* Debug-only code for exercising the syscall-restart code paths
5971 * in the per-architecture cpu main loops: restart every syscall
5972 * the guest makes once before letting it through.
5975 static int flag;
5977 flag = !flag;
5978 if (flag) {
5979 return -TARGET_ERESTARTSYS;
5982 #endif
5984 #ifdef DEBUG
5985 gemu_log("syscall %d", num);
5986 #endif
5987 if(do_strace)
5988 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5990 switch(num) {
5991 case TARGET_NR_exit:
5992 /* In old applications this may be used to implement _exit(2).
5993 However in threaded applictions it is used for thread termination,
5994 and _exit_group is used for application termination.
5995 Do thread termination if we have more then one thread. */
5996 /* FIXME: This probably breaks if a signal arrives. We should probably
5997 be disabling signals. */
5998 if (CPU_NEXT(first_cpu)) {
5999 TaskState *ts;
6001 cpu_list_lock();
6002 /* Remove the CPU from the list. */
6003 QTAILQ_REMOVE(&cpus, cpu, node);
6004 cpu_list_unlock();
6005 ts = cpu->opaque;
6006 if (ts->child_tidptr) {
6007 put_user_u32(0, ts->child_tidptr);
6008 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6009 NULL, NULL, 0);
6011 thread_cpu = NULL;
6012 object_unref(OBJECT(cpu));
6013 g_free(ts);
6014 rcu_unregister_thread();
6015 pthread_exit(NULL);
6017 #ifdef TARGET_GPROF
6018 _mcleanup();
6019 #endif
6020 gdb_exit(cpu_env, arg1);
6021 _exit(arg1);
6022 ret = 0; /* avoid warning */
6023 break;
6024 case TARGET_NR_read:
6025 if (arg3 == 0)
6026 ret = 0;
6027 else {
6028 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6029 goto efault;
6030 ret = get_errno(safe_read(arg1, p, arg3));
6031 if (ret >= 0 &&
6032 fd_trans_host_to_target_data(arg1)) {
6033 ret = fd_trans_host_to_target_data(arg1)(p, ret);
6035 unlock_user(p, arg2, ret);
6037 break;
6038 case TARGET_NR_write:
6039 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6040 goto efault;
6041 ret = get_errno(safe_write(arg1, p, arg3));
6042 unlock_user(p, arg2, 0);
6043 break;
6044 #ifdef TARGET_NR_open
6045 case TARGET_NR_open:
6046 if (!(p = lock_user_string(arg1)))
6047 goto efault;
6048 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6049 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6050 arg3));
6051 fd_trans_unregister(ret);
6052 unlock_user(p, arg1, 0);
6053 break;
6054 #endif
6055 case TARGET_NR_openat:
6056 if (!(p = lock_user_string(arg2)))
6057 goto efault;
6058 ret = get_errno(do_openat(cpu_env, arg1, p,
6059 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6060 arg4));
6061 fd_trans_unregister(ret);
6062 unlock_user(p, arg2, 0);
6063 break;
6064 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6065 case TARGET_NR_name_to_handle_at:
6066 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6067 break;
6068 #endif
6069 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6070 case TARGET_NR_open_by_handle_at:
6071 ret = do_open_by_handle_at(arg1, arg2, arg3);
6072 fd_trans_unregister(ret);
6073 break;
6074 #endif
6075 case TARGET_NR_close:
6076 fd_trans_unregister(arg1);
6077 ret = get_errno(close(arg1));
6078 break;
6079 case TARGET_NR_brk:
6080 ret = do_brk(arg1);
6081 break;
6082 #ifdef TARGET_NR_fork
6083 case TARGET_NR_fork:
6084 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6085 break;
6086 #endif
6087 #ifdef TARGET_NR_waitpid
6088 case TARGET_NR_waitpid:
6090 int status;
6091 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6092 if (!is_error(ret) && arg2 && ret
6093 && put_user_s32(host_to_target_waitstatus(status), arg2))
6094 goto efault;
6096 break;
6097 #endif
6098 #ifdef TARGET_NR_waitid
6099 case TARGET_NR_waitid:
6101 siginfo_t info;
6102 info.si_pid = 0;
6103 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6104 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6105 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6106 goto efault;
6107 host_to_target_siginfo(p, &info);
6108 unlock_user(p, arg3, sizeof(target_siginfo_t));
6111 break;
6112 #endif
6113 #ifdef TARGET_NR_creat /* not on alpha */
6114 case TARGET_NR_creat:
6115 if (!(p = lock_user_string(arg1)))
6116 goto efault;
6117 ret = get_errno(creat(p, arg2));
6118 fd_trans_unregister(ret);
6119 unlock_user(p, arg1, 0);
6120 break;
6121 #endif
6122 #ifdef TARGET_NR_link
6123 case TARGET_NR_link:
6125 void * p2;
6126 p = lock_user_string(arg1);
6127 p2 = lock_user_string(arg2);
6128 if (!p || !p2)
6129 ret = -TARGET_EFAULT;
6130 else
6131 ret = get_errno(link(p, p2));
6132 unlock_user(p2, arg2, 0);
6133 unlock_user(p, arg1, 0);
6135 break;
6136 #endif
6137 #if defined(TARGET_NR_linkat)
6138 case TARGET_NR_linkat:
6140 void * p2 = NULL;
6141 if (!arg2 || !arg4)
6142 goto efault;
6143 p = lock_user_string(arg2);
6144 p2 = lock_user_string(arg4);
6145 if (!p || !p2)
6146 ret = -TARGET_EFAULT;
6147 else
6148 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6149 unlock_user(p, arg2, 0);
6150 unlock_user(p2, arg4, 0);
6152 break;
6153 #endif
6154 #ifdef TARGET_NR_unlink
6155 case TARGET_NR_unlink:
6156 if (!(p = lock_user_string(arg1)))
6157 goto efault;
6158 ret = get_errno(unlink(p));
6159 unlock_user(p, arg1, 0);
6160 break;
6161 #endif
6162 #if defined(TARGET_NR_unlinkat)
6163 case TARGET_NR_unlinkat:
6164 if (!(p = lock_user_string(arg2)))
6165 goto efault;
6166 ret = get_errno(unlinkat(arg1, p, arg3));
6167 unlock_user(p, arg2, 0);
6168 break;
6169 #endif
6170 case TARGET_NR_execve:
6172 char **argp, **envp;
6173 int argc, envc;
6174 abi_ulong gp;
6175 abi_ulong guest_argp;
6176 abi_ulong guest_envp;
6177 abi_ulong addr;
6178 char **q;
6179 int total_size = 0;
6181 argc = 0;
6182 guest_argp = arg2;
6183 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6184 if (get_user_ual(addr, gp))
6185 goto efault;
6186 if (!addr)
6187 break;
6188 argc++;
6190 envc = 0;
6191 guest_envp = arg3;
6192 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6193 if (get_user_ual(addr, gp))
6194 goto efault;
6195 if (!addr)
6196 break;
6197 envc++;
6200 argp = alloca((argc + 1) * sizeof(void *));
6201 envp = alloca((envc + 1) * sizeof(void *));
6203 for (gp = guest_argp, q = argp; gp;
6204 gp += sizeof(abi_ulong), q++) {
6205 if (get_user_ual(addr, gp))
6206 goto execve_efault;
6207 if (!addr)
6208 break;
6209 if (!(*q = lock_user_string(addr)))
6210 goto execve_efault;
6211 total_size += strlen(*q) + 1;
6213 *q = NULL;
6215 for (gp = guest_envp, q = envp; gp;
6216 gp += sizeof(abi_ulong), q++) {
6217 if (get_user_ual(addr, gp))
6218 goto execve_efault;
6219 if (!addr)
6220 break;
6221 if (!(*q = lock_user_string(addr)))
6222 goto execve_efault;
6223 total_size += strlen(*q) + 1;
6225 *q = NULL;
6227 if (!(p = lock_user_string(arg1)))
6228 goto execve_efault;
6229 /* Although execve() is not an interruptible syscall it is
6230 * a special case where we must use the safe_syscall wrapper:
6231 * if we allow a signal to happen before we make the host
6232 * syscall then we will 'lose' it, because at the point of
6233 * execve the process leaves QEMU's control. So we use the
6234 * safe syscall wrapper to ensure that we either take the
6235 * signal as a guest signal, or else it does not happen
6236 * before the execve completes and makes it the other
6237 * program's problem.
6239 ret = get_errno(safe_execve(p, argp, envp));
6240 unlock_user(p, arg1, 0);
6242 goto execve_end;
6244 execve_efault:
6245 ret = -TARGET_EFAULT;
6247 execve_end:
6248 for (gp = guest_argp, q = argp; *q;
6249 gp += sizeof(abi_ulong), q++) {
6250 if (get_user_ual(addr, gp)
6251 || !addr)
6252 break;
6253 unlock_user(*q, addr, 0);
6255 for (gp = guest_envp, q = envp; *q;
6256 gp += sizeof(abi_ulong), q++) {
6257 if (get_user_ual(addr, gp)
6258 || !addr)
6259 break;
6260 unlock_user(*q, addr, 0);
6263 break;
6264 case TARGET_NR_chdir:
6265 if (!(p = lock_user_string(arg1)))
6266 goto efault;
6267 ret = get_errno(chdir(p));
6268 unlock_user(p, arg1, 0);
6269 break;
6270 #ifdef TARGET_NR_time
6271 case TARGET_NR_time:
6273 time_t host_time;
6274 ret = get_errno(time(&host_time));
6275 if (!is_error(ret)
6276 && arg1
6277 && put_user_sal(host_time, arg1))
6278 goto efault;
6280 break;
6281 #endif
6282 #ifdef TARGET_NR_mknod
6283 case TARGET_NR_mknod:
6284 if (!(p = lock_user_string(arg1)))
6285 goto efault;
6286 ret = get_errno(mknod(p, arg2, arg3));
6287 unlock_user(p, arg1, 0);
6288 break;
6289 #endif
6290 #if defined(TARGET_NR_mknodat)
6291 case TARGET_NR_mknodat:
6292 if (!(p = lock_user_string(arg2)))
6293 goto efault;
6294 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6295 unlock_user(p, arg2, 0);
6296 break;
6297 #endif
6298 #ifdef TARGET_NR_chmod
6299 case TARGET_NR_chmod:
6300 if (!(p = lock_user_string(arg1)))
6301 goto efault;
6302 ret = get_errno(chmod(p, arg2));
6303 unlock_user(p, arg1, 0);
6304 break;
6305 #endif
6306 #ifdef TARGET_NR_break
6307 case TARGET_NR_break:
6308 goto unimplemented;
6309 #endif
6310 #ifdef TARGET_NR_oldstat
6311 case TARGET_NR_oldstat:
6312 goto unimplemented;
6313 #endif
6314 case TARGET_NR_lseek:
6315 ret = get_errno(lseek(arg1, arg2, arg3));
6316 break;
6317 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6318 /* Alpha specific */
6319 case TARGET_NR_getxpid:
6320 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6321 ret = get_errno(getpid());
6322 break;
6323 #endif
6324 #ifdef TARGET_NR_getpid
6325 case TARGET_NR_getpid:
6326 ret = get_errno(getpid());
6327 break;
6328 #endif
6329 case TARGET_NR_mount:
6331 /* need to look at the data field */
6332 void *p2, *p3;
6334 if (arg1) {
6335 p = lock_user_string(arg1);
6336 if (!p) {
6337 goto efault;
6339 } else {
6340 p = NULL;
6343 p2 = lock_user_string(arg2);
6344 if (!p2) {
6345 if (arg1) {
6346 unlock_user(p, arg1, 0);
6348 goto efault;
6351 if (arg3) {
6352 p3 = lock_user_string(arg3);
6353 if (!p3) {
6354 if (arg1) {
6355 unlock_user(p, arg1, 0);
6357 unlock_user(p2, arg2, 0);
6358 goto efault;
6360 } else {
6361 p3 = NULL;
6364 /* FIXME - arg5 should be locked, but it isn't clear how to
6365 * do that since it's not guaranteed to be a NULL-terminated
6366 * string.
6368 if (!arg5) {
6369 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6370 } else {
6371 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6373 ret = get_errno(ret);
6375 if (arg1) {
6376 unlock_user(p, arg1, 0);
6378 unlock_user(p2, arg2, 0);
6379 if (arg3) {
6380 unlock_user(p3, arg3, 0);
6383 break;
6384 #ifdef TARGET_NR_umount
6385 case TARGET_NR_umount:
6386 if (!(p = lock_user_string(arg1)))
6387 goto efault;
6388 ret = get_errno(umount(p));
6389 unlock_user(p, arg1, 0);
6390 break;
6391 #endif
6392 #ifdef TARGET_NR_stime /* not on alpha */
6393 case TARGET_NR_stime:
6395 time_t host_time;
6396 if (get_user_sal(host_time, arg1))
6397 goto efault;
6398 ret = get_errno(stime(&host_time));
6400 break;
6401 #endif
6402 case TARGET_NR_ptrace:
6403 goto unimplemented;
6404 #ifdef TARGET_NR_alarm /* not on alpha */
6405 case TARGET_NR_alarm:
6406 ret = alarm(arg1);
6407 break;
6408 #endif
6409 #ifdef TARGET_NR_oldfstat
6410 case TARGET_NR_oldfstat:
6411 goto unimplemented;
6412 #endif
6413 #ifdef TARGET_NR_pause /* not on alpha */
6414 case TARGET_NR_pause:
6415 ret = get_errno(pause());
6416 break;
6417 #endif
6418 #ifdef TARGET_NR_utime
6419 case TARGET_NR_utime:
6421 struct utimbuf tbuf, *host_tbuf;
6422 struct target_utimbuf *target_tbuf;
6423 if (arg2) {
6424 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6425 goto efault;
6426 tbuf.actime = tswapal(target_tbuf->actime);
6427 tbuf.modtime = tswapal(target_tbuf->modtime);
6428 unlock_user_struct(target_tbuf, arg2, 0);
6429 host_tbuf = &tbuf;
6430 } else {
6431 host_tbuf = NULL;
6433 if (!(p = lock_user_string(arg1)))
6434 goto efault;
6435 ret = get_errno(utime(p, host_tbuf));
6436 unlock_user(p, arg1, 0);
6438 break;
6439 #endif
6440 #ifdef TARGET_NR_utimes
6441 case TARGET_NR_utimes:
6443 struct timeval *tvp, tv[2];
6444 if (arg2) {
6445 if (copy_from_user_timeval(&tv[0], arg2)
6446 || copy_from_user_timeval(&tv[1],
6447 arg2 + sizeof(struct target_timeval)))
6448 goto efault;
6449 tvp = tv;
6450 } else {
6451 tvp = NULL;
6453 if (!(p = lock_user_string(arg1)))
6454 goto efault;
6455 ret = get_errno(utimes(p, tvp));
6456 unlock_user(p, arg1, 0);
6458 break;
6459 #endif
6460 #if defined(TARGET_NR_futimesat)
6461 case TARGET_NR_futimesat:
6463 struct timeval *tvp, tv[2];
6464 if (arg3) {
6465 if (copy_from_user_timeval(&tv[0], arg3)
6466 || copy_from_user_timeval(&tv[1],
6467 arg3 + sizeof(struct target_timeval)))
6468 goto efault;
6469 tvp = tv;
6470 } else {
6471 tvp = NULL;
6473 if (!(p = lock_user_string(arg2)))
6474 goto efault;
6475 ret = get_errno(futimesat(arg1, path(p), tvp));
6476 unlock_user(p, arg2, 0);
6478 break;
6479 #endif
6480 #ifdef TARGET_NR_stty
6481 case TARGET_NR_stty:
6482 goto unimplemented;
6483 #endif
6484 #ifdef TARGET_NR_gtty
6485 case TARGET_NR_gtty:
6486 goto unimplemented;
6487 #endif
6488 #ifdef TARGET_NR_access
6489 case TARGET_NR_access:
6490 if (!(p = lock_user_string(arg1)))
6491 goto efault;
6492 ret = get_errno(access(path(p), arg2));
6493 unlock_user(p, arg1, 0);
6494 break;
6495 #endif
6496 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6497 case TARGET_NR_faccessat:
6498 if (!(p = lock_user_string(arg2)))
6499 goto efault;
6500 ret = get_errno(faccessat(arg1, p, arg3, 0));
6501 unlock_user(p, arg2, 0);
6502 break;
6503 #endif
6504 #ifdef TARGET_NR_nice /* not on alpha */
6505 case TARGET_NR_nice:
6506 ret = get_errno(nice(arg1));
6507 break;
6508 #endif
6509 #ifdef TARGET_NR_ftime
6510 case TARGET_NR_ftime:
6511 goto unimplemented;
6512 #endif
6513 case TARGET_NR_sync:
6514 sync();
6515 ret = 0;
6516 break;
6517 case TARGET_NR_kill:
6518 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6519 break;
6520 #ifdef TARGET_NR_rename
6521 case TARGET_NR_rename:
6523 void *p2;
6524 p = lock_user_string(arg1);
6525 p2 = lock_user_string(arg2);
6526 if (!p || !p2)
6527 ret = -TARGET_EFAULT;
6528 else
6529 ret = get_errno(rename(p, p2));
6530 unlock_user(p2, arg2, 0);
6531 unlock_user(p, arg1, 0);
6533 break;
6534 #endif
6535 #if defined(TARGET_NR_renameat)
6536 case TARGET_NR_renameat:
6538 void *p2;
6539 p = lock_user_string(arg2);
6540 p2 = lock_user_string(arg4);
6541 if (!p || !p2)
6542 ret = -TARGET_EFAULT;
6543 else
6544 ret = get_errno(renameat(arg1, p, arg3, p2));
6545 unlock_user(p2, arg4, 0);
6546 unlock_user(p, arg2, 0);
6548 break;
6549 #endif
6550 #ifdef TARGET_NR_mkdir
6551 case TARGET_NR_mkdir:
6552 if (!(p = lock_user_string(arg1)))
6553 goto efault;
6554 ret = get_errno(mkdir(p, arg2));
6555 unlock_user(p, arg1, 0);
6556 break;
6557 #endif
6558 #if defined(TARGET_NR_mkdirat)
6559 case TARGET_NR_mkdirat:
6560 if (!(p = lock_user_string(arg2)))
6561 goto efault;
6562 ret = get_errno(mkdirat(arg1, p, arg3));
6563 unlock_user(p, arg2, 0);
6564 break;
6565 #endif
6566 #ifdef TARGET_NR_rmdir
6567 case TARGET_NR_rmdir:
6568 if (!(p = lock_user_string(arg1)))
6569 goto efault;
6570 ret = get_errno(rmdir(p));
6571 unlock_user(p, arg1, 0);
6572 break;
6573 #endif
6574 case TARGET_NR_dup:
6575 ret = get_errno(dup(arg1));
6576 if (ret >= 0) {
6577 fd_trans_dup(arg1, ret);
6579 break;
6580 #ifdef TARGET_NR_pipe
6581 case TARGET_NR_pipe:
6582 ret = do_pipe(cpu_env, arg1, 0, 0);
6583 break;
6584 #endif
6585 #ifdef TARGET_NR_pipe2
6586 case TARGET_NR_pipe2:
6587 ret = do_pipe(cpu_env, arg1,
6588 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6589 break;
6590 #endif
6591 case TARGET_NR_times:
6593 struct target_tms *tmsp;
6594 struct tms tms;
6595 ret = get_errno(times(&tms));
6596 if (arg1) {
6597 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6598 if (!tmsp)
6599 goto efault;
6600 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6601 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6602 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6603 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6605 if (!is_error(ret))
6606 ret = host_to_target_clock_t(ret);
6608 break;
6609 #ifdef TARGET_NR_prof
6610 case TARGET_NR_prof:
6611 goto unimplemented;
6612 #endif
6613 #ifdef TARGET_NR_signal
6614 case TARGET_NR_signal:
6615 goto unimplemented;
6616 #endif
6617 case TARGET_NR_acct:
6618 if (arg1 == 0) {
6619 ret = get_errno(acct(NULL));
6620 } else {
6621 if (!(p = lock_user_string(arg1)))
6622 goto efault;
6623 ret = get_errno(acct(path(p)));
6624 unlock_user(p, arg1, 0);
6626 break;
6627 #ifdef TARGET_NR_umount2
6628 case TARGET_NR_umount2:
6629 if (!(p = lock_user_string(arg1)))
6630 goto efault;
6631 ret = get_errno(umount2(p, arg2));
6632 unlock_user(p, arg1, 0);
6633 break;
6634 #endif
6635 #ifdef TARGET_NR_lock
6636 case TARGET_NR_lock:
6637 goto unimplemented;
6638 #endif
6639 case TARGET_NR_ioctl:
6640 ret = do_ioctl(arg1, arg2, arg3);
6641 break;
6642 case TARGET_NR_fcntl:
6643 ret = do_fcntl(arg1, arg2, arg3);
6644 break;
6645 #ifdef TARGET_NR_mpx
6646 case TARGET_NR_mpx:
6647 goto unimplemented;
6648 #endif
6649 case TARGET_NR_setpgid:
6650 ret = get_errno(setpgid(arg1, arg2));
6651 break;
6652 #ifdef TARGET_NR_ulimit
6653 case TARGET_NR_ulimit:
6654 goto unimplemented;
6655 #endif
6656 #ifdef TARGET_NR_oldolduname
6657 case TARGET_NR_oldolduname:
6658 goto unimplemented;
6659 #endif
6660 case TARGET_NR_umask:
6661 ret = get_errno(umask(arg1));
6662 break;
6663 case TARGET_NR_chroot:
6664 if (!(p = lock_user_string(arg1)))
6665 goto efault;
6666 ret = get_errno(chroot(p));
6667 unlock_user(p, arg1, 0);
6668 break;
6669 #ifdef TARGET_NR_ustat
6670 case TARGET_NR_ustat:
6671 goto unimplemented;
6672 #endif
6673 #ifdef TARGET_NR_dup2
6674 case TARGET_NR_dup2:
6675 ret = get_errno(dup2(arg1, arg2));
6676 if (ret >= 0) {
6677 fd_trans_dup(arg1, arg2);
6679 break;
6680 #endif
6681 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6682 case TARGET_NR_dup3:
6683 ret = get_errno(dup3(arg1, arg2, arg3));
6684 if (ret >= 0) {
6685 fd_trans_dup(arg1, arg2);
6687 break;
6688 #endif
6689 #ifdef TARGET_NR_getppid /* not on alpha */
6690 case TARGET_NR_getppid:
6691 ret = get_errno(getppid());
6692 break;
6693 #endif
6694 #ifdef TARGET_NR_getpgrp
6695 case TARGET_NR_getpgrp:
6696 ret = get_errno(getpgrp());
6697 break;
6698 #endif
6699 case TARGET_NR_setsid:
6700 ret = get_errno(setsid());
6701 break;
6702 #ifdef TARGET_NR_sigaction
6703 case TARGET_NR_sigaction:
6705 #if defined(TARGET_ALPHA)
6706 struct target_sigaction act, oact, *pact = 0;
6707 struct target_old_sigaction *old_act;
6708 if (arg2) {
6709 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6710 goto efault;
6711 act._sa_handler = old_act->_sa_handler;
6712 target_siginitset(&act.sa_mask, old_act->sa_mask);
6713 act.sa_flags = old_act->sa_flags;
6714 act.sa_restorer = 0;
6715 unlock_user_struct(old_act, arg2, 0);
6716 pact = &act;
6718 ret = get_errno(do_sigaction(arg1, pact, &oact));
6719 if (!is_error(ret) && arg3) {
6720 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6721 goto efault;
6722 old_act->_sa_handler = oact._sa_handler;
6723 old_act->sa_mask = oact.sa_mask.sig[0];
6724 old_act->sa_flags = oact.sa_flags;
6725 unlock_user_struct(old_act, arg3, 1);
6727 #elif defined(TARGET_MIPS)
6728 struct target_sigaction act, oact, *pact, *old_act;
6730 if (arg2) {
6731 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6732 goto efault;
6733 act._sa_handler = old_act->_sa_handler;
6734 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6735 act.sa_flags = old_act->sa_flags;
6736 unlock_user_struct(old_act, arg2, 0);
6737 pact = &act;
6738 } else {
6739 pact = NULL;
6742 ret = get_errno(do_sigaction(arg1, pact, &oact));
6744 if (!is_error(ret) && arg3) {
6745 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6746 goto efault;
6747 old_act->_sa_handler = oact._sa_handler;
6748 old_act->sa_flags = oact.sa_flags;
6749 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6750 old_act->sa_mask.sig[1] = 0;
6751 old_act->sa_mask.sig[2] = 0;
6752 old_act->sa_mask.sig[3] = 0;
6753 unlock_user_struct(old_act, arg3, 1);
6755 #else
6756 struct target_old_sigaction *old_act;
6757 struct target_sigaction act, oact, *pact;
6758 if (arg2) {
6759 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6760 goto efault;
6761 act._sa_handler = old_act->_sa_handler;
6762 target_siginitset(&act.sa_mask, old_act->sa_mask);
6763 act.sa_flags = old_act->sa_flags;
6764 act.sa_restorer = old_act->sa_restorer;
6765 unlock_user_struct(old_act, arg2, 0);
6766 pact = &act;
6767 } else {
6768 pact = NULL;
6770 ret = get_errno(do_sigaction(arg1, pact, &oact));
6771 if (!is_error(ret) && arg3) {
6772 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6773 goto efault;
6774 old_act->_sa_handler = oact._sa_handler;
6775 old_act->sa_mask = oact.sa_mask.sig[0];
6776 old_act->sa_flags = oact.sa_flags;
6777 old_act->sa_restorer = oact.sa_restorer;
6778 unlock_user_struct(old_act, arg3, 1);
6780 #endif
6782 break;
6783 #endif
6784 case TARGET_NR_rt_sigaction:
6786 #if defined(TARGET_ALPHA)
6787 struct target_sigaction act, oact, *pact = 0;
6788 struct target_rt_sigaction *rt_act;
6789 /* ??? arg4 == sizeof(sigset_t). */
6790 if (arg2) {
6791 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6792 goto efault;
6793 act._sa_handler = rt_act->_sa_handler;
6794 act.sa_mask = rt_act->sa_mask;
6795 act.sa_flags = rt_act->sa_flags;
6796 act.sa_restorer = arg5;
6797 unlock_user_struct(rt_act, arg2, 0);
6798 pact = &act;
6800 ret = get_errno(do_sigaction(arg1, pact, &oact));
6801 if (!is_error(ret) && arg3) {
6802 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6803 goto efault;
6804 rt_act->_sa_handler = oact._sa_handler;
6805 rt_act->sa_mask = oact.sa_mask;
6806 rt_act->sa_flags = oact.sa_flags;
6807 unlock_user_struct(rt_act, arg3, 1);
6809 #else
6810 struct target_sigaction *act;
6811 struct target_sigaction *oact;
6813 if (arg2) {
6814 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6815 goto efault;
6816 } else
6817 act = NULL;
6818 if (arg3) {
6819 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6820 ret = -TARGET_EFAULT;
6821 goto rt_sigaction_fail;
6823 } else
6824 oact = NULL;
6825 ret = get_errno(do_sigaction(arg1, act, oact));
6826 rt_sigaction_fail:
6827 if (act)
6828 unlock_user_struct(act, arg2, 0);
6829 if (oact)
6830 unlock_user_struct(oact, arg3, 1);
6831 #endif
6833 break;
6834 #ifdef TARGET_NR_sgetmask /* not on alpha */
6835 case TARGET_NR_sgetmask:
6837 sigset_t cur_set;
6838 abi_ulong target_set;
6839 do_sigprocmask(0, NULL, &cur_set);
6840 host_to_target_old_sigset(&target_set, &cur_set);
6841 ret = target_set;
6843 break;
6844 #endif
6845 #ifdef TARGET_NR_ssetmask /* not on alpha */
6846 case TARGET_NR_ssetmask:
6848 sigset_t set, oset, cur_set;
6849 abi_ulong target_set = arg1;
6850 do_sigprocmask(0, NULL, &cur_set);
6851 target_to_host_old_sigset(&set, &target_set);
6852 sigorset(&set, &set, &cur_set);
6853 do_sigprocmask(SIG_SETMASK, &set, &oset);
6854 host_to_target_old_sigset(&target_set, &oset);
6855 ret = target_set;
6857 break;
6858 #endif
6859 #ifdef TARGET_NR_sigprocmask
6860 case TARGET_NR_sigprocmask:
6862 #if defined(TARGET_ALPHA)
6863 sigset_t set, oldset;
6864 abi_ulong mask;
6865 int how;
6867 switch (arg1) {
6868 case TARGET_SIG_BLOCK:
6869 how = SIG_BLOCK;
6870 break;
6871 case TARGET_SIG_UNBLOCK:
6872 how = SIG_UNBLOCK;
6873 break;
6874 case TARGET_SIG_SETMASK:
6875 how = SIG_SETMASK;
6876 break;
6877 default:
6878 ret = -TARGET_EINVAL;
6879 goto fail;
6881 mask = arg2;
6882 target_to_host_old_sigset(&set, &mask);
6884 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6885 if (!is_error(ret)) {
6886 host_to_target_old_sigset(&mask, &oldset);
6887 ret = mask;
6888 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6890 #else
6891 sigset_t set, oldset, *set_ptr;
6892 int how;
6894 if (arg2) {
6895 switch (arg1) {
6896 case TARGET_SIG_BLOCK:
6897 how = SIG_BLOCK;
6898 break;
6899 case TARGET_SIG_UNBLOCK:
6900 how = SIG_UNBLOCK;
6901 break;
6902 case TARGET_SIG_SETMASK:
6903 how = SIG_SETMASK;
6904 break;
6905 default:
6906 ret = -TARGET_EINVAL;
6907 goto fail;
6909 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6910 goto efault;
6911 target_to_host_old_sigset(&set, p);
6912 unlock_user(p, arg2, 0);
6913 set_ptr = &set;
6914 } else {
6915 how = 0;
6916 set_ptr = NULL;
6918 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6919 if (!is_error(ret) && arg3) {
6920 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6921 goto efault;
6922 host_to_target_old_sigset(p, &oldset);
6923 unlock_user(p, arg3, sizeof(target_sigset_t));
6925 #endif
6927 break;
6928 #endif
6929 case TARGET_NR_rt_sigprocmask:
6931 int how = arg1;
6932 sigset_t set, oldset, *set_ptr;
6934 if (arg2) {
6935 switch(how) {
6936 case TARGET_SIG_BLOCK:
6937 how = SIG_BLOCK;
6938 break;
6939 case TARGET_SIG_UNBLOCK:
6940 how = SIG_UNBLOCK;
6941 break;
6942 case TARGET_SIG_SETMASK:
6943 how = SIG_SETMASK;
6944 break;
6945 default:
6946 ret = -TARGET_EINVAL;
6947 goto fail;
6949 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6950 goto efault;
6951 target_to_host_sigset(&set, p);
6952 unlock_user(p, arg2, 0);
6953 set_ptr = &set;
6954 } else {
6955 how = 0;
6956 set_ptr = NULL;
6958 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6959 if (!is_error(ret) && arg3) {
6960 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6961 goto efault;
6962 host_to_target_sigset(p, &oldset);
6963 unlock_user(p, arg3, sizeof(target_sigset_t));
6966 break;
6967 #ifdef TARGET_NR_sigpending
6968 case TARGET_NR_sigpending:
6970 sigset_t set;
6971 ret = get_errno(sigpending(&set));
6972 if (!is_error(ret)) {
6973 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6974 goto efault;
6975 host_to_target_old_sigset(p, &set);
6976 unlock_user(p, arg1, sizeof(target_sigset_t));
6979 break;
6980 #endif
6981 case TARGET_NR_rt_sigpending:
6983 sigset_t set;
6984 ret = get_errno(sigpending(&set));
6985 if (!is_error(ret)) {
6986 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6987 goto efault;
6988 host_to_target_sigset(p, &set);
6989 unlock_user(p, arg1, sizeof(target_sigset_t));
6992 break;
6993 #ifdef TARGET_NR_sigsuspend
6994 case TARGET_NR_sigsuspend:
6996 sigset_t set;
6997 #if defined(TARGET_ALPHA)
6998 abi_ulong mask = arg1;
6999 target_to_host_old_sigset(&set, &mask);
7000 #else
7001 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7002 goto efault;
7003 target_to_host_old_sigset(&set, p);
7004 unlock_user(p, arg1, 0);
7005 #endif
7006 ret = get_errno(sigsuspend(&set));
7008 break;
7009 #endif
7010 case TARGET_NR_rt_sigsuspend:
7012 sigset_t set;
7013 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7014 goto efault;
7015 target_to_host_sigset(&set, p);
7016 unlock_user(p, arg1, 0);
7017 ret = get_errno(sigsuspend(&set));
7019 break;
7020 case TARGET_NR_rt_sigtimedwait:
7022 sigset_t set;
7023 struct timespec uts, *puts;
7024 siginfo_t uinfo;
7026 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
7027 goto efault;
7028 target_to_host_sigset(&set, p);
7029 unlock_user(p, arg1, 0);
7030 if (arg3) {
7031 puts = &uts;
7032 target_to_host_timespec(puts, arg3);
7033 } else {
7034 puts = NULL;
7036 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
7037 if (!is_error(ret)) {
7038 if (arg2) {
7039 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7041 if (!p) {
7042 goto efault;
7044 host_to_target_siginfo(p, &uinfo);
7045 unlock_user(p, arg2, sizeof(target_siginfo_t));
7047 ret = host_to_target_signal(ret);
7050 break;
7051 case TARGET_NR_rt_sigqueueinfo:
7053 siginfo_t uinfo;
7054 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7055 goto efault;
7056 target_to_host_siginfo(&uinfo, p);
7057 unlock_user(p, arg1, 0);
7058 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7060 break;
7061 #ifdef TARGET_NR_sigreturn
7062 case TARGET_NR_sigreturn:
7063 ret = do_sigreturn(cpu_env);
7064 break;
7065 #endif
7066 case TARGET_NR_rt_sigreturn:
7067 ret = do_rt_sigreturn(cpu_env);
7068 break;
7069 case TARGET_NR_sethostname:
7070 if (!(p = lock_user_string(arg1)))
7071 goto efault;
7072 ret = get_errno(sethostname(p, arg2));
7073 unlock_user(p, arg1, 0);
7074 break;
7075 case TARGET_NR_setrlimit:
7077 int resource = target_to_host_resource(arg1);
7078 struct target_rlimit *target_rlim;
7079 struct rlimit rlim;
7080 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7081 goto efault;
7082 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7083 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7084 unlock_user_struct(target_rlim, arg2, 0);
7085 ret = get_errno(setrlimit(resource, &rlim));
7087 break;
7088 case TARGET_NR_getrlimit:
7090 int resource = target_to_host_resource(arg1);
7091 struct target_rlimit *target_rlim;
7092 struct rlimit rlim;
7094 ret = get_errno(getrlimit(resource, &rlim));
7095 if (!is_error(ret)) {
7096 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7097 goto efault;
7098 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7099 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7100 unlock_user_struct(target_rlim, arg2, 1);
7103 break;
7104 case TARGET_NR_getrusage:
7106 struct rusage rusage;
7107 ret = get_errno(getrusage(arg1, &rusage));
7108 if (!is_error(ret)) {
7109 ret = host_to_target_rusage(arg2, &rusage);
7112 break;
7113 case TARGET_NR_gettimeofday:
7115 struct timeval tv;
7116 ret = get_errno(gettimeofday(&tv, NULL));
7117 if (!is_error(ret)) {
7118 if (copy_to_user_timeval(arg1, &tv))
7119 goto efault;
7122 break;
7123 case TARGET_NR_settimeofday:
7125 struct timeval tv, *ptv = NULL;
7126 struct timezone tz, *ptz = NULL;
7128 if (arg1) {
7129 if (copy_from_user_timeval(&tv, arg1)) {
7130 goto efault;
7132 ptv = &tv;
7135 if (arg2) {
7136 if (copy_from_user_timezone(&tz, arg2)) {
7137 goto efault;
7139 ptz = &tz;
7142 ret = get_errno(settimeofday(ptv, ptz));
7144 break;
7145 #if defined(TARGET_NR_select)
7146 case TARGET_NR_select:
7147 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7148 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7149 #else
7151 struct target_sel_arg_struct *sel;
7152 abi_ulong inp, outp, exp, tvp;
7153 long nsel;
7155 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7156 goto efault;
7157 nsel = tswapal(sel->n);
7158 inp = tswapal(sel->inp);
7159 outp = tswapal(sel->outp);
7160 exp = tswapal(sel->exp);
7161 tvp = tswapal(sel->tvp);
7162 unlock_user_struct(sel, arg1, 0);
7163 ret = do_select(nsel, inp, outp, exp, tvp);
7165 #endif
7166 break;
7167 #endif
7168 #ifdef TARGET_NR_pselect6
7169 case TARGET_NR_pselect6:
7171 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7172 fd_set rfds, wfds, efds;
7173 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7174 struct timespec ts, *ts_ptr;
7177 * The 6th arg is actually two args smashed together,
7178 * so we cannot use the C library.
7180 sigset_t set;
7181 struct {
7182 sigset_t *set;
7183 size_t size;
7184 } sig, *sig_ptr;
7186 abi_ulong arg_sigset, arg_sigsize, *arg7;
7187 target_sigset_t *target_sigset;
7189 n = arg1;
7190 rfd_addr = arg2;
7191 wfd_addr = arg3;
7192 efd_addr = arg4;
7193 ts_addr = arg5;
7195 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7196 if (ret) {
7197 goto fail;
7199 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7200 if (ret) {
7201 goto fail;
7203 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7204 if (ret) {
7205 goto fail;
7209 * This takes a timespec, and not a timeval, so we cannot
7210 * use the do_select() helper ...
7212 if (ts_addr) {
7213 if (target_to_host_timespec(&ts, ts_addr)) {
7214 goto efault;
7216 ts_ptr = &ts;
7217 } else {
7218 ts_ptr = NULL;
7221 /* Extract the two packed args for the sigset */
7222 if (arg6) {
7223 sig_ptr = &sig;
7224 sig.size = _NSIG / 8;
7226 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7227 if (!arg7) {
7228 goto efault;
7230 arg_sigset = tswapal(arg7[0]);
7231 arg_sigsize = tswapal(arg7[1]);
7232 unlock_user(arg7, arg6, 0);
7234 if (arg_sigset) {
7235 sig.set = &set;
7236 if (arg_sigsize != sizeof(*target_sigset)) {
7237 /* Like the kernel, we enforce correct size sigsets */
7238 ret = -TARGET_EINVAL;
7239 goto fail;
7241 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7242 sizeof(*target_sigset), 1);
7243 if (!target_sigset) {
7244 goto efault;
7246 target_to_host_sigset(&set, target_sigset);
7247 unlock_user(target_sigset, arg_sigset, 0);
7248 } else {
7249 sig.set = NULL;
7251 } else {
7252 sig_ptr = NULL;
7255 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7256 ts_ptr, sig_ptr));
7258 if (!is_error(ret)) {
7259 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7260 goto efault;
7261 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7262 goto efault;
7263 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7264 goto efault;
7266 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7267 goto efault;
7270 break;
7271 #endif
7272 #ifdef TARGET_NR_symlink
7273 case TARGET_NR_symlink:
7275 void *p2;
7276 p = lock_user_string(arg1);
7277 p2 = lock_user_string(arg2);
7278 if (!p || !p2)
7279 ret = -TARGET_EFAULT;
7280 else
7281 ret = get_errno(symlink(p, p2));
7282 unlock_user(p2, arg2, 0);
7283 unlock_user(p, arg1, 0);
7285 break;
7286 #endif
7287 #if defined(TARGET_NR_symlinkat)
7288 case TARGET_NR_symlinkat:
7290 void *p2;
7291 p = lock_user_string(arg1);
7292 p2 = lock_user_string(arg3);
7293 if (!p || !p2)
7294 ret = -TARGET_EFAULT;
7295 else
7296 ret = get_errno(symlinkat(p, arg2, p2));
7297 unlock_user(p2, arg3, 0);
7298 unlock_user(p, arg1, 0);
7300 break;
7301 #endif
7302 #ifdef TARGET_NR_oldlstat
7303 case TARGET_NR_oldlstat:
7304 goto unimplemented;
7305 #endif
7306 #ifdef TARGET_NR_readlink
7307 case TARGET_NR_readlink:
7309 void *p2;
7310 p = lock_user_string(arg1);
7311 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7312 if (!p || !p2) {
7313 ret = -TARGET_EFAULT;
7314 } else if (!arg3) {
7315 /* Short circuit this for the magic exe check. */
7316 ret = -TARGET_EINVAL;
7317 } else if (is_proc_myself((const char *)p, "exe")) {
7318 char real[PATH_MAX], *temp;
7319 temp = realpath(exec_path, real);
7320 /* Return value is # of bytes that we wrote to the buffer. */
7321 if (temp == NULL) {
7322 ret = get_errno(-1);
7323 } else {
7324 /* Don't worry about sign mismatch as earlier mapping
7325 * logic would have thrown a bad address error. */
7326 ret = MIN(strlen(real), arg3);
7327 /* We cannot NUL terminate the string. */
7328 memcpy(p2, real, ret);
7330 } else {
7331 ret = get_errno(readlink(path(p), p2, arg3));
7333 unlock_user(p2, arg2, ret);
7334 unlock_user(p, arg1, 0);
7336 break;
7337 #endif
7338 #if defined(TARGET_NR_readlinkat)
7339 case TARGET_NR_readlinkat:
7341 void *p2;
7342 p = lock_user_string(arg2);
7343 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7344 if (!p || !p2) {
7345 ret = -TARGET_EFAULT;
7346 } else if (is_proc_myself((const char *)p, "exe")) {
7347 char real[PATH_MAX], *temp;
7348 temp = realpath(exec_path, real);
7349 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
7350 snprintf((char *)p2, arg4, "%s", real);
7351 } else {
7352 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
7354 unlock_user(p2, arg3, ret);
7355 unlock_user(p, arg2, 0);
7357 break;
7358 #endif
7359 #ifdef TARGET_NR_uselib
7360 case TARGET_NR_uselib:
7361 goto unimplemented;
7362 #endif
7363 #ifdef TARGET_NR_swapon
7364 case TARGET_NR_swapon:
7365 if (!(p = lock_user_string(arg1)))
7366 goto efault;
7367 ret = get_errno(swapon(p, arg2));
7368 unlock_user(p, arg1, 0);
7369 break;
7370 #endif
7371 case TARGET_NR_reboot:
7372 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7373 /* arg4 must be ignored in all other cases */
7374 p = lock_user_string(arg4);
7375 if (!p) {
7376 goto efault;
7378 ret = get_errno(reboot(arg1, arg2, arg3, p));
7379 unlock_user(p, arg4, 0);
7380 } else {
7381 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7383 break;
7384 #ifdef TARGET_NR_readdir
7385 case TARGET_NR_readdir:
7386 goto unimplemented;
7387 #endif
7388 #ifdef TARGET_NR_mmap
7389 case TARGET_NR_mmap:
7390 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7391 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7392 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7393 || defined(TARGET_S390X)
7395 abi_ulong *v;
7396 abi_ulong v1, v2, v3, v4, v5, v6;
7397 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7398 goto efault;
7399 v1 = tswapal(v[0]);
7400 v2 = tswapal(v[1]);
7401 v3 = tswapal(v[2]);
7402 v4 = tswapal(v[3]);
7403 v5 = tswapal(v[4]);
7404 v6 = tswapal(v[5]);
7405 unlock_user(v, arg1, 0);
7406 ret = get_errno(target_mmap(v1, v2, v3,
7407 target_to_host_bitmask(v4, mmap_flags_tbl),
7408 v5, v6));
7410 #else
7411 ret = get_errno(target_mmap(arg1, arg2, arg3,
7412 target_to_host_bitmask(arg4, mmap_flags_tbl),
7413 arg5,
7414 arg6));
7415 #endif
7416 break;
7417 #endif
7418 #ifdef TARGET_NR_mmap2
7419 case TARGET_NR_mmap2:
7420 #ifndef MMAP_SHIFT
7421 #define MMAP_SHIFT 12
7422 #endif
7423 ret = get_errno(target_mmap(arg1, arg2, arg3,
7424 target_to_host_bitmask(arg4, mmap_flags_tbl),
7425 arg5,
7426 arg6 << MMAP_SHIFT));
7427 break;
7428 #endif
7429 case TARGET_NR_munmap:
7430 ret = get_errno(target_munmap(arg1, arg2));
7431 break;
7432 case TARGET_NR_mprotect:
7434 TaskState *ts = cpu->opaque;
7435 /* Special hack to detect libc making the stack executable. */
7436 if ((arg3 & PROT_GROWSDOWN)
7437 && arg1 >= ts->info->stack_limit
7438 && arg1 <= ts->info->start_stack) {
7439 arg3 &= ~PROT_GROWSDOWN;
7440 arg2 = arg2 + arg1 - ts->info->stack_limit;
7441 arg1 = ts->info->stack_limit;
7444 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7445 break;
7446 #ifdef TARGET_NR_mremap
7447 case TARGET_NR_mremap:
7448 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7449 break;
7450 #endif
7451 /* ??? msync/mlock/munlock are broken for softmmu. */
7452 #ifdef TARGET_NR_msync
7453 case TARGET_NR_msync:
7454 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7455 break;
7456 #endif
7457 #ifdef TARGET_NR_mlock
7458 case TARGET_NR_mlock:
7459 ret = get_errno(mlock(g2h(arg1), arg2));
7460 break;
7461 #endif
7462 #ifdef TARGET_NR_munlock
7463 case TARGET_NR_munlock:
7464 ret = get_errno(munlock(g2h(arg1), arg2));
7465 break;
7466 #endif
7467 #ifdef TARGET_NR_mlockall
7468 case TARGET_NR_mlockall:
7469 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7470 break;
7471 #endif
7472 #ifdef TARGET_NR_munlockall
7473 case TARGET_NR_munlockall:
7474 ret = get_errno(munlockall());
7475 break;
7476 #endif
7477 case TARGET_NR_truncate:
7478 if (!(p = lock_user_string(arg1)))
7479 goto efault;
7480 ret = get_errno(truncate(p, arg2));
7481 unlock_user(p, arg1, 0);
7482 break;
7483 case TARGET_NR_ftruncate:
7484 ret = get_errno(ftruncate(arg1, arg2));
7485 break;
7486 case TARGET_NR_fchmod:
7487 ret = get_errno(fchmod(arg1, arg2));
7488 break;
7489 #if defined(TARGET_NR_fchmodat)
7490 case TARGET_NR_fchmodat:
7491 if (!(p = lock_user_string(arg2)))
7492 goto efault;
7493 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7494 unlock_user(p, arg2, 0);
7495 break;
7496 #endif
7497 case TARGET_NR_getpriority:
7498 /* Note that negative values are valid for getpriority, so we must
7499 differentiate based on errno settings. */
7500 errno = 0;
7501 ret = getpriority(arg1, arg2);
7502 if (ret == -1 && errno != 0) {
7503 ret = -host_to_target_errno(errno);
7504 break;
7506 #ifdef TARGET_ALPHA
7507 /* Return value is the unbiased priority. Signal no error. */
7508 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7509 #else
7510 /* Return value is a biased priority to avoid negative numbers. */
7511 ret = 20 - ret;
7512 #endif
7513 break;
7514 case TARGET_NR_setpriority:
7515 ret = get_errno(setpriority(arg1, arg2, arg3));
7516 break;
7517 #ifdef TARGET_NR_profil
7518 case TARGET_NR_profil:
7519 goto unimplemented;
7520 #endif
7521 case TARGET_NR_statfs:
7522 if (!(p = lock_user_string(arg1)))
7523 goto efault;
7524 ret = get_errno(statfs(path(p), &stfs));
7525 unlock_user(p, arg1, 0);
7526 convert_statfs:
7527 if (!is_error(ret)) {
7528 struct target_statfs *target_stfs;
7530 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7531 goto efault;
7532 __put_user(stfs.f_type, &target_stfs->f_type);
7533 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7534 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7535 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7536 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7537 __put_user(stfs.f_files, &target_stfs->f_files);
7538 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7539 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7540 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7541 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7542 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7543 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7544 unlock_user_struct(target_stfs, arg2, 1);
7546 break;
7547 case TARGET_NR_fstatfs:
7548 ret = get_errno(fstatfs(arg1, &stfs));
7549 goto convert_statfs;
7550 #ifdef TARGET_NR_statfs64
7551 case TARGET_NR_statfs64:
7552 if (!(p = lock_user_string(arg1)))
7553 goto efault;
7554 ret = get_errno(statfs(path(p), &stfs));
7555 unlock_user(p, arg1, 0);
7556 convert_statfs64:
7557 if (!is_error(ret)) {
7558 struct target_statfs64 *target_stfs;
7560 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7561 goto efault;
7562 __put_user(stfs.f_type, &target_stfs->f_type);
7563 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7564 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7565 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7566 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7567 __put_user(stfs.f_files, &target_stfs->f_files);
7568 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7569 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7570 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7571 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7572 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7573 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7574 unlock_user_struct(target_stfs, arg3, 1);
7576 break;
7577 case TARGET_NR_fstatfs64:
7578 ret = get_errno(fstatfs(arg1, &stfs));
7579 goto convert_statfs64;
7580 #endif
7581 #ifdef TARGET_NR_ioperm
7582 case TARGET_NR_ioperm:
7583 goto unimplemented;
7584 #endif
7585 #ifdef TARGET_NR_socketcall
7586 case TARGET_NR_socketcall:
7587 ret = do_socketcall(arg1, arg2);
7588 break;
7589 #endif
7590 #ifdef TARGET_NR_accept
7591 case TARGET_NR_accept:
7592 ret = do_accept4(arg1, arg2, arg3, 0);
7593 break;
7594 #endif
7595 #ifdef TARGET_NR_accept4
7596 case TARGET_NR_accept4:
7597 #ifdef CONFIG_ACCEPT4
7598 ret = do_accept4(arg1, arg2, arg3, arg4);
7599 #else
7600 goto unimplemented;
7601 #endif
7602 break;
7603 #endif
7604 #ifdef TARGET_NR_bind
7605 case TARGET_NR_bind:
7606 ret = do_bind(arg1, arg2, arg3);
7607 break;
7608 #endif
7609 #ifdef TARGET_NR_connect
7610 case TARGET_NR_connect:
7611 ret = do_connect(arg1, arg2, arg3);
7612 break;
7613 #endif
7614 #ifdef TARGET_NR_getpeername
7615 case TARGET_NR_getpeername:
7616 ret = do_getpeername(arg1, arg2, arg3);
7617 break;
7618 #endif
7619 #ifdef TARGET_NR_getsockname
7620 case TARGET_NR_getsockname:
7621 ret = do_getsockname(arg1, arg2, arg3);
7622 break;
7623 #endif
7624 #ifdef TARGET_NR_getsockopt
7625 case TARGET_NR_getsockopt:
7626 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7627 break;
7628 #endif
7629 #ifdef TARGET_NR_listen
7630 case TARGET_NR_listen:
7631 ret = get_errno(listen(arg1, arg2));
7632 break;
7633 #endif
7634 #ifdef TARGET_NR_recv
7635 case TARGET_NR_recv:
7636 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7637 break;
7638 #endif
7639 #ifdef TARGET_NR_recvfrom
7640 case TARGET_NR_recvfrom:
7641 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7642 break;
7643 #endif
7644 #ifdef TARGET_NR_recvmsg
7645 case TARGET_NR_recvmsg:
7646 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7647 break;
7648 #endif
7649 #ifdef TARGET_NR_send
7650 case TARGET_NR_send:
7651 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7652 break;
7653 #endif
7654 #ifdef TARGET_NR_sendmsg
7655 case TARGET_NR_sendmsg:
7656 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7657 break;
7658 #endif
7659 #ifdef TARGET_NR_sendmmsg
7660 case TARGET_NR_sendmmsg:
7661 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7662 break;
7663 case TARGET_NR_recvmmsg:
7664 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7665 break;
7666 #endif
7667 #ifdef TARGET_NR_sendto
7668 case TARGET_NR_sendto:
7669 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7670 break;
7671 #endif
7672 #ifdef TARGET_NR_shutdown
7673 case TARGET_NR_shutdown:
7674 ret = get_errno(shutdown(arg1, arg2));
7675 break;
7676 #endif
7677 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7678 case TARGET_NR_getrandom:
7679 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
7680 if (!p) {
7681 goto efault;
7683 ret = get_errno(getrandom(p, arg2, arg3));
7684 unlock_user(p, arg1, ret);
7685 break;
7686 #endif
7687 #ifdef TARGET_NR_socket
7688 case TARGET_NR_socket:
7689 ret = do_socket(arg1, arg2, arg3);
7690 fd_trans_unregister(ret);
7691 break;
7692 #endif
7693 #ifdef TARGET_NR_socketpair
7694 case TARGET_NR_socketpair:
7695 ret = do_socketpair(arg1, arg2, arg3, arg4);
7696 break;
7697 #endif
7698 #ifdef TARGET_NR_setsockopt
7699 case TARGET_NR_setsockopt:
7700 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7701 break;
7702 #endif
7704 case TARGET_NR_syslog:
7705 if (!(p = lock_user_string(arg2)))
7706 goto efault;
7707 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7708 unlock_user(p, arg2, 0);
7709 break;
7711 case TARGET_NR_setitimer:
7713 struct itimerval value, ovalue, *pvalue;
7715 if (arg2) {
7716 pvalue = &value;
7717 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7718 || copy_from_user_timeval(&pvalue->it_value,
7719 arg2 + sizeof(struct target_timeval)))
7720 goto efault;
7721 } else {
7722 pvalue = NULL;
7724 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7725 if (!is_error(ret) && arg3) {
7726 if (copy_to_user_timeval(arg3,
7727 &ovalue.it_interval)
7728 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7729 &ovalue.it_value))
7730 goto efault;
7733 break;
7734 case TARGET_NR_getitimer:
7736 struct itimerval value;
7738 ret = get_errno(getitimer(arg1, &value));
7739 if (!is_error(ret) && arg2) {
7740 if (copy_to_user_timeval(arg2,
7741 &value.it_interval)
7742 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7743 &value.it_value))
7744 goto efault;
7747 break;
7748 #ifdef TARGET_NR_stat
7749 case TARGET_NR_stat:
7750 if (!(p = lock_user_string(arg1)))
7751 goto efault;
7752 ret = get_errno(stat(path(p), &st));
7753 unlock_user(p, arg1, 0);
7754 goto do_stat;
7755 #endif
7756 #ifdef TARGET_NR_lstat
7757 case TARGET_NR_lstat:
7758 if (!(p = lock_user_string(arg1)))
7759 goto efault;
7760 ret = get_errno(lstat(path(p), &st));
7761 unlock_user(p, arg1, 0);
7762 goto do_stat;
7763 #endif
7764 case TARGET_NR_fstat:
7766 ret = get_errno(fstat(arg1, &st));
7767 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7768 do_stat:
7769 #endif
7770 if (!is_error(ret)) {
7771 struct target_stat *target_st;
7773 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7774 goto efault;
7775 memset(target_st, 0, sizeof(*target_st));
7776 __put_user(st.st_dev, &target_st->st_dev);
7777 __put_user(st.st_ino, &target_st->st_ino);
7778 __put_user(st.st_mode, &target_st->st_mode);
7779 __put_user(st.st_uid, &target_st->st_uid);
7780 __put_user(st.st_gid, &target_st->st_gid);
7781 __put_user(st.st_nlink, &target_st->st_nlink);
7782 __put_user(st.st_rdev, &target_st->st_rdev);
7783 __put_user(st.st_size, &target_st->st_size);
7784 __put_user(st.st_blksize, &target_st->st_blksize);
7785 __put_user(st.st_blocks, &target_st->st_blocks);
7786 __put_user(st.st_atime, &target_st->target_st_atime);
7787 __put_user(st.st_mtime, &target_st->target_st_mtime);
7788 __put_user(st.st_ctime, &target_st->target_st_ctime);
7789 unlock_user_struct(target_st, arg2, 1);
7792 break;
7793 #ifdef TARGET_NR_olduname
7794 case TARGET_NR_olduname:
7795 goto unimplemented;
7796 #endif
7797 #ifdef TARGET_NR_iopl
7798 case TARGET_NR_iopl:
7799 goto unimplemented;
7800 #endif
7801 case TARGET_NR_vhangup:
7802 ret = get_errno(vhangup());
7803 break;
7804 #ifdef TARGET_NR_idle
7805 case TARGET_NR_idle:
7806 goto unimplemented;
7807 #endif
7808 #ifdef TARGET_NR_syscall
7809 case TARGET_NR_syscall:
7810 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7811 arg6, arg7, arg8, 0);
7812 break;
7813 #endif
7814 case TARGET_NR_wait4:
7816 int status;
7817 abi_long status_ptr = arg2;
7818 struct rusage rusage, *rusage_ptr;
7819 abi_ulong target_rusage = arg4;
7820 abi_long rusage_err;
7821 if (target_rusage)
7822 rusage_ptr = &rusage;
7823 else
7824 rusage_ptr = NULL;
7825 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
7826 if (!is_error(ret)) {
7827 if (status_ptr && ret) {
7828 status = host_to_target_waitstatus(status);
7829 if (put_user_s32(status, status_ptr))
7830 goto efault;
7832 if (target_rusage) {
7833 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7834 if (rusage_err) {
7835 ret = rusage_err;
7840 break;
7841 #ifdef TARGET_NR_swapoff
7842 case TARGET_NR_swapoff:
7843 if (!(p = lock_user_string(arg1)))
7844 goto efault;
7845 ret = get_errno(swapoff(p));
7846 unlock_user(p, arg1, 0);
7847 break;
7848 #endif
7849 case TARGET_NR_sysinfo:
7851 struct target_sysinfo *target_value;
7852 struct sysinfo value;
7853 ret = get_errno(sysinfo(&value));
7854 if (!is_error(ret) && arg1)
7856 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7857 goto efault;
7858 __put_user(value.uptime, &target_value->uptime);
7859 __put_user(value.loads[0], &target_value->loads[0]);
7860 __put_user(value.loads[1], &target_value->loads[1]);
7861 __put_user(value.loads[2], &target_value->loads[2]);
7862 __put_user(value.totalram, &target_value->totalram);
7863 __put_user(value.freeram, &target_value->freeram);
7864 __put_user(value.sharedram, &target_value->sharedram);
7865 __put_user(value.bufferram, &target_value->bufferram);
7866 __put_user(value.totalswap, &target_value->totalswap);
7867 __put_user(value.freeswap, &target_value->freeswap);
7868 __put_user(value.procs, &target_value->procs);
7869 __put_user(value.totalhigh, &target_value->totalhigh);
7870 __put_user(value.freehigh, &target_value->freehigh);
7871 __put_user(value.mem_unit, &target_value->mem_unit);
7872 unlock_user_struct(target_value, arg1, 1);
7875 break;
7876 #ifdef TARGET_NR_ipc
7877 case TARGET_NR_ipc:
7878 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7879 break;
7880 #endif
7881 #ifdef TARGET_NR_semget
7882 case TARGET_NR_semget:
7883 ret = get_errno(semget(arg1, arg2, arg3));
7884 break;
7885 #endif
7886 #ifdef TARGET_NR_semop
7887 case TARGET_NR_semop:
7888 ret = do_semop(arg1, arg2, arg3);
7889 break;
7890 #endif
7891 #ifdef TARGET_NR_semctl
7892 case TARGET_NR_semctl:
7893 ret = do_semctl(arg1, arg2, arg3, arg4);
7894 break;
7895 #endif
7896 #ifdef TARGET_NR_msgctl
7897 case TARGET_NR_msgctl:
7898 ret = do_msgctl(arg1, arg2, arg3);
7899 break;
7900 #endif
7901 #ifdef TARGET_NR_msgget
7902 case TARGET_NR_msgget:
7903 ret = get_errno(msgget(arg1, arg2));
7904 break;
7905 #endif
7906 #ifdef TARGET_NR_msgrcv
7907 case TARGET_NR_msgrcv:
7908 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7909 break;
7910 #endif
7911 #ifdef TARGET_NR_msgsnd
7912 case TARGET_NR_msgsnd:
7913 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7914 break;
7915 #endif
7916 #ifdef TARGET_NR_shmget
7917 case TARGET_NR_shmget:
7918 ret = get_errno(shmget(arg1, arg2, arg3));
7919 break;
7920 #endif
7921 #ifdef TARGET_NR_shmctl
7922 case TARGET_NR_shmctl:
7923 ret = do_shmctl(arg1, arg2, arg3);
7924 break;
7925 #endif
7926 #ifdef TARGET_NR_shmat
7927 case TARGET_NR_shmat:
7928 ret = do_shmat(arg1, arg2, arg3);
7929 break;
7930 #endif
7931 #ifdef TARGET_NR_shmdt
7932 case TARGET_NR_shmdt:
7933 ret = do_shmdt(arg1);
7934 break;
7935 #endif
7936 case TARGET_NR_fsync:
7937 ret = get_errno(fsync(arg1));
7938 break;
7939 case TARGET_NR_clone:
7940 /* Linux manages to have three different orderings for its
7941 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7942 * match the kernel's CONFIG_CLONE_* settings.
7943 * Microblaze is further special in that it uses a sixth
7944 * implicit argument to clone for the TLS pointer.
7946 #if defined(TARGET_MICROBLAZE)
7947 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7948 #elif defined(TARGET_CLONE_BACKWARDS)
7949 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7950 #elif defined(TARGET_CLONE_BACKWARDS2)
7951 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7952 #else
7953 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7954 #endif
7955 break;
7956 #ifdef __NR_exit_group
7957 /* new thread calls */
7958 case TARGET_NR_exit_group:
7959 #ifdef TARGET_GPROF
7960 _mcleanup();
7961 #endif
7962 gdb_exit(cpu_env, arg1);
7963 ret = get_errno(exit_group(arg1));
7964 break;
7965 #endif
7966 case TARGET_NR_setdomainname:
7967 if (!(p = lock_user_string(arg1)))
7968 goto efault;
7969 ret = get_errno(setdomainname(p, arg2));
7970 unlock_user(p, arg1, 0);
7971 break;
7972 case TARGET_NR_uname:
7973 /* no need to transcode because we use the linux syscall */
7975 struct new_utsname * buf;
7977 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7978 goto efault;
7979 ret = get_errno(sys_uname(buf));
7980 if (!is_error(ret)) {
7981 /* Overrite the native machine name with whatever is being
7982 emulated. */
7983 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7984 /* Allow the user to override the reported release. */
7985 if (qemu_uname_release && *qemu_uname_release)
7986 strcpy (buf->release, qemu_uname_release);
7988 unlock_user_struct(buf, arg1, 1);
7990 break;
7991 #ifdef TARGET_I386
7992 case TARGET_NR_modify_ldt:
7993 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7994 break;
7995 #if !defined(TARGET_X86_64)
7996 case TARGET_NR_vm86old:
7997 goto unimplemented;
7998 case TARGET_NR_vm86:
7999 ret = do_vm86(cpu_env, arg1, arg2);
8000 break;
8001 #endif
8002 #endif
8003 case TARGET_NR_adjtimex:
8004 goto unimplemented;
8005 #ifdef TARGET_NR_create_module
8006 case TARGET_NR_create_module:
8007 #endif
8008 case TARGET_NR_init_module:
8009 case TARGET_NR_delete_module:
8010 #ifdef TARGET_NR_get_kernel_syms
8011 case TARGET_NR_get_kernel_syms:
8012 #endif
8013 goto unimplemented;
8014 case TARGET_NR_quotactl:
8015 goto unimplemented;
8016 case TARGET_NR_getpgid:
8017 ret = get_errno(getpgid(arg1));
8018 break;
8019 case TARGET_NR_fchdir:
8020 ret = get_errno(fchdir(arg1));
8021 break;
8022 #ifdef TARGET_NR_bdflush /* not on x86_64 */
8023 case TARGET_NR_bdflush:
8024 goto unimplemented;
8025 #endif
8026 #ifdef TARGET_NR_sysfs
8027 case TARGET_NR_sysfs:
8028 goto unimplemented;
8029 #endif
8030 case TARGET_NR_personality:
8031 ret = get_errno(personality(arg1));
8032 break;
8033 #ifdef TARGET_NR_afs_syscall
8034 case TARGET_NR_afs_syscall:
8035 goto unimplemented;
8036 #endif
8037 #ifdef TARGET_NR__llseek /* Not on alpha */
8038 case TARGET_NR__llseek:
8040 int64_t res;
8041 #if !defined(__NR_llseek)
8042 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8043 if (res == -1) {
8044 ret = get_errno(res);
8045 } else {
8046 ret = 0;
8048 #else
8049 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8050 #endif
8051 if ((ret == 0) && put_user_s64(res, arg4)) {
8052 goto efault;
8055 break;
8056 #endif
8057 #ifdef TARGET_NR_getdents
8058 case TARGET_NR_getdents:
8059 #ifdef __NR_getdents
8060 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8062 struct target_dirent *target_dirp;
8063 struct linux_dirent *dirp;
8064 abi_long count = arg3;
8066 dirp = g_try_malloc(count);
8067 if (!dirp) {
8068 ret = -TARGET_ENOMEM;
8069 goto fail;
8072 ret = get_errno(sys_getdents(arg1, dirp, count));
8073 if (!is_error(ret)) {
8074 struct linux_dirent *de;
8075 struct target_dirent *tde;
8076 int len = ret;
8077 int reclen, treclen;
8078 int count1, tnamelen;
8080 count1 = 0;
8081 de = dirp;
8082 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8083 goto efault;
8084 tde = target_dirp;
8085 while (len > 0) {
8086 reclen = de->d_reclen;
8087 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8088 assert(tnamelen >= 0);
8089 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8090 assert(count1 + treclen <= count);
8091 tde->d_reclen = tswap16(treclen);
8092 tde->d_ino = tswapal(de->d_ino);
8093 tde->d_off = tswapal(de->d_off);
8094 memcpy(tde->d_name, de->d_name, tnamelen);
8095 de = (struct linux_dirent *)((char *)de + reclen);
8096 len -= reclen;
8097 tde = (struct target_dirent *)((char *)tde + treclen);
8098 count1 += treclen;
8100 ret = count1;
8101 unlock_user(target_dirp, arg2, ret);
8103 g_free(dirp);
8105 #else
8107 struct linux_dirent *dirp;
8108 abi_long count = arg3;
8110 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8111 goto efault;
8112 ret = get_errno(sys_getdents(arg1, dirp, count));
8113 if (!is_error(ret)) {
8114 struct linux_dirent *de;
8115 int len = ret;
8116 int reclen;
8117 de = dirp;
8118 while (len > 0) {
8119 reclen = de->d_reclen;
8120 if (reclen > len)
8121 break;
8122 de->d_reclen = tswap16(reclen);
8123 tswapls(&de->d_ino);
8124 tswapls(&de->d_off);
8125 de = (struct linux_dirent *)((char *)de + reclen);
8126 len -= reclen;
8129 unlock_user(dirp, arg2, ret);
8131 #endif
8132 #else
8133 /* Implement getdents in terms of getdents64 */
8135 struct linux_dirent64 *dirp;
8136 abi_long count = arg3;
8138 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8139 if (!dirp) {
8140 goto efault;
8142 ret = get_errno(sys_getdents64(arg1, dirp, count));
8143 if (!is_error(ret)) {
8144 /* Convert the dirent64 structs to target dirent. We do this
8145 * in-place, since we can guarantee that a target_dirent is no
8146 * larger than a dirent64; however this means we have to be
8147 * careful to read everything before writing in the new format.
8149 struct linux_dirent64 *de;
8150 struct target_dirent *tde;
8151 int len = ret;
8152 int tlen = 0;
8154 de = dirp;
8155 tde = (struct target_dirent *)dirp;
8156 while (len > 0) {
8157 int namelen, treclen;
8158 int reclen = de->d_reclen;
8159 uint64_t ino = de->d_ino;
8160 int64_t off = de->d_off;
8161 uint8_t type = de->d_type;
8163 namelen = strlen(de->d_name);
8164 treclen = offsetof(struct target_dirent, d_name)
8165 + namelen + 2;
8166 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8168 memmove(tde->d_name, de->d_name, namelen + 1);
8169 tde->d_ino = tswapal(ino);
8170 tde->d_off = tswapal(off);
8171 tde->d_reclen = tswap16(treclen);
8172 /* The target_dirent type is in what was formerly a padding
8173 * byte at the end of the structure:
8175 *(((char *)tde) + treclen - 1) = type;
8177 de = (struct linux_dirent64 *)((char *)de + reclen);
8178 tde = (struct target_dirent *)((char *)tde + treclen);
8179 len -= reclen;
8180 tlen += treclen;
8182 ret = tlen;
8184 unlock_user(dirp, arg2, ret);
8186 #endif
8187 break;
8188 #endif /* TARGET_NR_getdents */
8189 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8190 case TARGET_NR_getdents64:
8192 struct linux_dirent64 *dirp;
8193 abi_long count = arg3;
8194 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8195 goto efault;
8196 ret = get_errno(sys_getdents64(arg1, dirp, count));
8197 if (!is_error(ret)) {
8198 struct linux_dirent64 *de;
8199 int len = ret;
8200 int reclen;
8201 de = dirp;
8202 while (len > 0) {
8203 reclen = de->d_reclen;
8204 if (reclen > len)
8205 break;
8206 de->d_reclen = tswap16(reclen);
8207 tswap64s((uint64_t *)&de->d_ino);
8208 tswap64s((uint64_t *)&de->d_off);
8209 de = (struct linux_dirent64 *)((char *)de + reclen);
8210 len -= reclen;
8213 unlock_user(dirp, arg2, ret);
8215 break;
8216 #endif /* TARGET_NR_getdents64 */
8217 #if defined(TARGET_NR__newselect)
8218 case TARGET_NR__newselect:
8219 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8220 break;
8221 #endif
8222 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8223 # ifdef TARGET_NR_poll
8224 case TARGET_NR_poll:
8225 # endif
8226 # ifdef TARGET_NR_ppoll
8227 case TARGET_NR_ppoll:
8228 # endif
8230 struct target_pollfd *target_pfd;
8231 unsigned int nfds = arg2;
8232 int timeout = arg3;
8233 struct pollfd *pfd;
8234 unsigned int i;
8236 pfd = NULL;
8237 target_pfd = NULL;
8238 if (nfds) {
8239 target_pfd = lock_user(VERIFY_WRITE, arg1,
8240 sizeof(struct target_pollfd) * nfds, 1);
8241 if (!target_pfd) {
8242 goto efault;
8245 pfd = alloca(sizeof(struct pollfd) * nfds);
8246 for (i = 0; i < nfds; i++) {
8247 pfd[i].fd = tswap32(target_pfd[i].fd);
8248 pfd[i].events = tswap16(target_pfd[i].events);
8252 # ifdef TARGET_NR_ppoll
8253 if (num == TARGET_NR_ppoll) {
8254 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8255 target_sigset_t *target_set;
8256 sigset_t _set, *set = &_set;
8258 if (arg3) {
8259 if (target_to_host_timespec(timeout_ts, arg3)) {
8260 unlock_user(target_pfd, arg1, 0);
8261 goto efault;
8263 } else {
8264 timeout_ts = NULL;
8267 if (arg4) {
8268 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8269 if (!target_set) {
8270 unlock_user(target_pfd, arg1, 0);
8271 goto efault;
8273 target_to_host_sigset(set, target_set);
8274 } else {
8275 set = NULL;
8278 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
8280 if (!is_error(ret) && arg3) {
8281 host_to_target_timespec(arg3, timeout_ts);
8283 if (arg4) {
8284 unlock_user(target_set, arg4, 0);
8286 } else
8287 # endif
8288 ret = get_errno(poll(pfd, nfds, timeout));
8290 if (!is_error(ret)) {
8291 for(i = 0; i < nfds; i++) {
8292 target_pfd[i].revents = tswap16(pfd[i].revents);
8295 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8297 break;
8298 #endif
8299 case TARGET_NR_flock:
8300 /* NOTE: the flock constant seems to be the same for every
8301 Linux platform */
8302 ret = get_errno(flock(arg1, arg2));
8303 break;
8304 case TARGET_NR_readv:
8306 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8307 if (vec != NULL) {
8308 ret = get_errno(readv(arg1, vec, arg3));
8309 unlock_iovec(vec, arg2, arg3, 1);
8310 } else {
8311 ret = -host_to_target_errno(errno);
8314 break;
8315 case TARGET_NR_writev:
8317 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8318 if (vec != NULL) {
8319 ret = get_errno(writev(arg1, vec, arg3));
8320 unlock_iovec(vec, arg2, arg3, 0);
8321 } else {
8322 ret = -host_to_target_errno(errno);
8325 break;
8326 case TARGET_NR_getsid:
8327 ret = get_errno(getsid(arg1));
8328 break;
8329 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8330 case TARGET_NR_fdatasync:
8331 ret = get_errno(fdatasync(arg1));
8332 break;
8333 #endif
8334 #ifdef TARGET_NR__sysctl
8335 case TARGET_NR__sysctl:
8336 /* We don't implement this, but ENOTDIR is always a safe
8337 return value. */
8338 ret = -TARGET_ENOTDIR;
8339 break;
8340 #endif
8341 case TARGET_NR_sched_getaffinity:
8343 unsigned int mask_size;
8344 unsigned long *mask;
8347 * sched_getaffinity needs multiples of ulong, so need to take
8348 * care of mismatches between target ulong and host ulong sizes.
8350 if (arg2 & (sizeof(abi_ulong) - 1)) {
8351 ret = -TARGET_EINVAL;
8352 break;
8354 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8356 mask = alloca(mask_size);
8357 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8359 if (!is_error(ret)) {
8360 if (ret > arg2) {
8361 /* More data returned than the caller's buffer will fit.
8362 * This only happens if sizeof(abi_long) < sizeof(long)
8363 * and the caller passed us a buffer holding an odd number
8364 * of abi_longs. If the host kernel is actually using the
8365 * extra 4 bytes then fail EINVAL; otherwise we can just
8366 * ignore them and only copy the interesting part.
8368 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8369 if (numcpus > arg2 * 8) {
8370 ret = -TARGET_EINVAL;
8371 break;
8373 ret = arg2;
8376 if (copy_to_user(arg3, mask, ret)) {
8377 goto efault;
8381 break;
8382 case TARGET_NR_sched_setaffinity:
8384 unsigned int mask_size;
8385 unsigned long *mask;
8388 * sched_setaffinity needs multiples of ulong, so need to take
8389 * care of mismatches between target ulong and host ulong sizes.
8391 if (arg2 & (sizeof(abi_ulong) - 1)) {
8392 ret = -TARGET_EINVAL;
8393 break;
8395 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8397 mask = alloca(mask_size);
8398 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8399 goto efault;
8401 memcpy(mask, p, arg2);
8402 unlock_user_struct(p, arg2, 0);
8404 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8406 break;
8407 case TARGET_NR_sched_setparam:
8409 struct sched_param *target_schp;
8410 struct sched_param schp;
8412 if (arg2 == 0) {
8413 return -TARGET_EINVAL;
8415 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8416 goto efault;
8417 schp.sched_priority = tswap32(target_schp->sched_priority);
8418 unlock_user_struct(target_schp, arg2, 0);
8419 ret = get_errno(sched_setparam(arg1, &schp));
8421 break;
8422 case TARGET_NR_sched_getparam:
8424 struct sched_param *target_schp;
8425 struct sched_param schp;
8427 if (arg2 == 0) {
8428 return -TARGET_EINVAL;
8430 ret = get_errno(sched_getparam(arg1, &schp));
8431 if (!is_error(ret)) {
8432 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8433 goto efault;
8434 target_schp->sched_priority = tswap32(schp.sched_priority);
8435 unlock_user_struct(target_schp, arg2, 1);
8438 break;
8439 case TARGET_NR_sched_setscheduler:
8441 struct sched_param *target_schp;
8442 struct sched_param schp;
8443 if (arg3 == 0) {
8444 return -TARGET_EINVAL;
8446 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8447 goto efault;
8448 schp.sched_priority = tswap32(target_schp->sched_priority);
8449 unlock_user_struct(target_schp, arg3, 0);
8450 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8452 break;
8453 case TARGET_NR_sched_getscheduler:
8454 ret = get_errno(sched_getscheduler(arg1));
8455 break;
8456 case TARGET_NR_sched_yield:
8457 ret = get_errno(sched_yield());
8458 break;
8459 case TARGET_NR_sched_get_priority_max:
8460 ret = get_errno(sched_get_priority_max(arg1));
8461 break;
8462 case TARGET_NR_sched_get_priority_min:
8463 ret = get_errno(sched_get_priority_min(arg1));
8464 break;
8465 case TARGET_NR_sched_rr_get_interval:
8467 struct timespec ts;
8468 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8469 if (!is_error(ret)) {
8470 ret = host_to_target_timespec(arg2, &ts);
8473 break;
8474 case TARGET_NR_nanosleep:
8476 struct timespec req, rem;
8477 target_to_host_timespec(&req, arg1);
8478 ret = get_errno(nanosleep(&req, &rem));
8479 if (is_error(ret) && arg2) {
8480 host_to_target_timespec(arg2, &rem);
8483 break;
8484 #ifdef TARGET_NR_query_module
8485 case TARGET_NR_query_module:
8486 goto unimplemented;
8487 #endif
8488 #ifdef TARGET_NR_nfsservctl
8489 case TARGET_NR_nfsservctl:
8490 goto unimplemented;
8491 #endif
8492 case TARGET_NR_prctl:
8493 switch (arg1) {
8494 case PR_GET_PDEATHSIG:
8496 int deathsig;
8497 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8498 if (!is_error(ret) && arg2
8499 && put_user_ual(deathsig, arg2)) {
8500 goto efault;
8502 break;
8504 #ifdef PR_GET_NAME
8505 case PR_GET_NAME:
8507 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8508 if (!name) {
8509 goto efault;
8511 ret = get_errno(prctl(arg1, (unsigned long)name,
8512 arg3, arg4, arg5));
8513 unlock_user(name, arg2, 16);
8514 break;
8516 case PR_SET_NAME:
8518 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8519 if (!name) {
8520 goto efault;
8522 ret = get_errno(prctl(arg1, (unsigned long)name,
8523 arg3, arg4, arg5));
8524 unlock_user(name, arg2, 0);
8525 break;
8527 #endif
8528 default:
8529 /* Most prctl options have no pointer arguments */
8530 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8531 break;
8533 break;
8534 #ifdef TARGET_NR_arch_prctl
8535 case TARGET_NR_arch_prctl:
8536 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8537 ret = do_arch_prctl(cpu_env, arg1, arg2);
8538 break;
8539 #else
8540 goto unimplemented;
8541 #endif
8542 #endif
8543 #ifdef TARGET_NR_pread64
8544 case TARGET_NR_pread64:
8545 if (regpairs_aligned(cpu_env)) {
8546 arg4 = arg5;
8547 arg5 = arg6;
8549 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8550 goto efault;
8551 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8552 unlock_user(p, arg2, ret);
8553 break;
8554 case TARGET_NR_pwrite64:
8555 if (regpairs_aligned(cpu_env)) {
8556 arg4 = arg5;
8557 arg5 = arg6;
8559 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8560 goto efault;
8561 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8562 unlock_user(p, arg2, 0);
8563 break;
8564 #endif
8565 case TARGET_NR_getcwd:
8566 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8567 goto efault;
8568 ret = get_errno(sys_getcwd1(p, arg2));
8569 unlock_user(p, arg1, ret);
8570 break;
8571 case TARGET_NR_capget:
8572 case TARGET_NR_capset:
8574 struct target_user_cap_header *target_header;
8575 struct target_user_cap_data *target_data = NULL;
8576 struct __user_cap_header_struct header;
8577 struct __user_cap_data_struct data[2];
8578 struct __user_cap_data_struct *dataptr = NULL;
8579 int i, target_datalen;
8580 int data_items = 1;
8582 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8583 goto efault;
8585 header.version = tswap32(target_header->version);
8586 header.pid = tswap32(target_header->pid);
8588 if (header.version != _LINUX_CAPABILITY_VERSION) {
8589 /* Version 2 and up takes pointer to two user_data structs */
8590 data_items = 2;
8593 target_datalen = sizeof(*target_data) * data_items;
8595 if (arg2) {
8596 if (num == TARGET_NR_capget) {
8597 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8598 } else {
8599 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8601 if (!target_data) {
8602 unlock_user_struct(target_header, arg1, 0);
8603 goto efault;
8606 if (num == TARGET_NR_capset) {
8607 for (i = 0; i < data_items; i++) {
8608 data[i].effective = tswap32(target_data[i].effective);
8609 data[i].permitted = tswap32(target_data[i].permitted);
8610 data[i].inheritable = tswap32(target_data[i].inheritable);
8614 dataptr = data;
8617 if (num == TARGET_NR_capget) {
8618 ret = get_errno(capget(&header, dataptr));
8619 } else {
8620 ret = get_errno(capset(&header, dataptr));
8623 /* The kernel always updates version for both capget and capset */
8624 target_header->version = tswap32(header.version);
8625 unlock_user_struct(target_header, arg1, 1);
8627 if (arg2) {
8628 if (num == TARGET_NR_capget) {
8629 for (i = 0; i < data_items; i++) {
8630 target_data[i].effective = tswap32(data[i].effective);
8631 target_data[i].permitted = tswap32(data[i].permitted);
8632 target_data[i].inheritable = tswap32(data[i].inheritable);
8634 unlock_user(target_data, arg2, target_datalen);
8635 } else {
8636 unlock_user(target_data, arg2, 0);
8639 break;
8641 case TARGET_NR_sigaltstack:
8642 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8643 break;
8645 #ifdef CONFIG_SENDFILE
8646 case TARGET_NR_sendfile:
8648 off_t *offp = NULL;
8649 off_t off;
8650 if (arg3) {
8651 ret = get_user_sal(off, arg3);
8652 if (is_error(ret)) {
8653 break;
8655 offp = &off;
8657 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8658 if (!is_error(ret) && arg3) {
8659 abi_long ret2 = put_user_sal(off, arg3);
8660 if (is_error(ret2)) {
8661 ret = ret2;
8664 break;
8666 #ifdef TARGET_NR_sendfile64
8667 case TARGET_NR_sendfile64:
8669 off_t *offp = NULL;
8670 off_t off;
8671 if (arg3) {
8672 ret = get_user_s64(off, arg3);
8673 if (is_error(ret)) {
8674 break;
8676 offp = &off;
8678 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8679 if (!is_error(ret) && arg3) {
8680 abi_long ret2 = put_user_s64(off, arg3);
8681 if (is_error(ret2)) {
8682 ret = ret2;
8685 break;
8687 #endif
8688 #else
8689 case TARGET_NR_sendfile:
8690 #ifdef TARGET_NR_sendfile64
8691 case TARGET_NR_sendfile64:
8692 #endif
8693 goto unimplemented;
8694 #endif
8696 #ifdef TARGET_NR_getpmsg
8697 case TARGET_NR_getpmsg:
8698 goto unimplemented;
8699 #endif
8700 #ifdef TARGET_NR_putpmsg
8701 case TARGET_NR_putpmsg:
8702 goto unimplemented;
8703 #endif
8704 #ifdef TARGET_NR_vfork
8705 case TARGET_NR_vfork:
8706 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8707 0, 0, 0, 0));
8708 break;
8709 #endif
8710 #ifdef TARGET_NR_ugetrlimit
8711 case TARGET_NR_ugetrlimit:
8713 struct rlimit rlim;
8714 int resource = target_to_host_resource(arg1);
8715 ret = get_errno(getrlimit(resource, &rlim));
8716 if (!is_error(ret)) {
8717 struct target_rlimit *target_rlim;
8718 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8719 goto efault;
8720 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8721 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8722 unlock_user_struct(target_rlim, arg2, 1);
8724 break;
8726 #endif
8727 #ifdef TARGET_NR_truncate64
8728 case TARGET_NR_truncate64:
8729 if (!(p = lock_user_string(arg1)))
8730 goto efault;
8731 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8732 unlock_user(p, arg1, 0);
8733 break;
8734 #endif
8735 #ifdef TARGET_NR_ftruncate64
8736 case TARGET_NR_ftruncate64:
8737 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8738 break;
8739 #endif
8740 #ifdef TARGET_NR_stat64
8741 case TARGET_NR_stat64:
8742 if (!(p = lock_user_string(arg1)))
8743 goto efault;
8744 ret = get_errno(stat(path(p), &st));
8745 unlock_user(p, arg1, 0);
8746 if (!is_error(ret))
8747 ret = host_to_target_stat64(cpu_env, arg2, &st);
8748 break;
8749 #endif
8750 #ifdef TARGET_NR_lstat64
8751 case TARGET_NR_lstat64:
8752 if (!(p = lock_user_string(arg1)))
8753 goto efault;
8754 ret = get_errno(lstat(path(p), &st));
8755 unlock_user(p, arg1, 0);
8756 if (!is_error(ret))
8757 ret = host_to_target_stat64(cpu_env, arg2, &st);
8758 break;
8759 #endif
8760 #ifdef TARGET_NR_fstat64
8761 case TARGET_NR_fstat64:
8762 ret = get_errno(fstat(arg1, &st));
8763 if (!is_error(ret))
8764 ret = host_to_target_stat64(cpu_env, arg2, &st);
8765 break;
8766 #endif
8767 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8768 #ifdef TARGET_NR_fstatat64
8769 case TARGET_NR_fstatat64:
8770 #endif
8771 #ifdef TARGET_NR_newfstatat
8772 case TARGET_NR_newfstatat:
8773 #endif
8774 if (!(p = lock_user_string(arg2)))
8775 goto efault;
8776 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8777 if (!is_error(ret))
8778 ret = host_to_target_stat64(cpu_env, arg3, &st);
8779 break;
8780 #endif
8781 #ifdef TARGET_NR_lchown
8782 case TARGET_NR_lchown:
8783 if (!(p = lock_user_string(arg1)))
8784 goto efault;
8785 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8786 unlock_user(p, arg1, 0);
8787 break;
8788 #endif
8789 #ifdef TARGET_NR_getuid
8790 case TARGET_NR_getuid:
8791 ret = get_errno(high2lowuid(getuid()));
8792 break;
8793 #endif
8794 #ifdef TARGET_NR_getgid
8795 case TARGET_NR_getgid:
8796 ret = get_errno(high2lowgid(getgid()));
8797 break;
8798 #endif
8799 #ifdef TARGET_NR_geteuid
8800 case TARGET_NR_geteuid:
8801 ret = get_errno(high2lowuid(geteuid()));
8802 break;
8803 #endif
8804 #ifdef TARGET_NR_getegid
8805 case TARGET_NR_getegid:
8806 ret = get_errno(high2lowgid(getegid()));
8807 break;
8808 #endif
8809 case TARGET_NR_setreuid:
8810 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8811 break;
8812 case TARGET_NR_setregid:
8813 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8814 break;
8815 case TARGET_NR_getgroups:
8817 int gidsetsize = arg1;
8818 target_id *target_grouplist;
8819 gid_t *grouplist;
8820 int i;
8822 grouplist = alloca(gidsetsize * sizeof(gid_t));
8823 ret = get_errno(getgroups(gidsetsize, grouplist));
8824 if (gidsetsize == 0)
8825 break;
8826 if (!is_error(ret)) {
8827 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8828 if (!target_grouplist)
8829 goto efault;
8830 for(i = 0;i < ret; i++)
8831 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8832 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8835 break;
8836 case TARGET_NR_setgroups:
8838 int gidsetsize = arg1;
8839 target_id *target_grouplist;
8840 gid_t *grouplist = NULL;
8841 int i;
8842 if (gidsetsize) {
8843 grouplist = alloca(gidsetsize * sizeof(gid_t));
8844 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8845 if (!target_grouplist) {
8846 ret = -TARGET_EFAULT;
8847 goto fail;
8849 for (i = 0; i < gidsetsize; i++) {
8850 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8852 unlock_user(target_grouplist, arg2, 0);
8854 ret = get_errno(setgroups(gidsetsize, grouplist));
8856 break;
8857 case TARGET_NR_fchown:
8858 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8859 break;
8860 #if defined(TARGET_NR_fchownat)
8861 case TARGET_NR_fchownat:
8862 if (!(p = lock_user_string(arg2)))
8863 goto efault;
8864 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8865 low2highgid(arg4), arg5));
8866 unlock_user(p, arg2, 0);
8867 break;
8868 #endif
8869 #ifdef TARGET_NR_setresuid
8870 case TARGET_NR_setresuid:
8871 ret = get_errno(sys_setresuid(low2highuid(arg1),
8872 low2highuid(arg2),
8873 low2highuid(arg3)));
8874 break;
8875 #endif
8876 #ifdef TARGET_NR_getresuid
8877 case TARGET_NR_getresuid:
8879 uid_t ruid, euid, suid;
8880 ret = get_errno(getresuid(&ruid, &euid, &suid));
8881 if (!is_error(ret)) {
8882 if (put_user_id(high2lowuid(ruid), arg1)
8883 || put_user_id(high2lowuid(euid), arg2)
8884 || put_user_id(high2lowuid(suid), arg3))
8885 goto efault;
8888 break;
8889 #endif
8890 #ifdef TARGET_NR_getresgid
8891 case TARGET_NR_setresgid:
8892 ret = get_errno(sys_setresgid(low2highgid(arg1),
8893 low2highgid(arg2),
8894 low2highgid(arg3)));
8895 break;
8896 #endif
8897 #ifdef TARGET_NR_getresgid
8898 case TARGET_NR_getresgid:
8900 gid_t rgid, egid, sgid;
8901 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8902 if (!is_error(ret)) {
8903 if (put_user_id(high2lowgid(rgid), arg1)
8904 || put_user_id(high2lowgid(egid), arg2)
8905 || put_user_id(high2lowgid(sgid), arg3))
8906 goto efault;
8909 break;
8910 #endif
8911 #ifdef TARGET_NR_chown
8912 case TARGET_NR_chown:
8913 if (!(p = lock_user_string(arg1)))
8914 goto efault;
8915 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8916 unlock_user(p, arg1, 0);
8917 break;
8918 #endif
8919 case TARGET_NR_setuid:
8920 ret = get_errno(sys_setuid(low2highuid(arg1)));
8921 break;
8922 case TARGET_NR_setgid:
8923 ret = get_errno(sys_setgid(low2highgid(arg1)));
8924 break;
8925 case TARGET_NR_setfsuid:
8926 ret = get_errno(setfsuid(arg1));
8927 break;
8928 case TARGET_NR_setfsgid:
8929 ret = get_errno(setfsgid(arg1));
8930 break;
8932 #ifdef TARGET_NR_lchown32
8933 case TARGET_NR_lchown32:
8934 if (!(p = lock_user_string(arg1)))
8935 goto efault;
8936 ret = get_errno(lchown(p, arg2, arg3));
8937 unlock_user(p, arg1, 0);
8938 break;
8939 #endif
8940 #ifdef TARGET_NR_getuid32
8941 case TARGET_NR_getuid32:
8942 ret = get_errno(getuid());
8943 break;
8944 #endif
8946 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8947 /* Alpha specific */
8948 case TARGET_NR_getxuid:
8950 uid_t euid;
8951 euid=geteuid();
8952 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8954 ret = get_errno(getuid());
8955 break;
8956 #endif
8957 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8958 /* Alpha specific */
8959 case TARGET_NR_getxgid:
8961 uid_t egid;
8962 egid=getegid();
8963 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8965 ret = get_errno(getgid());
8966 break;
8967 #endif
8968 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8969 /* Alpha specific */
8970 case TARGET_NR_osf_getsysinfo:
8971 ret = -TARGET_EOPNOTSUPP;
8972 switch (arg1) {
8973 case TARGET_GSI_IEEE_FP_CONTROL:
8975 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8977 /* Copied from linux ieee_fpcr_to_swcr. */
8978 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8979 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8980 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8981 | SWCR_TRAP_ENABLE_DZE
8982 | SWCR_TRAP_ENABLE_OVF);
8983 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8984 | SWCR_TRAP_ENABLE_INE);
8985 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8986 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8988 if (put_user_u64 (swcr, arg2))
8989 goto efault;
8990 ret = 0;
8992 break;
8994 /* case GSI_IEEE_STATE_AT_SIGNAL:
8995 -- Not implemented in linux kernel.
8996 case GSI_UACPROC:
8997 -- Retrieves current unaligned access state; not much used.
8998 case GSI_PROC_TYPE:
8999 -- Retrieves implver information; surely not used.
9000 case GSI_GET_HWRPB:
9001 -- Grabs a copy of the HWRPB; surely not used.
9004 break;
9005 #endif
9006 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
9007 /* Alpha specific */
9008 case TARGET_NR_osf_setsysinfo:
9009 ret = -TARGET_EOPNOTSUPP;
9010 switch (arg1) {
9011 case TARGET_SSI_IEEE_FP_CONTROL:
9013 uint64_t swcr, fpcr, orig_fpcr;
9015 if (get_user_u64 (swcr, arg2)) {
9016 goto efault;
9018 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9019 fpcr = orig_fpcr & FPCR_DYN_MASK;
9021 /* Copied from linux ieee_swcr_to_fpcr. */
9022 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
9023 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
9024 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
9025 | SWCR_TRAP_ENABLE_DZE
9026 | SWCR_TRAP_ENABLE_OVF)) << 48;
9027 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
9028 | SWCR_TRAP_ENABLE_INE)) << 57;
9029 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
9030 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
9032 cpu_alpha_store_fpcr(cpu_env, fpcr);
9033 ret = 0;
9035 break;
9037 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9039 uint64_t exc, fpcr, orig_fpcr;
9040 int si_code;
9042 if (get_user_u64(exc, arg2)) {
9043 goto efault;
9046 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9048 /* We only add to the exception status here. */
9049 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9051 cpu_alpha_store_fpcr(cpu_env, fpcr);
9052 ret = 0;
9054 /* Old exceptions are not signaled. */
9055 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9057 /* If any exceptions set by this call,
9058 and are unmasked, send a signal. */
9059 si_code = 0;
9060 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9061 si_code = TARGET_FPE_FLTRES;
9063 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9064 si_code = TARGET_FPE_FLTUND;
9066 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9067 si_code = TARGET_FPE_FLTOVF;
9069 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9070 si_code = TARGET_FPE_FLTDIV;
9072 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9073 si_code = TARGET_FPE_FLTINV;
9075 if (si_code != 0) {
9076 target_siginfo_t info;
9077 info.si_signo = SIGFPE;
9078 info.si_errno = 0;
9079 info.si_code = si_code;
9080 info._sifields._sigfault._addr
9081 = ((CPUArchState *)cpu_env)->pc;
9082 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9085 break;
9087 /* case SSI_NVPAIRS:
9088 -- Used with SSIN_UACPROC to enable unaligned accesses.
9089 case SSI_IEEE_STATE_AT_SIGNAL:
9090 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9091 -- Not implemented in linux kernel
9094 break;
9095 #endif
9096 #ifdef TARGET_NR_osf_sigprocmask
9097 /* Alpha specific. */
9098 case TARGET_NR_osf_sigprocmask:
9100 abi_ulong mask;
9101 int how;
9102 sigset_t set, oldset;
9104 switch(arg1) {
9105 case TARGET_SIG_BLOCK:
9106 how = SIG_BLOCK;
9107 break;
9108 case TARGET_SIG_UNBLOCK:
9109 how = SIG_UNBLOCK;
9110 break;
9111 case TARGET_SIG_SETMASK:
9112 how = SIG_SETMASK;
9113 break;
9114 default:
9115 ret = -TARGET_EINVAL;
9116 goto fail;
9118 mask = arg2;
9119 target_to_host_old_sigset(&set, &mask);
9120 do_sigprocmask(how, &set, &oldset);
9121 host_to_target_old_sigset(&mask, &oldset);
9122 ret = mask;
9124 break;
9125 #endif
9127 #ifdef TARGET_NR_getgid32
9128 case TARGET_NR_getgid32:
9129 ret = get_errno(getgid());
9130 break;
9131 #endif
9132 #ifdef TARGET_NR_geteuid32
9133 case TARGET_NR_geteuid32:
9134 ret = get_errno(geteuid());
9135 break;
9136 #endif
9137 #ifdef TARGET_NR_getegid32
9138 case TARGET_NR_getegid32:
9139 ret = get_errno(getegid());
9140 break;
9141 #endif
9142 #ifdef TARGET_NR_setreuid32
9143 case TARGET_NR_setreuid32:
9144 ret = get_errno(setreuid(arg1, arg2));
9145 break;
9146 #endif
9147 #ifdef TARGET_NR_setregid32
9148 case TARGET_NR_setregid32:
9149 ret = get_errno(setregid(arg1, arg2));
9150 break;
9151 #endif
9152 #ifdef TARGET_NR_getgroups32
9153 case TARGET_NR_getgroups32:
9155 int gidsetsize = arg1;
9156 uint32_t *target_grouplist;
9157 gid_t *grouplist;
9158 int i;
9160 grouplist = alloca(gidsetsize * sizeof(gid_t));
9161 ret = get_errno(getgroups(gidsetsize, grouplist));
9162 if (gidsetsize == 0)
9163 break;
9164 if (!is_error(ret)) {
9165 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9166 if (!target_grouplist) {
9167 ret = -TARGET_EFAULT;
9168 goto fail;
9170 for(i = 0;i < ret; i++)
9171 target_grouplist[i] = tswap32(grouplist[i]);
9172 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9175 break;
9176 #endif
9177 #ifdef TARGET_NR_setgroups32
9178 case TARGET_NR_setgroups32:
9180 int gidsetsize = arg1;
9181 uint32_t *target_grouplist;
9182 gid_t *grouplist;
9183 int i;
9185 grouplist = alloca(gidsetsize * sizeof(gid_t));
9186 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9187 if (!target_grouplist) {
9188 ret = -TARGET_EFAULT;
9189 goto fail;
9191 for(i = 0;i < gidsetsize; i++)
9192 grouplist[i] = tswap32(target_grouplist[i]);
9193 unlock_user(target_grouplist, arg2, 0);
9194 ret = get_errno(setgroups(gidsetsize, grouplist));
9196 break;
9197 #endif
9198 #ifdef TARGET_NR_fchown32
9199 case TARGET_NR_fchown32:
9200 ret = get_errno(fchown(arg1, arg2, arg3));
9201 break;
9202 #endif
9203 #ifdef TARGET_NR_setresuid32
9204 case TARGET_NR_setresuid32:
9205 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
9206 break;
9207 #endif
9208 #ifdef TARGET_NR_getresuid32
9209 case TARGET_NR_getresuid32:
9211 uid_t ruid, euid, suid;
9212 ret = get_errno(getresuid(&ruid, &euid, &suid));
9213 if (!is_error(ret)) {
9214 if (put_user_u32(ruid, arg1)
9215 || put_user_u32(euid, arg2)
9216 || put_user_u32(suid, arg3))
9217 goto efault;
9220 break;
9221 #endif
9222 #ifdef TARGET_NR_setresgid32
9223 case TARGET_NR_setresgid32:
9224 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
9225 break;
9226 #endif
9227 #ifdef TARGET_NR_getresgid32
9228 case TARGET_NR_getresgid32:
9230 gid_t rgid, egid, sgid;
9231 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9232 if (!is_error(ret)) {
9233 if (put_user_u32(rgid, arg1)
9234 || put_user_u32(egid, arg2)
9235 || put_user_u32(sgid, arg3))
9236 goto efault;
9239 break;
9240 #endif
9241 #ifdef TARGET_NR_chown32
9242 case TARGET_NR_chown32:
9243 if (!(p = lock_user_string(arg1)))
9244 goto efault;
9245 ret = get_errno(chown(p, arg2, arg3));
9246 unlock_user(p, arg1, 0);
9247 break;
9248 #endif
9249 #ifdef TARGET_NR_setuid32
9250 case TARGET_NR_setuid32:
9251 ret = get_errno(sys_setuid(arg1));
9252 break;
9253 #endif
9254 #ifdef TARGET_NR_setgid32
9255 case TARGET_NR_setgid32:
9256 ret = get_errno(sys_setgid(arg1));
9257 break;
9258 #endif
9259 #ifdef TARGET_NR_setfsuid32
9260 case TARGET_NR_setfsuid32:
9261 ret = get_errno(setfsuid(arg1));
9262 break;
9263 #endif
9264 #ifdef TARGET_NR_setfsgid32
9265 case TARGET_NR_setfsgid32:
9266 ret = get_errno(setfsgid(arg1));
9267 break;
9268 #endif
9270 case TARGET_NR_pivot_root:
9271 goto unimplemented;
9272 #ifdef TARGET_NR_mincore
9273 case TARGET_NR_mincore:
9275 void *a;
9276 ret = -TARGET_EFAULT;
9277 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
9278 goto efault;
9279 if (!(p = lock_user_string(arg3)))
9280 goto mincore_fail;
9281 ret = get_errno(mincore(a, arg2, p));
9282 unlock_user(p, arg3, ret);
9283 mincore_fail:
9284 unlock_user(a, arg1, 0);
9286 break;
9287 #endif
9288 #ifdef TARGET_NR_arm_fadvise64_64
9289 case TARGET_NR_arm_fadvise64_64:
9292 * arm_fadvise64_64 looks like fadvise64_64 but
9293 * with different argument order
9295 abi_long temp;
9296 temp = arg3;
9297 arg3 = arg4;
9298 arg4 = temp;
9300 #endif
9301 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9302 #ifdef TARGET_NR_fadvise64_64
9303 case TARGET_NR_fadvise64_64:
9304 #endif
9305 #ifdef TARGET_NR_fadvise64
9306 case TARGET_NR_fadvise64:
9307 #endif
9308 #ifdef TARGET_S390X
9309 switch (arg4) {
9310 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9311 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9312 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9313 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9314 default: break;
9316 #endif
9317 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9318 break;
9319 #endif
9320 #ifdef TARGET_NR_madvise
9321 case TARGET_NR_madvise:
9322 /* A straight passthrough may not be safe because qemu sometimes
9323 turns private file-backed mappings into anonymous mappings.
9324 This will break MADV_DONTNEED.
9325 This is a hint, so ignoring and returning success is ok. */
9326 ret = get_errno(0);
9327 break;
9328 #endif
9329 #if TARGET_ABI_BITS == 32
9330 case TARGET_NR_fcntl64:
9332 int cmd;
9333 struct flock64 fl;
9334 struct target_flock64 *target_fl;
9335 #ifdef TARGET_ARM
9336 struct target_eabi_flock64 *target_efl;
9337 #endif
9339 cmd = target_to_host_fcntl_cmd(arg2);
9340 if (cmd == -TARGET_EINVAL) {
9341 ret = cmd;
9342 break;
9345 switch(arg2) {
9346 case TARGET_F_GETLK64:
9347 #ifdef TARGET_ARM
9348 if (((CPUARMState *)cpu_env)->eabi) {
9349 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9350 goto efault;
9351 fl.l_type = tswap16(target_efl->l_type);
9352 fl.l_whence = tswap16(target_efl->l_whence);
9353 fl.l_start = tswap64(target_efl->l_start);
9354 fl.l_len = tswap64(target_efl->l_len);
9355 fl.l_pid = tswap32(target_efl->l_pid);
9356 unlock_user_struct(target_efl, arg3, 0);
9357 } else
9358 #endif
9360 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9361 goto efault;
9362 fl.l_type = tswap16(target_fl->l_type);
9363 fl.l_whence = tswap16(target_fl->l_whence);
9364 fl.l_start = tswap64(target_fl->l_start);
9365 fl.l_len = tswap64(target_fl->l_len);
9366 fl.l_pid = tswap32(target_fl->l_pid);
9367 unlock_user_struct(target_fl, arg3, 0);
9369 ret = get_errno(fcntl(arg1, cmd, &fl));
9370 if (ret == 0) {
9371 #ifdef TARGET_ARM
9372 if (((CPUARMState *)cpu_env)->eabi) {
9373 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9374 goto efault;
9375 target_efl->l_type = tswap16(fl.l_type);
9376 target_efl->l_whence = tswap16(fl.l_whence);
9377 target_efl->l_start = tswap64(fl.l_start);
9378 target_efl->l_len = tswap64(fl.l_len);
9379 target_efl->l_pid = tswap32(fl.l_pid);
9380 unlock_user_struct(target_efl, arg3, 1);
9381 } else
9382 #endif
9384 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9385 goto efault;
9386 target_fl->l_type = tswap16(fl.l_type);
9387 target_fl->l_whence = tswap16(fl.l_whence);
9388 target_fl->l_start = tswap64(fl.l_start);
9389 target_fl->l_len = tswap64(fl.l_len);
9390 target_fl->l_pid = tswap32(fl.l_pid);
9391 unlock_user_struct(target_fl, arg3, 1);
9394 break;
9396 case TARGET_F_SETLK64:
9397 case TARGET_F_SETLKW64:
9398 #ifdef TARGET_ARM
9399 if (((CPUARMState *)cpu_env)->eabi) {
9400 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9401 goto efault;
9402 fl.l_type = tswap16(target_efl->l_type);
9403 fl.l_whence = tswap16(target_efl->l_whence);
9404 fl.l_start = tswap64(target_efl->l_start);
9405 fl.l_len = tswap64(target_efl->l_len);
9406 fl.l_pid = tswap32(target_efl->l_pid);
9407 unlock_user_struct(target_efl, arg3, 0);
9408 } else
9409 #endif
9411 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9412 goto efault;
9413 fl.l_type = tswap16(target_fl->l_type);
9414 fl.l_whence = tswap16(target_fl->l_whence);
9415 fl.l_start = tswap64(target_fl->l_start);
9416 fl.l_len = tswap64(target_fl->l_len);
9417 fl.l_pid = tswap32(target_fl->l_pid);
9418 unlock_user_struct(target_fl, arg3, 0);
9420 ret = get_errno(fcntl(arg1, cmd, &fl));
9421 break;
9422 default:
9423 ret = do_fcntl(arg1, arg2, arg3);
9424 break;
9426 break;
9428 #endif
9429 #ifdef TARGET_NR_cacheflush
9430 case TARGET_NR_cacheflush:
9431 /* self-modifying code is handled automatically, so nothing needed */
9432 ret = 0;
9433 break;
9434 #endif
9435 #ifdef TARGET_NR_security
9436 case TARGET_NR_security:
9437 goto unimplemented;
9438 #endif
9439 #ifdef TARGET_NR_getpagesize
9440 case TARGET_NR_getpagesize:
9441 ret = TARGET_PAGE_SIZE;
9442 break;
9443 #endif
9444 case TARGET_NR_gettid:
9445 ret = get_errno(gettid());
9446 break;
9447 #ifdef TARGET_NR_readahead
9448 case TARGET_NR_readahead:
9449 #if TARGET_ABI_BITS == 32
9450 if (regpairs_aligned(cpu_env)) {
9451 arg2 = arg3;
9452 arg3 = arg4;
9453 arg4 = arg5;
9455 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9456 #else
9457 ret = get_errno(readahead(arg1, arg2, arg3));
9458 #endif
9459 break;
9460 #endif
9461 #ifdef CONFIG_ATTR
9462 #ifdef TARGET_NR_setxattr
9463 case TARGET_NR_listxattr:
9464 case TARGET_NR_llistxattr:
9466 void *p, *b = 0;
9467 if (arg2) {
9468 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9469 if (!b) {
9470 ret = -TARGET_EFAULT;
9471 break;
9474 p = lock_user_string(arg1);
9475 if (p) {
9476 if (num == TARGET_NR_listxattr) {
9477 ret = get_errno(listxattr(p, b, arg3));
9478 } else {
9479 ret = get_errno(llistxattr(p, b, arg3));
9481 } else {
9482 ret = -TARGET_EFAULT;
9484 unlock_user(p, arg1, 0);
9485 unlock_user(b, arg2, arg3);
9486 break;
9488 case TARGET_NR_flistxattr:
9490 void *b = 0;
9491 if (arg2) {
9492 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9493 if (!b) {
9494 ret = -TARGET_EFAULT;
9495 break;
9498 ret = get_errno(flistxattr(arg1, b, arg3));
9499 unlock_user(b, arg2, arg3);
9500 break;
9502 case TARGET_NR_setxattr:
9503 case TARGET_NR_lsetxattr:
9505 void *p, *n, *v = 0;
9506 if (arg3) {
9507 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9508 if (!v) {
9509 ret = -TARGET_EFAULT;
9510 break;
9513 p = lock_user_string(arg1);
9514 n = lock_user_string(arg2);
9515 if (p && n) {
9516 if (num == TARGET_NR_setxattr) {
9517 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9518 } else {
9519 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9521 } else {
9522 ret = -TARGET_EFAULT;
9524 unlock_user(p, arg1, 0);
9525 unlock_user(n, arg2, 0);
9526 unlock_user(v, arg3, 0);
9528 break;
9529 case TARGET_NR_fsetxattr:
9531 void *n, *v = 0;
9532 if (arg3) {
9533 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9534 if (!v) {
9535 ret = -TARGET_EFAULT;
9536 break;
9539 n = lock_user_string(arg2);
9540 if (n) {
9541 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9542 } else {
9543 ret = -TARGET_EFAULT;
9545 unlock_user(n, arg2, 0);
9546 unlock_user(v, arg3, 0);
9548 break;
9549 case TARGET_NR_getxattr:
9550 case TARGET_NR_lgetxattr:
9552 void *p, *n, *v = 0;
9553 if (arg3) {
9554 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9555 if (!v) {
9556 ret = -TARGET_EFAULT;
9557 break;
9560 p = lock_user_string(arg1);
9561 n = lock_user_string(arg2);
9562 if (p && n) {
9563 if (num == TARGET_NR_getxattr) {
9564 ret = get_errno(getxattr(p, n, v, arg4));
9565 } else {
9566 ret = get_errno(lgetxattr(p, n, v, arg4));
9568 } else {
9569 ret = -TARGET_EFAULT;
9571 unlock_user(p, arg1, 0);
9572 unlock_user(n, arg2, 0);
9573 unlock_user(v, arg3, arg4);
9575 break;
9576 case TARGET_NR_fgetxattr:
9578 void *n, *v = 0;
9579 if (arg3) {
9580 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9581 if (!v) {
9582 ret = -TARGET_EFAULT;
9583 break;
9586 n = lock_user_string(arg2);
9587 if (n) {
9588 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9589 } else {
9590 ret = -TARGET_EFAULT;
9592 unlock_user(n, arg2, 0);
9593 unlock_user(v, arg3, arg4);
9595 break;
9596 case TARGET_NR_removexattr:
9597 case TARGET_NR_lremovexattr:
9599 void *p, *n;
9600 p = lock_user_string(arg1);
9601 n = lock_user_string(arg2);
9602 if (p && n) {
9603 if (num == TARGET_NR_removexattr) {
9604 ret = get_errno(removexattr(p, n));
9605 } else {
9606 ret = get_errno(lremovexattr(p, n));
9608 } else {
9609 ret = -TARGET_EFAULT;
9611 unlock_user(p, arg1, 0);
9612 unlock_user(n, arg2, 0);
9614 break;
9615 case TARGET_NR_fremovexattr:
9617 void *n;
9618 n = lock_user_string(arg2);
9619 if (n) {
9620 ret = get_errno(fremovexattr(arg1, n));
9621 } else {
9622 ret = -TARGET_EFAULT;
9624 unlock_user(n, arg2, 0);
9626 break;
9627 #endif
9628 #endif /* CONFIG_ATTR */
9629 #ifdef TARGET_NR_set_thread_area
9630 case TARGET_NR_set_thread_area:
9631 #if defined(TARGET_MIPS)
9632 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9633 ret = 0;
9634 break;
9635 #elif defined(TARGET_CRIS)
9636 if (arg1 & 0xff)
9637 ret = -TARGET_EINVAL;
9638 else {
9639 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9640 ret = 0;
9642 break;
9643 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9644 ret = do_set_thread_area(cpu_env, arg1);
9645 break;
9646 #elif defined(TARGET_M68K)
9648 TaskState *ts = cpu->opaque;
9649 ts->tp_value = arg1;
9650 ret = 0;
9651 break;
9653 #else
9654 goto unimplemented_nowarn;
9655 #endif
9656 #endif
9657 #ifdef TARGET_NR_get_thread_area
9658 case TARGET_NR_get_thread_area:
9659 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9660 ret = do_get_thread_area(cpu_env, arg1);
9661 break;
9662 #elif defined(TARGET_M68K)
9664 TaskState *ts = cpu->opaque;
9665 ret = ts->tp_value;
9666 break;
9668 #else
9669 goto unimplemented_nowarn;
9670 #endif
9671 #endif
9672 #ifdef TARGET_NR_getdomainname
9673 case TARGET_NR_getdomainname:
9674 goto unimplemented_nowarn;
9675 #endif
9677 #ifdef TARGET_NR_clock_gettime
9678 case TARGET_NR_clock_gettime:
9680 struct timespec ts;
9681 ret = get_errno(clock_gettime(arg1, &ts));
9682 if (!is_error(ret)) {
9683 host_to_target_timespec(arg2, &ts);
9685 break;
9687 #endif
9688 #ifdef TARGET_NR_clock_getres
9689 case TARGET_NR_clock_getres:
9691 struct timespec ts;
9692 ret = get_errno(clock_getres(arg1, &ts));
9693 if (!is_error(ret)) {
9694 host_to_target_timespec(arg2, &ts);
9696 break;
9698 #endif
9699 #ifdef TARGET_NR_clock_nanosleep
9700 case TARGET_NR_clock_nanosleep:
9702 struct timespec ts;
9703 target_to_host_timespec(&ts, arg3);
9704 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9705 if (arg4)
9706 host_to_target_timespec(arg4, &ts);
9708 #if defined(TARGET_PPC)
9709 /* clock_nanosleep is odd in that it returns positive errno values.
9710 * On PPC, CR0 bit 3 should be set in such a situation. */
9711 if (ret) {
9712 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9714 #endif
9715 break;
9717 #endif
9719 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9720 case TARGET_NR_set_tid_address:
9721 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9722 break;
9723 #endif
9725 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9726 case TARGET_NR_tkill:
9727 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9728 break;
9729 #endif
9731 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9732 case TARGET_NR_tgkill:
9733 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9734 target_to_host_signal(arg3)));
9735 break;
9736 #endif
9738 #ifdef TARGET_NR_set_robust_list
9739 case TARGET_NR_set_robust_list:
9740 case TARGET_NR_get_robust_list:
9741 /* The ABI for supporting robust futexes has userspace pass
9742 * the kernel a pointer to a linked list which is updated by
9743 * userspace after the syscall; the list is walked by the kernel
9744 * when the thread exits. Since the linked list in QEMU guest
9745 * memory isn't a valid linked list for the host and we have
9746 * no way to reliably intercept the thread-death event, we can't
9747 * support these. Silently return ENOSYS so that guest userspace
9748 * falls back to a non-robust futex implementation (which should
9749 * be OK except in the corner case of the guest crashing while
9750 * holding a mutex that is shared with another process via
9751 * shared memory).
9753 goto unimplemented_nowarn;
9754 #endif
9756 #if defined(TARGET_NR_utimensat)
9757 case TARGET_NR_utimensat:
9759 struct timespec *tsp, ts[2];
9760 if (!arg3) {
9761 tsp = NULL;
9762 } else {
9763 target_to_host_timespec(ts, arg3);
9764 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9765 tsp = ts;
9767 if (!arg2)
9768 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9769 else {
9770 if (!(p = lock_user_string(arg2))) {
9771 ret = -TARGET_EFAULT;
9772 goto fail;
9774 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9775 unlock_user(p, arg2, 0);
9778 break;
9779 #endif
9780 case TARGET_NR_futex:
9781 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9782 break;
9783 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9784 case TARGET_NR_inotify_init:
9785 ret = get_errno(sys_inotify_init());
9786 break;
9787 #endif
9788 #ifdef CONFIG_INOTIFY1
9789 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9790 case TARGET_NR_inotify_init1:
9791 ret = get_errno(sys_inotify_init1(arg1));
9792 break;
9793 #endif
9794 #endif
9795 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9796 case TARGET_NR_inotify_add_watch:
9797 p = lock_user_string(arg2);
9798 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9799 unlock_user(p, arg2, 0);
9800 break;
9801 #endif
9802 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9803 case TARGET_NR_inotify_rm_watch:
9804 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9805 break;
9806 #endif
9808 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9809 case TARGET_NR_mq_open:
9811 struct mq_attr posix_mq_attr, *attrp;
9813 p = lock_user_string(arg1 - 1);
9814 if (arg4 != 0) {
9815 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9816 attrp = &posix_mq_attr;
9817 } else {
9818 attrp = 0;
9820 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9821 unlock_user (p, arg1, 0);
9823 break;
9825 case TARGET_NR_mq_unlink:
9826 p = lock_user_string(arg1 - 1);
9827 ret = get_errno(mq_unlink(p));
9828 unlock_user (p, arg1, 0);
9829 break;
9831 case TARGET_NR_mq_timedsend:
9833 struct timespec ts;
9835 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9836 if (arg5 != 0) {
9837 target_to_host_timespec(&ts, arg5);
9838 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9839 host_to_target_timespec(arg5, &ts);
9841 else
9842 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9843 unlock_user (p, arg2, arg3);
9845 break;
9847 case TARGET_NR_mq_timedreceive:
9849 struct timespec ts;
9850 unsigned int prio;
9852 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9853 if (arg5 != 0) {
9854 target_to_host_timespec(&ts, arg5);
9855 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9856 host_to_target_timespec(arg5, &ts);
9858 else
9859 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9860 unlock_user (p, arg2, arg3);
9861 if (arg4 != 0)
9862 put_user_u32(prio, arg4);
9864 break;
9866 /* Not implemented for now... */
9867 /* case TARGET_NR_mq_notify: */
9868 /* break; */
9870 case TARGET_NR_mq_getsetattr:
9872 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9873 ret = 0;
9874 if (arg3 != 0) {
9875 ret = mq_getattr(arg1, &posix_mq_attr_out);
9876 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9878 if (arg2 != 0) {
9879 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9880 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9884 break;
9885 #endif
9887 #ifdef CONFIG_SPLICE
9888 #ifdef TARGET_NR_tee
9889 case TARGET_NR_tee:
9891 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9893 break;
9894 #endif
9895 #ifdef TARGET_NR_splice
9896 case TARGET_NR_splice:
9898 loff_t loff_in, loff_out;
9899 loff_t *ploff_in = NULL, *ploff_out = NULL;
9900 if (arg2) {
9901 if (get_user_u64(loff_in, arg2)) {
9902 goto efault;
9904 ploff_in = &loff_in;
9906 if (arg4) {
9907 if (get_user_u64(loff_out, arg4)) {
9908 goto efault;
9910 ploff_out = &loff_out;
9912 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9913 if (arg2) {
9914 if (put_user_u64(loff_in, arg2)) {
9915 goto efault;
9918 if (arg4) {
9919 if (put_user_u64(loff_out, arg4)) {
9920 goto efault;
9924 break;
9925 #endif
9926 #ifdef TARGET_NR_vmsplice
9927 case TARGET_NR_vmsplice:
9929 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9930 if (vec != NULL) {
9931 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9932 unlock_iovec(vec, arg2, arg3, 0);
9933 } else {
9934 ret = -host_to_target_errno(errno);
9937 break;
9938 #endif
9939 #endif /* CONFIG_SPLICE */
9940 #ifdef CONFIG_EVENTFD
9941 #if defined(TARGET_NR_eventfd)
9942 case TARGET_NR_eventfd:
9943 ret = get_errno(eventfd(arg1, 0));
9944 fd_trans_unregister(ret);
9945 break;
9946 #endif
9947 #if defined(TARGET_NR_eventfd2)
9948 case TARGET_NR_eventfd2:
9950 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9951 if (arg2 & TARGET_O_NONBLOCK) {
9952 host_flags |= O_NONBLOCK;
9954 if (arg2 & TARGET_O_CLOEXEC) {
9955 host_flags |= O_CLOEXEC;
9957 ret = get_errno(eventfd(arg1, host_flags));
9958 fd_trans_unregister(ret);
9959 break;
9961 #endif
9962 #endif /* CONFIG_EVENTFD */
9963 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9964 case TARGET_NR_fallocate:
9965 #if TARGET_ABI_BITS == 32
9966 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9967 target_offset64(arg5, arg6)));
9968 #else
9969 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9970 #endif
9971 break;
9972 #endif
9973 #if defined(CONFIG_SYNC_FILE_RANGE)
9974 #if defined(TARGET_NR_sync_file_range)
9975 case TARGET_NR_sync_file_range:
9976 #if TARGET_ABI_BITS == 32
9977 #if defined(TARGET_MIPS)
9978 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9979 target_offset64(arg5, arg6), arg7));
9980 #else
9981 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9982 target_offset64(arg4, arg5), arg6));
9983 #endif /* !TARGET_MIPS */
9984 #else
9985 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9986 #endif
9987 break;
9988 #endif
9989 #if defined(TARGET_NR_sync_file_range2)
9990 case TARGET_NR_sync_file_range2:
9991 /* This is like sync_file_range but the arguments are reordered */
9992 #if TARGET_ABI_BITS == 32
9993 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9994 target_offset64(arg5, arg6), arg2));
9995 #else
9996 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9997 #endif
9998 break;
9999 #endif
10000 #endif
10001 #if defined(TARGET_NR_signalfd4)
10002 case TARGET_NR_signalfd4:
10003 ret = do_signalfd4(arg1, arg2, arg4);
10004 break;
10005 #endif
10006 #if defined(TARGET_NR_signalfd)
10007 case TARGET_NR_signalfd:
10008 ret = do_signalfd4(arg1, arg2, 0);
10009 break;
10010 #endif
10011 #if defined(CONFIG_EPOLL)
10012 #if defined(TARGET_NR_epoll_create)
10013 case TARGET_NR_epoll_create:
10014 ret = get_errno(epoll_create(arg1));
10015 break;
10016 #endif
10017 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
10018 case TARGET_NR_epoll_create1:
10019 ret = get_errno(epoll_create1(arg1));
10020 break;
10021 #endif
10022 #if defined(TARGET_NR_epoll_ctl)
10023 case TARGET_NR_epoll_ctl:
10025 struct epoll_event ep;
10026 struct epoll_event *epp = 0;
10027 if (arg4) {
10028 struct target_epoll_event *target_ep;
10029 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
10030 goto efault;
10032 ep.events = tswap32(target_ep->events);
10033 /* The epoll_data_t union is just opaque data to the kernel,
10034 * so we transfer all 64 bits across and need not worry what
10035 * actual data type it is.
10037 ep.data.u64 = tswap64(target_ep->data.u64);
10038 unlock_user_struct(target_ep, arg4, 0);
10039 epp = &ep;
10041 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10042 break;
10044 #endif
10046 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10047 #define IMPLEMENT_EPOLL_PWAIT
10048 #endif
10049 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10050 #if defined(TARGET_NR_epoll_wait)
10051 case TARGET_NR_epoll_wait:
10052 #endif
10053 #if defined(IMPLEMENT_EPOLL_PWAIT)
10054 case TARGET_NR_epoll_pwait:
10055 #endif
10057 struct target_epoll_event *target_ep;
10058 struct epoll_event *ep;
10059 int epfd = arg1;
10060 int maxevents = arg3;
10061 int timeout = arg4;
10063 target_ep = lock_user(VERIFY_WRITE, arg2,
10064 maxevents * sizeof(struct target_epoll_event), 1);
10065 if (!target_ep) {
10066 goto efault;
10069 ep = alloca(maxevents * sizeof(struct epoll_event));
10071 switch (num) {
10072 #if defined(IMPLEMENT_EPOLL_PWAIT)
10073 case TARGET_NR_epoll_pwait:
10075 target_sigset_t *target_set;
10076 sigset_t _set, *set = &_set;
10078 if (arg5) {
10079 target_set = lock_user(VERIFY_READ, arg5,
10080 sizeof(target_sigset_t), 1);
10081 if (!target_set) {
10082 unlock_user(target_ep, arg2, 0);
10083 goto efault;
10085 target_to_host_sigset(set, target_set);
10086 unlock_user(target_set, arg5, 0);
10087 } else {
10088 set = NULL;
10091 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
10092 break;
10094 #endif
10095 #if defined(TARGET_NR_epoll_wait)
10096 case TARGET_NR_epoll_wait:
10097 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
10098 break;
10099 #endif
10100 default:
10101 ret = -TARGET_ENOSYS;
10103 if (!is_error(ret)) {
10104 int i;
10105 for (i = 0; i < ret; i++) {
10106 target_ep[i].events = tswap32(ep[i].events);
10107 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10110 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10111 break;
10113 #endif
10114 #endif
10115 #ifdef TARGET_NR_prlimit64
10116 case TARGET_NR_prlimit64:
10118 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10119 struct target_rlimit64 *target_rnew, *target_rold;
10120 struct host_rlimit64 rnew, rold, *rnewp = 0;
10121 int resource = target_to_host_resource(arg2);
10122 if (arg3) {
10123 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10124 goto efault;
10126 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10127 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10128 unlock_user_struct(target_rnew, arg3, 0);
10129 rnewp = &rnew;
10132 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10133 if (!is_error(ret) && arg4) {
10134 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10135 goto efault;
10137 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10138 target_rold->rlim_max = tswap64(rold.rlim_max);
10139 unlock_user_struct(target_rold, arg4, 1);
10141 break;
10143 #endif
10144 #ifdef TARGET_NR_gethostname
10145 case TARGET_NR_gethostname:
10147 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10148 if (name) {
10149 ret = get_errno(gethostname(name, arg2));
10150 unlock_user(name, arg1, arg2);
10151 } else {
10152 ret = -TARGET_EFAULT;
10154 break;
10156 #endif
10157 #ifdef TARGET_NR_atomic_cmpxchg_32
10158 case TARGET_NR_atomic_cmpxchg_32:
10160 /* should use start_exclusive from main.c */
10161 abi_ulong mem_value;
10162 if (get_user_u32(mem_value, arg6)) {
10163 target_siginfo_t info;
10164 info.si_signo = SIGSEGV;
10165 info.si_errno = 0;
10166 info.si_code = TARGET_SEGV_MAPERR;
10167 info._sifields._sigfault._addr = arg6;
10168 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10169 ret = 0xdeadbeef;
10172 if (mem_value == arg2)
10173 put_user_u32(arg1, arg6);
10174 ret = mem_value;
10175 break;
10177 #endif
10178 #ifdef TARGET_NR_atomic_barrier
10179 case TARGET_NR_atomic_barrier:
10181 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10182 ret = 0;
10183 break;
10185 #endif
10187 #ifdef TARGET_NR_timer_create
10188 case TARGET_NR_timer_create:
10190 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10192 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10194 int clkid = arg1;
10195 int timer_index = next_free_host_timer();
10197 if (timer_index < 0) {
10198 ret = -TARGET_EAGAIN;
10199 } else {
10200 timer_t *phtimer = g_posix_timers + timer_index;
10202 if (arg2) {
10203 phost_sevp = &host_sevp;
10204 ret = target_to_host_sigevent(phost_sevp, arg2);
10205 if (ret != 0) {
10206 break;
10210 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10211 if (ret) {
10212 phtimer = NULL;
10213 } else {
10214 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10215 goto efault;
10219 break;
10221 #endif
10223 #ifdef TARGET_NR_timer_settime
10224 case TARGET_NR_timer_settime:
10226 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10227 * struct itimerspec * old_value */
10228 target_timer_t timerid = get_timer_id(arg1);
10230 if (timerid < 0) {
10231 ret = timerid;
10232 } else if (arg3 == 0) {
10233 ret = -TARGET_EINVAL;
10234 } else {
10235 timer_t htimer = g_posix_timers[timerid];
10236 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
10238 target_to_host_itimerspec(&hspec_new, arg3);
10239 ret = get_errno(
10240 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
10241 host_to_target_itimerspec(arg2, &hspec_old);
10243 break;
10245 #endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
        {
            /* args: timer_t timerid, struct itimerspec *curr_value */
            target_timer_t timerid = get_timer_id(arg1);

            if (timerid < 0) {
                /* get_timer_id() already returned a target errno. */
                ret = timerid;
            } else if (!arg2) {
                ret = -TARGET_EFAULT;
            } else {
                timer_t htimer = g_posix_timers[timerid];
                struct itimerspec hspec;

                ret = get_errno(timer_gettime(htimer, &hspec));
                /* Copy the result out to the guest, if the call succeeded. */
                if (host_to_target_itimerspec(arg2, &hspec)) {
                    ret = -TARGET_EFAULT;
                }
            }
        }
        break;
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
        {
            /* args: timer_t timerid */
            target_timer_t timerid = get_timer_id(arg1);

            if (timerid < 0) {
                ret = timerid;
            } else {
                timer_t htimer = g_posix_timers[timerid];
                ret = get_errno(timer_getoverrun(htimer));
            }
            /* NOTE(review): ret here is an overrun count, not an fd —
             * confirm this fd_trans_unregister() call is intentional.
             */
            fd_trans_unregister(ret);
        }
        break;
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
        {
            /* args: timer_t timerid */
            target_timer_t timerid = get_timer_id(arg1);

            if (timerid < 0) {
                ret = timerid;
            } else {
                timer_t htimer = g_posix_timers[timerid];
                ret = get_errno(timer_delete(htimer));
                /* Free the slot so it can be reused by timer_create. */
                g_posix_timers[timerid] = 0;
            }
        }
        break;
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Translate guest TFD_* flag bits to the host's encoding. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* curr_value is written back only when the guest passed a
             * non-NULL pointer.
             */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                /* Convert the guest's new_value into host layout. */
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* Optionally report the previous setting back to the guest. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* args: int which, int who — values pass through unchanged. */
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* args: int which, int who, int ioprio — values pass through unchanged. */
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* args: int fd, int nstype — forwarded directly to the host. */
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* args: int flags — forwarded directly to the host. */
        ret = get_errno(unshare(arg1));
        break;
#endif
10371 default:
10372 unimplemented:
10373 gemu_log("qemu: Unsupported syscall: %d\n", num);
10374 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10375 unimplemented_nowarn:
10376 #endif
10377 ret = -TARGET_ENOSYS;
10378 break;
10380 fail:
10381 #ifdef DEBUG
10382 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10383 #endif
10384 if(do_strace)
10385 print_syscall_ret(num, ret);
10386 return ret;
10387 efault:
10388 ret = -TARGET_EFAULT;
10389 goto fail;