linux-user: Handle msgrcv error case correctly
[qemu.git] / linux-user / syscall.c
blobcec5b80331701467c06159f6c7d1757da02622d3
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/mman.h>
36 #include <sys/swap.h>
37 #include <linux/capability.h>
38 #include <sched.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <sys/poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
60 #ifdef CONFIG_TIMERFD
61 #include <sys/timerfd.h>
62 #endif
63 #ifdef TARGET_GPROF
64 #include <sys/gmon.h>
65 #endif
66 #ifdef CONFIG_EVENTFD
67 #include <sys/eventfd.h>
68 #endif
69 #ifdef CONFIG_EPOLL
70 #include <sys/epoll.h>
71 #endif
72 #ifdef CONFIG_ATTR
73 #include "qemu/xattr.h"
74 #endif
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
77 #endif
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
91 #include <linux/kd.h>
92 #include <linux/mtio.h>
93 #include <linux/fs.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
96 #endif
97 #include <linux/fb.h>
98 #include <linux/vt.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include "linux_loop.h"
105 #include "uname.h"
107 #include "qemu.h"
109 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
110 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
112 //#define DEBUG
113 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
114 * once. This exercises the codepaths for restart.
116 //#define DEBUG_ERESTARTSYS
118 //#include <linux/msdos_fs.h>
119 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
120 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
123 #undef _syscall0
124 #undef _syscall1
125 #undef _syscall2
126 #undef _syscall3
127 #undef _syscall4
128 #undef _syscall5
129 #undef _syscall6
131 #define _syscall0(type,name) \
132 static type name (void) \
134 return syscall(__NR_##name); \
137 #define _syscall1(type,name,type1,arg1) \
138 static type name (type1 arg1) \
140 return syscall(__NR_##name, arg1); \
143 #define _syscall2(type,name,type1,arg1,type2,arg2) \
144 static type name (type1 arg1,type2 arg2) \
146 return syscall(__NR_##name, arg1, arg2); \
149 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
150 static type name (type1 arg1,type2 arg2,type3 arg3) \
152 return syscall(__NR_##name, arg1, arg2, arg3); \
155 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
156 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
158 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
161 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
162 type5,arg5) \
163 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
165 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
169 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
170 type5,arg5,type6,arg6) \
171 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
172 type6 arg6) \
174 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
178 #define __NR_sys_uname __NR_uname
179 #define __NR_sys_getcwd1 __NR_getcwd
180 #define __NR_sys_getdents __NR_getdents
181 #define __NR_sys_getdents64 __NR_getdents64
182 #define __NR_sys_getpriority __NR_getpriority
183 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
184 #define __NR_sys_syslog __NR_syslog
185 #define __NR_sys_tgkill __NR_tgkill
186 #define __NR_sys_tkill __NR_tkill
187 #define __NR_sys_futex __NR_futex
188 #define __NR_sys_inotify_init __NR_inotify_init
189 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
190 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
192 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
193 defined(__s390x__)
194 #define __NR__llseek __NR_lseek
195 #endif
197 /* Newer kernel ports have llseek() instead of _llseek() */
198 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
199 #define TARGET_NR__llseek TARGET_NR_llseek
200 #endif
202 #ifdef __NR_gettid
203 _syscall0(int, gettid)
204 #else
205 /* This is a replacement for the host gettid() and must return a host
206 errno. */
207 static int gettid(void) {
208 return -ENOSYS;
210 #endif
211 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
212 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
213 #endif
214 #if !defined(__NR_getdents) || \
215 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
220 loff_t *, res, uint, wh);
221 #endif
222 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
223 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
226 #endif
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill,int,tid,int,sig)
229 #endif
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group,int,error_code)
232 #endif
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address,int *,tidptr)
235 #endif
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
238 const struct timespec *,timeout,int *,uaddr2,int,val3)
239 #endif
240 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
241 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
244 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
247 void *, arg);
248 _syscall2(int, capget, struct __user_cap_header_struct *, header,
249 struct __user_cap_data_struct *, data);
250 _syscall2(int, capset, struct __user_cap_header_struct *, header,
251 struct __user_cap_data_struct *, data);
252 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
253 _syscall2(int, ioprio_get, int, which, int, who)
254 #endif
255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
256 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
257 #endif
258 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
259 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
260 #endif
262 static bitmask_transtbl fcntl_flags_tbl[] = {
263 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
264 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
265 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
266 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
267 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
268 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
269 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
270 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
271 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
272 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
273 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
274 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
275 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
278 #endif
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
281 #endif
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
284 #endif
285 #if defined(O_PATH)
286 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
287 #endif
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
291 #endif
292 { 0, 0, 0, 0 }
295 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
296 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
297 typedef struct TargetFdTrans {
298 TargetFdDataFunc host_to_target_data;
299 TargetFdDataFunc target_to_host_data;
300 TargetFdAddrFunc target_to_host_addr;
301 } TargetFdTrans;
303 static TargetFdTrans **target_fd_trans;
305 static unsigned int target_fd_max;
307 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
309 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
310 return target_fd_trans[fd]->host_to_target_data;
312 return NULL;
315 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
317 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
318 return target_fd_trans[fd]->target_to_host_addr;
320 return NULL;
323 static void fd_trans_register(int fd, TargetFdTrans *trans)
325 unsigned int oldmax;
327 if (fd >= target_fd_max) {
328 oldmax = target_fd_max;
329 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
330 target_fd_trans = g_renew(TargetFdTrans *,
331 target_fd_trans, target_fd_max);
332 memset((void *)(target_fd_trans + oldmax), 0,
333 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
335 target_fd_trans[fd] = trans;
338 static void fd_trans_unregister(int fd)
340 if (fd >= 0 && fd < target_fd_max) {
341 target_fd_trans[fd] = NULL;
345 static void fd_trans_dup(int oldfd, int newfd)
347 fd_trans_unregister(newfd);
348 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
349 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper: on success return the length of the path string
 * including its trailing NUL; on failure return -1 (getcwd() has
 * already set errno). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    if (cwd == NULL) {
        return -1;
    }
    return strlen(cwd) + 1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host libc has utimensat(): a NULL pathname means "operate on dirfd
 * itself", which maps to futimens(). */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    } else {
        return utimensat(dirfd, pathname, times, flags);
    }
}
#elif defined(__NR_utimensat)
/* No libc wrapper, but the host kernel has the syscall: call it raw. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Neither libc nor kernel support: report ENOSYS like the kernel would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers so the syscall dispatch code can call the inotify
 * family uniformly whether or not the host libc provides them. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
423 #if defined(TARGET_NR_ppoll)
424 #ifndef __NR_ppoll
425 # define __NR_ppoll -1
426 #endif
427 #define __NR_sys_ppoll __NR_ppoll
428 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
429 struct timespec *, timeout, const sigset_t *, sigmask,
430 size_t, sigsetsize)
431 #endif
433 #if defined(TARGET_NR_prlimit64)
434 #ifndef __NR_prlimit64
435 # define __NR_prlimit64 -1
436 #endif
437 #define __NR_sys_prlimit64 __NR_prlimit64
438 /* The glibc rlimit structure may not be that used by the underlying syscall */
439 struct host_rlimit64 {
440 uint64_t rlim_cur;
441 uint64_t rlim_max;
443 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
444 const struct host_rlimit64 *, new_limit,
445 struct host_rlimit64 *, old_limit)
446 #endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time.
 * A slot value of 0 means "free"; 1 is a placeholder claim marker. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers and return its index,
 * or -1 if every slot is already in use.
 * FIXME: Does finding the next free slot require a lock? */
static inline int next_free_host_timer(void)
{
    int slot;

    for (slot = 0; slot < ARRAY_SIZE(g_posix_timers); slot++) {
        if (g_posix_timers[slot] == 0) {
            g_posix_timers[slot] = (timer_t) 1;
            return slot;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env)
{
    /* Only the EABI calling convention aligns 64-bit register pairs. */
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
483 #define ERRNO_TABLE_SIZE 1200
485 /* target_to_host_errno_table[] is initialized from
486 * host_to_target_errno_table[] in syscall_init(). */
487 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
491 * This list is the union of errno values overridden in asm-<arch>/errno.h
492 * minus the errnos that are not actually generic to all archs.
494 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
495 [EAGAIN] = TARGET_EAGAIN,
496 [EIDRM] = TARGET_EIDRM,
497 [ECHRNG] = TARGET_ECHRNG,
498 [EL2NSYNC] = TARGET_EL2NSYNC,
499 [EL3HLT] = TARGET_EL3HLT,
500 [EL3RST] = TARGET_EL3RST,
501 [ELNRNG] = TARGET_ELNRNG,
502 [EUNATCH] = TARGET_EUNATCH,
503 [ENOCSI] = TARGET_ENOCSI,
504 [EL2HLT] = TARGET_EL2HLT,
505 [EDEADLK] = TARGET_EDEADLK,
506 [ENOLCK] = TARGET_ENOLCK,
507 [EBADE] = TARGET_EBADE,
508 [EBADR] = TARGET_EBADR,
509 [EXFULL] = TARGET_EXFULL,
510 [ENOANO] = TARGET_ENOANO,
511 [EBADRQC] = TARGET_EBADRQC,
512 [EBADSLT] = TARGET_EBADSLT,
513 [EBFONT] = TARGET_EBFONT,
514 [ENOSTR] = TARGET_ENOSTR,
515 [ENODATA] = TARGET_ENODATA,
516 [ETIME] = TARGET_ETIME,
517 [ENOSR] = TARGET_ENOSR,
518 [ENONET] = TARGET_ENONET,
519 [ENOPKG] = TARGET_ENOPKG,
520 [EREMOTE] = TARGET_EREMOTE,
521 [ENOLINK] = TARGET_ENOLINK,
522 [EADV] = TARGET_EADV,
523 [ESRMNT] = TARGET_ESRMNT,
524 [ECOMM] = TARGET_ECOMM,
525 [EPROTO] = TARGET_EPROTO,
526 [EDOTDOT] = TARGET_EDOTDOT,
527 [EMULTIHOP] = TARGET_EMULTIHOP,
528 [EBADMSG] = TARGET_EBADMSG,
529 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
530 [EOVERFLOW] = TARGET_EOVERFLOW,
531 [ENOTUNIQ] = TARGET_ENOTUNIQ,
532 [EBADFD] = TARGET_EBADFD,
533 [EREMCHG] = TARGET_EREMCHG,
534 [ELIBACC] = TARGET_ELIBACC,
535 [ELIBBAD] = TARGET_ELIBBAD,
536 [ELIBSCN] = TARGET_ELIBSCN,
537 [ELIBMAX] = TARGET_ELIBMAX,
538 [ELIBEXEC] = TARGET_ELIBEXEC,
539 [EILSEQ] = TARGET_EILSEQ,
540 [ENOSYS] = TARGET_ENOSYS,
541 [ELOOP] = TARGET_ELOOP,
542 [ERESTART] = TARGET_ERESTART,
543 [ESTRPIPE] = TARGET_ESTRPIPE,
544 [ENOTEMPTY] = TARGET_ENOTEMPTY,
545 [EUSERS] = TARGET_EUSERS,
546 [ENOTSOCK] = TARGET_ENOTSOCK,
547 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
548 [EMSGSIZE] = TARGET_EMSGSIZE,
549 [EPROTOTYPE] = TARGET_EPROTOTYPE,
550 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
551 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
552 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
553 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
554 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
555 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
556 [EADDRINUSE] = TARGET_EADDRINUSE,
557 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
558 [ENETDOWN] = TARGET_ENETDOWN,
559 [ENETUNREACH] = TARGET_ENETUNREACH,
560 [ENETRESET] = TARGET_ENETRESET,
561 [ECONNABORTED] = TARGET_ECONNABORTED,
562 [ECONNRESET] = TARGET_ECONNRESET,
563 [ENOBUFS] = TARGET_ENOBUFS,
564 [EISCONN] = TARGET_EISCONN,
565 [ENOTCONN] = TARGET_ENOTCONN,
566 [EUCLEAN] = TARGET_EUCLEAN,
567 [ENOTNAM] = TARGET_ENOTNAM,
568 [ENAVAIL] = TARGET_ENAVAIL,
569 [EISNAM] = TARGET_EISNAM,
570 [EREMOTEIO] = TARGET_EREMOTEIO,
571 [ESHUTDOWN] = TARGET_ESHUTDOWN,
572 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
573 [ETIMEDOUT] = TARGET_ETIMEDOUT,
574 [ECONNREFUSED] = TARGET_ECONNREFUSED,
575 [EHOSTDOWN] = TARGET_EHOSTDOWN,
576 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
577 [EALREADY] = TARGET_EALREADY,
578 [EINPROGRESS] = TARGET_EINPROGRESS,
579 [ESTALE] = TARGET_ESTALE,
580 [ECANCELED] = TARGET_ECANCELED,
581 [ENOMEDIUM] = TARGET_ENOMEDIUM,
582 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
583 #ifdef ENOKEY
584 [ENOKEY] = TARGET_ENOKEY,
585 #endif
586 #ifdef EKEYEXPIRED
587 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
588 #endif
589 #ifdef EKEYREVOKED
590 [EKEYREVOKED] = TARGET_EKEYREVOKED,
591 #endif
592 #ifdef EKEYREJECTED
593 [EKEYREJECTED] = TARGET_EKEYREJECTED,
594 #endif
595 #ifdef EOWNERDEAD
596 [EOWNERDEAD] = TARGET_EOWNERDEAD,
597 #endif
598 #ifdef ENOTRECOVERABLE
599 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
600 #endif
603 static inline int host_to_target_errno(int err)
605 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
606 host_to_target_errno_table[err]) {
607 return host_to_target_errno_table[err];
609 return err;
612 static inline int target_to_host_errno(int err)
614 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
615 target_to_host_errno_table[err]) {
616 return target_to_host_errno_table[err];
618 return err;
621 static inline abi_long get_errno(abi_long ret)
623 if (ret == -1)
624 return -host_to_target_errno(errno);
625 else
626 return ret;
629 static inline int is_error(abi_long ret)
631 return (abi_ulong)ret >= (abi_ulong)(-4096);
634 char *target_strerror(int err)
636 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
637 return NULL;
639 return strerror(target_to_host_errno(err));
642 #define safe_syscall0(type, name) \
643 static type safe_##name(void) \
645 return safe_syscall(__NR_##name); \
648 #define safe_syscall1(type, name, type1, arg1) \
649 static type safe_##name(type1 arg1) \
651 return safe_syscall(__NR_##name, arg1); \
654 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
655 static type safe_##name(type1 arg1, type2 arg2) \
657 return safe_syscall(__NR_##name, arg1, arg2); \
660 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
661 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
663 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
666 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
667 type4, arg4) \
668 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
670 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
673 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
674 type4, arg4, type5, arg5) \
675 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
676 type5 arg5) \
678 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
681 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
682 type4, arg4, type5, arg5, type6, arg6) \
683 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
684 type5 arg5, type6 arg6) \
686 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
689 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
690 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
691 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
692 int, flags, mode_t, mode)
693 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
694 struct rusage *, rusage)
695 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
696 int, options, struct rusage *, rusage)
697 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
698 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
699 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
700 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
701 const struct timespec *,timeout,int *,uaddr2,int,val3)
703 static inline int host_to_target_sock_type(int host_type)
705 int target_type;
707 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
708 case SOCK_DGRAM:
709 target_type = TARGET_SOCK_DGRAM;
710 break;
711 case SOCK_STREAM:
712 target_type = TARGET_SOCK_STREAM;
713 break;
714 default:
715 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
716 break;
719 #if defined(SOCK_CLOEXEC)
720 if (host_type & SOCK_CLOEXEC) {
721 target_type |= TARGET_SOCK_CLOEXEC;
723 #endif
725 #if defined(SOCK_NONBLOCK)
726 if (host_type & SOCK_NONBLOCK) {
727 target_type |= TARGET_SOCK_NONBLOCK;
729 #endif
731 return target_type;
734 static abi_ulong target_brk;
735 static abi_ulong target_original_brk;
736 static abi_ulong brk_page;
738 void target_set_brk(abi_ulong new_brk)
740 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
741 brk_page = HOST_PAGE_ALIGN(target_brk);
744 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
745 #define DEBUGF_BRK(message, args...)
747 /* do_brk() must return target values and target errnos. */
748 abi_long do_brk(abi_ulong new_brk)
750 abi_long mapped_addr;
751 int new_alloc_size;
753 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
755 if (!new_brk) {
756 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
757 return target_brk;
759 if (new_brk < target_original_brk) {
760 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
761 target_brk);
762 return target_brk;
765 /* If the new brk is less than the highest page reserved to the
766 * target heap allocation, set it and we're almost done... */
767 if (new_brk <= brk_page) {
768 /* Heap contents are initialized to zero, as for anonymous
769 * mapped pages. */
770 if (new_brk > target_brk) {
771 memset(g2h(target_brk), 0, new_brk - target_brk);
773 target_brk = new_brk;
774 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
775 return target_brk;
778 /* We need to allocate more memory after the brk... Note that
779 * we don't use MAP_FIXED because that will map over the top of
780 * any existing mapping (like the one with the host libc or qemu
781 * itself); instead we treat "mapped but at wrong address" as
782 * a failure and unmap again.
784 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
785 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
786 PROT_READ|PROT_WRITE,
787 MAP_ANON|MAP_PRIVATE, 0, 0));
789 if (mapped_addr == brk_page) {
790 /* Heap contents are initialized to zero, as for anonymous
791 * mapped pages. Technically the new pages are already
792 * initialized to zero since they *are* anonymous mapped
793 * pages, however we have to take care with the contents that
794 * come from the remaining part of the previous page: it may
795 * contains garbage data due to a previous heap usage (grown
796 * then shrunken). */
797 memset(g2h(target_brk), 0, brk_page - target_brk);
799 target_brk = new_brk;
800 brk_page = HOST_PAGE_ALIGN(target_brk);
801 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
802 target_brk);
803 return target_brk;
804 } else if (mapped_addr != -1) {
805 /* Mapped but at wrong address, meaning there wasn't actually
806 * enough space for this brk.
808 target_munmap(mapped_addr, new_alloc_size);
809 mapped_addr = -1;
810 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
812 else {
813 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
816 #if defined(TARGET_ALPHA)
817 /* We (partially) emulate OSF/1 on Alpha, which requires we
818 return a proper errno, not an unchanged brk value. */
819 return -TARGET_ENOMEM;
820 #endif
821 /* For everything else, return the previous break. */
822 return target_brk;
825 static inline abi_long copy_from_user_fdset(fd_set *fds,
826 abi_ulong target_fds_addr,
827 int n)
829 int i, nw, j, k;
830 abi_ulong b, *target_fds;
832 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
833 if (!(target_fds = lock_user(VERIFY_READ,
834 target_fds_addr,
835 sizeof(abi_ulong) * nw,
836 1)))
837 return -TARGET_EFAULT;
839 FD_ZERO(fds);
840 k = 0;
841 for (i = 0; i < nw; i++) {
842 /* grab the abi_ulong */
843 __get_user(b, &target_fds[i]);
844 for (j = 0; j < TARGET_ABI_BITS; j++) {
845 /* check the bit inside the abi_ulong */
846 if ((b >> j) & 1)
847 FD_SET(k, fds);
848 k++;
852 unlock_user(target_fds, target_fds_addr, 0);
854 return 0;
857 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
858 abi_ulong target_fds_addr,
859 int n)
861 if (target_fds_addr) {
862 if (copy_from_user_fdset(fds, target_fds_addr, n))
863 return -TARGET_EFAULT;
864 *fds_ptr = fds;
865 } else {
866 *fds_ptr = NULL;
868 return 0;
871 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
872 const fd_set *fds,
873 int n)
875 int i, nw, j, k;
876 abi_long v;
877 abi_ulong *target_fds;
879 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
880 if (!(target_fds = lock_user(VERIFY_WRITE,
881 target_fds_addr,
882 sizeof(abi_ulong) * nw,
883 0)))
884 return -TARGET_EFAULT;
886 k = 0;
887 for (i = 0; i < nw; i++) {
888 v = 0;
889 for (j = 0; j < TARGET_ABI_BITS; j++) {
890 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
891 k++;
893 __put_user(v, &target_fds[i]);
896 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
898 return 0;
901 #if defined(__alpha__)
902 #define HOST_HZ 1024
903 #else
904 #define HOST_HZ 100
905 #endif
907 static inline abi_long host_to_target_clock_t(long ticks)
909 #if HOST_HZ == TARGET_HZ
910 return ticks;
911 #else
912 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
913 #endif
916 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
917 const struct rusage *rusage)
919 struct target_rusage *target_rusage;
921 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
922 return -TARGET_EFAULT;
923 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
924 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
925 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
926 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
927 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
928 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
929 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
930 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
931 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
932 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
933 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
934 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
935 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
936 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
937 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
938 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
939 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
940 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
941 unlock_user_struct(target_rusage, target_addr, 1);
943 return 0;
946 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
948 abi_ulong target_rlim_swap;
949 rlim_t result;
951 target_rlim_swap = tswapal(target_rlim);
952 if (target_rlim_swap == TARGET_RLIM_INFINITY)
953 return RLIM_INFINITY;
955 result = target_rlim_swap;
956 if (target_rlim_swap != (rlim_t)result)
957 return RLIM_INFINITY;
959 return result;
962 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
964 abi_ulong target_rlim_swap;
965 abi_ulong result;
967 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
968 target_rlim_swap = TARGET_RLIM_INFINITY;
969 else
970 target_rlim_swap = rlim;
971 result = tswapal(target_rlim_swap);
973 return result;
976 static inline int target_to_host_resource(int code)
978 switch (code) {
979 case TARGET_RLIMIT_AS:
980 return RLIMIT_AS;
981 case TARGET_RLIMIT_CORE:
982 return RLIMIT_CORE;
983 case TARGET_RLIMIT_CPU:
984 return RLIMIT_CPU;
985 case TARGET_RLIMIT_DATA:
986 return RLIMIT_DATA;
987 case TARGET_RLIMIT_FSIZE:
988 return RLIMIT_FSIZE;
989 case TARGET_RLIMIT_LOCKS:
990 return RLIMIT_LOCKS;
991 case TARGET_RLIMIT_MEMLOCK:
992 return RLIMIT_MEMLOCK;
993 case TARGET_RLIMIT_MSGQUEUE:
994 return RLIMIT_MSGQUEUE;
995 case TARGET_RLIMIT_NICE:
996 return RLIMIT_NICE;
997 case TARGET_RLIMIT_NOFILE:
998 return RLIMIT_NOFILE;
999 case TARGET_RLIMIT_NPROC:
1000 return RLIMIT_NPROC;
1001 case TARGET_RLIMIT_RSS:
1002 return RLIMIT_RSS;
1003 case TARGET_RLIMIT_RTPRIO:
1004 return RLIMIT_RTPRIO;
1005 case TARGET_RLIMIT_SIGPENDING:
1006 return RLIMIT_SIGPENDING;
1007 case TARGET_RLIMIT_STACK:
1008 return RLIMIT_STACK;
1009 default:
1010 return code;
1014 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1015 abi_ulong target_tv_addr)
1017 struct target_timeval *target_tv;
1019 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1020 return -TARGET_EFAULT;
1022 __get_user(tv->tv_sec, &target_tv->tv_sec);
1023 __get_user(tv->tv_usec, &target_tv->tv_usec);
1025 unlock_user_struct(target_tv, target_tv_addr, 0);
1027 return 0;
1030 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1031 const struct timeval *tv)
1033 struct target_timeval *target_tv;
1035 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1036 return -TARGET_EFAULT;
1038 __put_user(tv->tv_sec, &target_tv->tv_sec);
1039 __put_user(tv->tv_usec, &target_tv->tv_usec);
1041 unlock_user_struct(target_tv, target_tv_addr, 1);
1043 return 0;
1046 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1047 abi_ulong target_tz_addr)
1049 struct target_timezone *target_tz;
1051 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1052 return -TARGET_EFAULT;
1055 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1056 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1058 unlock_user_struct(target_tz, target_tz_addr, 0);
1060 return 0;
1063 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1064 #include <mqueue.h>
1066 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1067 abi_ulong target_mq_attr_addr)
1069 struct target_mq_attr *target_mq_attr;
1071 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1072 target_mq_attr_addr, 1))
1073 return -TARGET_EFAULT;
1075 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1076 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1077 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1078 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1080 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1082 return 0;
1085 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1086 const struct mq_attr *attr)
1088 struct target_mq_attr *target_mq_attr;
1090 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1091 target_mq_attr_addr, 0))
1092 return -TARGET_EFAULT;
1094 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1095 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1096 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1097 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1099 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1101 return 0;
1103 #endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements guest select() on top of the host pselect6: the three guest
 * fd_sets and the optional timeout are copied in, the host call is made,
 * and on success the (possibly modified) sets and remaining timeout are
 * copied back out to the guest.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL host fd_set pointer (set not
     * supplied); a bad address yields -TARGET_EFAULT. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* pselect6 takes a timespec, so convert the guest's timeval. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Like Linux, write back the remaining timeout. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
/* Create a pipe with flags via host pipe2(); returns the raw host
 * result (caller converts errno).  When the host lacks pipe2 support
 * the flags cannot be honoured, so report ENOSYS.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.  For the plain pipe syscall,
 * several targets return the two descriptors in registers instead of
 * writing them through the user pointer, so the per-target branches
 * below must not be applied to pipe2.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    /* Only call pipe2 when flags were requested so a plain pipe()
     * works even without host pipe2 support. */
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd is returned in a register, first as syscall result. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds through the guest pointer. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1207 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1208 abi_ulong target_addr,
1209 socklen_t len)
1211 struct target_ip_mreqn *target_smreqn;
1213 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1214 if (!target_smreqn)
1215 return -TARGET_EFAULT;
1216 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1217 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1218 if (len == sizeof(struct target_ip_mreqn))
1219 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1220 unlock_user(target_smreqn, target_addr, 0);
1222 return 0;
/* Copy a guest sockaddr at target_addr into a host sockaddr, fixing
 * byte order of the family field and applying per-family fixups.
 * If the fd has a registered address translator (e.g. SOCK_PACKET),
 * that translator is used instead.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte within len is non-NUL but the next byte is NUL:
             * extend len by one to include the terminator. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* overwrite with host byte order */
    if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        /* Fix endianness of the AF_PACKET-specific integer fields. */
        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1276 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1277 struct sockaddr *addr,
1278 socklen_t len)
1280 struct target_sockaddr *target_saddr;
1282 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1283 if (!target_saddr)
1284 return -TARGET_EFAULT;
1285 memcpy(target_saddr, addr, len);
1286 target_saddr->sa_family = tswap16(addr->sa_family);
1287 unlock_user(target_saddr, target_addr, len);
1289 return 0;
/* Convert the guest's ancillary-data (cmsg) chain attached to
 * target_msgh into host form inside msgh, ready for sendmsg().
 * SCM_RIGHTS fds and SCM_CREDENTIALS are converted field by field;
 * unknown payload types are byte-copied with a warning.
 * On return msgh->msg_controllen holds the converted length.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Too small to hold even one header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = recorded cmsg_len minus the (aligned) header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each int. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown type: best-effort raw copy, endianness not fixed. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host ancillary-data (cmsg) chain in msgh back into guest
 * form in target_msgh after recvmsg().  Truncation against the guest's
 * buffer is reported via MSG_CTRUNC (mirroring Linux put_cmsg()).
 * On return target_msgh->msg_controllen holds the bytes written.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fallthrough to the outer default */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* Only whole, untruncated timevals can be converted. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown type: raw byte copy, zero-padding if the target
             * representation is larger than the host's. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1518 /* do_setsockopt() Must return target values and target errnos. */
1519 static abi_long do_setsockopt(int sockfd, int level, int optname,
1520 abi_ulong optval_addr, socklen_t optlen)
1522 abi_long ret;
1523 int val;
1524 struct ip_mreqn *ip_mreq;
1525 struct ip_mreq_source *ip_mreq_source;
1527 switch(level) {
1528 case SOL_TCP:
1529 /* TCP options all take an 'int' value. */
1530 if (optlen < sizeof(uint32_t))
1531 return -TARGET_EINVAL;
1533 if (get_user_u32(val, optval_addr))
1534 return -TARGET_EFAULT;
1535 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1536 break;
1537 case SOL_IP:
1538 switch(optname) {
1539 case IP_TOS:
1540 case IP_TTL:
1541 case IP_HDRINCL:
1542 case IP_ROUTER_ALERT:
1543 case IP_RECVOPTS:
1544 case IP_RETOPTS:
1545 case IP_PKTINFO:
1546 case IP_MTU_DISCOVER:
1547 case IP_RECVERR:
1548 case IP_RECVTOS:
1549 #ifdef IP_FREEBIND
1550 case IP_FREEBIND:
1551 #endif
1552 case IP_MULTICAST_TTL:
1553 case IP_MULTICAST_LOOP:
1554 val = 0;
1555 if (optlen >= sizeof(uint32_t)) {
1556 if (get_user_u32(val, optval_addr))
1557 return -TARGET_EFAULT;
1558 } else if (optlen >= 1) {
1559 if (get_user_u8(val, optval_addr))
1560 return -TARGET_EFAULT;
1562 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1563 break;
1564 case IP_ADD_MEMBERSHIP:
1565 case IP_DROP_MEMBERSHIP:
1566 if (optlen < sizeof (struct target_ip_mreq) ||
1567 optlen > sizeof (struct target_ip_mreqn))
1568 return -TARGET_EINVAL;
1570 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1571 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1572 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1573 break;
1575 case IP_BLOCK_SOURCE:
1576 case IP_UNBLOCK_SOURCE:
1577 case IP_ADD_SOURCE_MEMBERSHIP:
1578 case IP_DROP_SOURCE_MEMBERSHIP:
1579 if (optlen != sizeof (struct target_ip_mreq_source))
1580 return -TARGET_EINVAL;
1582 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1583 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1584 unlock_user (ip_mreq_source, optval_addr, 0);
1585 break;
1587 default:
1588 goto unimplemented;
1590 break;
1591 case SOL_IPV6:
1592 switch (optname) {
1593 case IPV6_MTU_DISCOVER:
1594 case IPV6_MTU:
1595 case IPV6_V6ONLY:
1596 case IPV6_RECVPKTINFO:
1597 val = 0;
1598 if (optlen < sizeof(uint32_t)) {
1599 return -TARGET_EINVAL;
1601 if (get_user_u32(val, optval_addr)) {
1602 return -TARGET_EFAULT;
1604 ret = get_errno(setsockopt(sockfd, level, optname,
1605 &val, sizeof(val)));
1606 break;
1607 default:
1608 goto unimplemented;
1610 break;
1611 case SOL_RAW:
1612 switch (optname) {
1613 case ICMP_FILTER:
1614 /* struct icmp_filter takes an u32 value */
1615 if (optlen < sizeof(uint32_t)) {
1616 return -TARGET_EINVAL;
1619 if (get_user_u32(val, optval_addr)) {
1620 return -TARGET_EFAULT;
1622 ret = get_errno(setsockopt(sockfd, level, optname,
1623 &val, sizeof(val)));
1624 break;
1626 default:
1627 goto unimplemented;
1629 break;
1630 case TARGET_SOL_SOCKET:
1631 switch (optname) {
1632 case TARGET_SO_RCVTIMEO:
1634 struct timeval tv;
1636 optname = SO_RCVTIMEO;
1638 set_timeout:
1639 if (optlen != sizeof(struct target_timeval)) {
1640 return -TARGET_EINVAL;
1643 if (copy_from_user_timeval(&tv, optval_addr)) {
1644 return -TARGET_EFAULT;
1647 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1648 &tv, sizeof(tv)));
1649 return ret;
1651 case TARGET_SO_SNDTIMEO:
1652 optname = SO_SNDTIMEO;
1653 goto set_timeout;
1654 case TARGET_SO_ATTACH_FILTER:
1656 struct target_sock_fprog *tfprog;
1657 struct target_sock_filter *tfilter;
1658 struct sock_fprog fprog;
1659 struct sock_filter *filter;
1660 int i;
1662 if (optlen != sizeof(*tfprog)) {
1663 return -TARGET_EINVAL;
1665 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1666 return -TARGET_EFAULT;
1668 if (!lock_user_struct(VERIFY_READ, tfilter,
1669 tswapal(tfprog->filter), 0)) {
1670 unlock_user_struct(tfprog, optval_addr, 1);
1671 return -TARGET_EFAULT;
1674 fprog.len = tswap16(tfprog->len);
1675 filter = g_try_new(struct sock_filter, fprog.len);
1676 if (filter == NULL) {
1677 unlock_user_struct(tfilter, tfprog->filter, 1);
1678 unlock_user_struct(tfprog, optval_addr, 1);
1679 return -TARGET_ENOMEM;
1681 for (i = 0; i < fprog.len; i++) {
1682 filter[i].code = tswap16(tfilter[i].code);
1683 filter[i].jt = tfilter[i].jt;
1684 filter[i].jf = tfilter[i].jf;
1685 filter[i].k = tswap32(tfilter[i].k);
1687 fprog.filter = filter;
1689 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1690 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1691 g_free(filter);
1693 unlock_user_struct(tfilter, tfprog->filter, 1);
1694 unlock_user_struct(tfprog, optval_addr, 1);
1695 return ret;
1697 case TARGET_SO_BINDTODEVICE:
1699 char *dev_ifname, *addr_ifname;
1701 if (optlen > IFNAMSIZ - 1) {
1702 optlen = IFNAMSIZ - 1;
1704 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1705 if (!dev_ifname) {
1706 return -TARGET_EFAULT;
1708 optname = SO_BINDTODEVICE;
1709 addr_ifname = alloca(IFNAMSIZ);
1710 memcpy(addr_ifname, dev_ifname, optlen);
1711 addr_ifname[optlen] = 0;
1712 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1713 addr_ifname, optlen));
1714 unlock_user (dev_ifname, optval_addr, 0);
1715 return ret;
1717 /* Options with 'int' argument. */
1718 case TARGET_SO_DEBUG:
1719 optname = SO_DEBUG;
1720 break;
1721 case TARGET_SO_REUSEADDR:
1722 optname = SO_REUSEADDR;
1723 break;
1724 case TARGET_SO_TYPE:
1725 optname = SO_TYPE;
1726 break;
1727 case TARGET_SO_ERROR:
1728 optname = SO_ERROR;
1729 break;
1730 case TARGET_SO_DONTROUTE:
1731 optname = SO_DONTROUTE;
1732 break;
1733 case TARGET_SO_BROADCAST:
1734 optname = SO_BROADCAST;
1735 break;
1736 case TARGET_SO_SNDBUF:
1737 optname = SO_SNDBUF;
1738 break;
1739 case TARGET_SO_SNDBUFFORCE:
1740 optname = SO_SNDBUFFORCE;
1741 break;
1742 case TARGET_SO_RCVBUF:
1743 optname = SO_RCVBUF;
1744 break;
1745 case TARGET_SO_RCVBUFFORCE:
1746 optname = SO_RCVBUFFORCE;
1747 break;
1748 case TARGET_SO_KEEPALIVE:
1749 optname = SO_KEEPALIVE;
1750 break;
1751 case TARGET_SO_OOBINLINE:
1752 optname = SO_OOBINLINE;
1753 break;
1754 case TARGET_SO_NO_CHECK:
1755 optname = SO_NO_CHECK;
1756 break;
1757 case TARGET_SO_PRIORITY:
1758 optname = SO_PRIORITY;
1759 break;
1760 #ifdef SO_BSDCOMPAT
1761 case TARGET_SO_BSDCOMPAT:
1762 optname = SO_BSDCOMPAT;
1763 break;
1764 #endif
1765 case TARGET_SO_PASSCRED:
1766 optname = SO_PASSCRED;
1767 break;
1768 case TARGET_SO_PASSSEC:
1769 optname = SO_PASSSEC;
1770 break;
1771 case TARGET_SO_TIMESTAMP:
1772 optname = SO_TIMESTAMP;
1773 break;
1774 case TARGET_SO_RCVLOWAT:
1775 optname = SO_RCVLOWAT;
1776 break;
1777 break;
1778 default:
1779 goto unimplemented;
1781 if (optlen < sizeof(uint32_t))
1782 return -TARGET_EINVAL;
1784 if (get_user_u32(val, optval_addr))
1785 return -TARGET_EFAULT;
1786 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1787 break;
1788 default:
1789 unimplemented:
1790 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1791 ret = -TARGET_ENOPROTOOPT;
1793 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates guest level/optname, calls host getsockopt(), and writes
 * the result (and updated length) back to guest memory.  Most socket
 * options funnel through int_case, which handles plain 'int' values.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* Never write more than the guest asked for. */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* Socket type constants differ per target. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back as a u32 or a single byte, whichever was asked for. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small-value options can be returned as one byte if the
             * guest supplied a short buffer, as Linux does. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Build a host iovec array from the guest's target_iovec array at
 * target_addr, locking each buffer into host memory.
 *
 * Returns the allocated array, or NULL with errno set on failure
 * (errno == 0 for the legitimate count==0 case).  On failure all
 * partially-locked buffers are released.  The caller must eventually
 * call unlock_iovec() to release and free the result.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total to max_len, like the kernel. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked so far (entries with a
     * recorded nonzero guest length). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2072 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2073 int count, int copy)
2075 struct target_iovec *target_vec;
2076 int i;
2078 target_vec = lock_user(VERIFY_READ, target_addr,
2079 count * sizeof(struct target_iovec), 1);
2080 if (target_vec) {
2081 for (i = 0; i < count; i++) {
2082 abi_ulong base = tswapal(target_vec[i].iov_base);
2083 abi_long len = tswapal(target_vec[i].iov_len);
2084 if (len < 0) {
2085 break;
2087 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2089 unlock_user(target_vec, target_addr, 0);
2092 g_free(vec);
2095 static inline int target_to_host_sock_type(int *type)
2097 int host_type = 0;
2098 int target_type = *type;
2100 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2101 case TARGET_SOCK_DGRAM:
2102 host_type = SOCK_DGRAM;
2103 break;
2104 case TARGET_SOCK_STREAM:
2105 host_type = SOCK_STREAM;
2106 break;
2107 default:
2108 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2109 break;
2111 if (target_type & TARGET_SOCK_CLOEXEC) {
2112 #if defined(SOCK_CLOEXEC)
2113 host_type |= SOCK_CLOEXEC;
2114 #else
2115 return -TARGET_EINVAL;
2116 #endif
2118 if (target_type & TARGET_SOCK_NONBLOCK) {
2119 #if defined(SOCK_NONBLOCK)
2120 host_type |= SOCK_NONBLOCK;
2121 #elif !defined(O_NONBLOCK)
2122 return -TARGET_EINVAL;
2123 #endif
2125 *type = host_type;
2126 return 0;
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, a requested TARGET_SOCK_NONBLOCK is
 * applied after the fact via fcntl(O_NONBLOCK).  Returns fd on success;
 * on failure the fd is closed and -TARGET_EINVAL returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2144 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2145 abi_ulong target_addr,
2146 socklen_t len)
2148 struct sockaddr *addr = host_addr;
2149 struct target_sockaddr *target_saddr;
2151 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2152 if (!target_saddr) {
2153 return -TARGET_EFAULT;
2156 memcpy(addr, target_saddr, len);
2157 addr->sa_family = tswap16(target_saddr->sa_family);
2158 /* spkt_protocol is big-endian */
2160 unlock_user(target_saddr, target_addr, 0);
2161 return 0;
/* fd translator registered for SOCK_PACKET sockets; only address
 * conversion needs special handling for them. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
/* do_socket() Must return target values and target errnos.
 *
 * Creates a host socket for the guest.  PF_NETLINK is rejected
 * (not emulated here); for AF_PACKET and obsolete SOCK_PACKET the
 * protocol number is byte-swapped since it travels in network order.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;     /* keep original for flag fixup below */
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK)
        return -TARGET_EAFNOSUPPORT;

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* Emulate NONBLOCK via fcntl if the host lacks SOCK_NONBLOCK. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        }
    }
    return ret;
}
2200 /* do_bind() Must return target values and target errnos. */
2201 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2202 socklen_t addrlen)
2204 void *addr;
2205 abi_long ret;
2207 if ((int)addrlen < 0) {
2208 return -TARGET_EINVAL;
2211 addr = alloca(addrlen+1);
2213 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2214 if (ret)
2215 return ret;
2217 return get_errno(bind(sockfd, addr, addrlen));
2220 /* do_connect() Must return target values and target errnos. */
2221 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2222 socklen_t addrlen)
2224 void *addr;
2225 abi_long ret;
2227 if ((int)addrlen < 0) {
2228 return -TARGET_EINVAL;
2231 addr = alloca(addrlen+1);
2233 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2234 if (ret)
2235 return ret;
2237 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: converts the (already locked)
 * guest msghdr to a host msghdr, performs the host call, and for
 * receives converts name and control data back to guest form.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg representation may be larger than the target's;
     * double the guest's buffer size to be safe (see the comment in
     * target_to_host_cmsg about this allocation strategy). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count; later conversions reuse ret. */
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2308 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2309 int flags, int send)
2311 abi_long ret;
2312 struct target_msghdr *msgp;
2314 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2315 msgp,
2316 target_msg,
2317 send ? 1 : 0)) {
2318 return -TARGET_EFAULT;
2320 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2321 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2322 return ret;
2325 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2326 * so it might not have this *mmsg-specific flag either.
2328 #ifndef MSG_WAITFORONE
2329 #define MSG_WAITFORONE 0x10000
2330 #endif
/* sendmmsg/recvmmsg emulation: process up to 'vlen' msghdrs in one call.
 * Must return target values and target errnos. */
2332 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2333 unsigned int vlen, unsigned int flags,
2334 int send)
2336 struct target_mmsghdr *mmsgp;
2337 abi_long ret = 0;
2338 int i;
/* The kernel silently clamps vlen to UIO_MAXIOV; do the same. */
2340 if (vlen > UIO_MAXIOV) {
2341 vlen = UIO_MAXIOV;
2344 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2345 if (!mmsgp) {
2346 return -TARGET_EFAULT;
2349 for (i = 0; i < vlen; i++) {
2350 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2351 if (is_error(ret)) {
2352 break;
/* Record per-message byte count in the target's mmsghdr. */
2354 mmsgp[i].msg_len = tswap32(ret);
2355 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2356 if (flags & MSG_WAITFORONE) {
2357 flags |= MSG_DONTWAIT;
/* Only the 'i' entries actually processed are copied back. */
2361 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2363 /* Return number of datagrams sent if we sent any at all;
2364 * otherwise return the error.
2366 if (i) {
2367 return i;
2369 return ret;
2372 /* If we don't have a system accept4() then just call accept.
2373 * The callsites to do_accept4() will ensure that they don't
2374 * pass a non-zero flags argument in this config.
/* Fallback shim so the rest of the file can call accept4() unconditionally. */
2376 #ifndef CONFIG_ACCEPT4
2377 static inline int accept4(int sockfd, struct sockaddr *addr,
2378 socklen_t *addrlen, int flags)
2380 assert(flags == 0);
2381 return accept(sockfd, addr, addrlen);
2383 #endif
2385 /* do_accept4() Must return target values and target errnos. */
2386 static abi_long do_accept4(int fd, abi_ulong target_addr,
2387 abi_ulong target_addrlen_addr, int flags)
2389 socklen_t addrlen;
2390 void *addr;
2391 abi_long ret;
2392 int host_flags;
/* Translate target SOCK_CLOEXEC/SOCK_NONBLOCK bits to host values. */
2394 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
/* NULL addr means the caller doesn't want the peer address. */
2396 if (target_addr == 0) {
2397 return get_errno(accept4(fd, NULL, NULL, host_flags));
2400 /* linux returns EINVAL if addrlen pointer is invalid */
2401 if (get_user_u32(addrlen, target_addrlen_addr))
2402 return -TARGET_EINVAL;
2404 if ((int)addrlen < 0) {
2405 return -TARGET_EINVAL;
/* NOTE(review): EINVAL (not EFAULT) here mirrors the kernel's accept
 * behaviour for a bad addrlen pointer, per the comment above — the
 * sibling do_getpeername/do_getsockname use EFAULT instead. */
2408 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2409 return -TARGET_EINVAL;
2411 addr = alloca(addrlen);
2413 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2414 if (!is_error(ret)) {
/* Copy the (possibly shortened) peer address and its length back. */
2415 host_to_target_sockaddr(target_addr, addr, addrlen);
2416 if (put_user_u32(addrlen, target_addrlen_addr))
2417 ret = -TARGET_EFAULT;
2419 return ret;
2422 /* do_getpeername() Must return target values and target errnos. */
2423 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2424 abi_ulong target_addrlen_addr)
2426 socklen_t addrlen;
2427 void *addr;
2428 abi_long ret;
/* Read the caller-supplied buffer length from guest memory. */
2430 if (get_user_u32(addrlen, target_addrlen_addr))
2431 return -TARGET_EFAULT;
2433 if ((int)addrlen < 0) {
2434 return -TARGET_EINVAL;
2437 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2438 return -TARGET_EFAULT;
/* Stage the address in a host buffer, then convert back on success. */
2440 addr = alloca(addrlen);
2442 ret = get_errno(getpeername(fd, addr, &addrlen));
2443 if (!is_error(ret)) {
2444 host_to_target_sockaddr(target_addr, addr, addrlen);
2445 if (put_user_u32(addrlen, target_addrlen_addr))
2446 ret = -TARGET_EFAULT;
2448 return ret;
2451 /* do_getsockname() Must return target values and target errnos. */
2452 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2453 abi_ulong target_addrlen_addr)
2455 socklen_t addrlen;
2456 void *addr;
2457 abi_long ret;
/* Read the caller-supplied buffer length from guest memory. */
2459 if (get_user_u32(addrlen, target_addrlen_addr))
2460 return -TARGET_EFAULT;
2462 if ((int)addrlen < 0) {
2463 return -TARGET_EINVAL;
2466 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2467 return -TARGET_EFAULT;
/* Stage the address in a host buffer, then convert back on success. */
2469 addr = alloca(addrlen);
2471 ret = get_errno(getsockname(fd, addr, &addrlen));
2472 if (!is_error(ret)) {
2473 host_to_target_sockaddr(target_addr, addr, addrlen);
2474 if (put_user_u32(addrlen, target_addrlen_addr))
2475 ret = -TARGET_EFAULT;
2477 return ret;
2480 /* do_socketpair() Must return target values and target errnos. */
2481 static abi_long do_socketpair(int domain, int type, int protocol,
2482 abi_ulong target_tab_addr)
2484 int tab[2];
2485 abi_long ret;
2487 target_to_host_sock_type(&type);
2489 ret = get_errno(socketpair(domain, type, protocol, tab));
2490 if (!is_error(ret)) {
2491 if (put_user_s32(tab[0], target_tab_addr)
2492 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2493 ret = -TARGET_EFAULT;
2495 return ret;
2498 /* do_sendto() Must return target values and target errnos. */
/* Implements both sendto (target_addr != 0) and send (target_addr == 0). */
2499 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2500 abi_ulong target_addr, socklen_t addrlen)
2502 void *addr;
2503 void *host_msg;
2504 abi_long ret;
2506 if ((int)addrlen < 0) {
2507 return -TARGET_EINVAL;
/* Map the outgoing payload read-only from guest memory. */
2510 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2511 if (!host_msg)
2512 return -TARGET_EFAULT;
2513 if (target_addr) {
/* +1 leaves room for the NUL that AF_UNIX path handling may append. */
2514 addr = alloca(addrlen+1);
2515 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2516 if (ret) {
/* Payload was never modified, so unlock with length 0. */
2517 unlock_user(host_msg, msg, 0);
2518 return ret;
2520 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2521 } else {
2522 ret = get_errno(send(fd, host_msg, len, flags));
2524 unlock_user(host_msg, msg, 0);
2525 return ret;
2528 /* do_recvfrom() Must return target values and target errnos. */
/* Implements both recvfrom (target_addr != 0) and recv (target_addr == 0). */
2529 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2530 abi_ulong target_addr,
2531 abi_ulong target_addrlen)
2533 socklen_t addrlen;
2534 void *addr;
2535 void *host_msg;
2536 abi_long ret;
/* Map the destination payload buffer writable in guest memory. */
2538 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2539 if (!host_msg)
2540 return -TARGET_EFAULT;
2541 if (target_addr) {
2542 if (get_user_u32(addrlen, target_addrlen)) {
2543 ret = -TARGET_EFAULT;
2544 goto fail;
2546 if ((int)addrlen < 0) {
2547 ret = -TARGET_EINVAL;
2548 goto fail;
2550 addr = alloca(addrlen);
2551 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2552 } else {
2553 addr = NULL; /* To keep compiler quiet. */
2554 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
/* On success, convert the source address (if requested) and copy the
 * received payload back; on any failure unlock without copying. */
2556 if (!is_error(ret)) {
2557 if (target_addr) {
2558 host_to_target_sockaddr(target_addr, addr, addrlen);
2559 if (put_user_u32(addrlen, target_addrlen)) {
2560 ret = -TARGET_EFAULT;
2561 goto fail;
2564 unlock_user(host_msg, msg, len);
2565 } else {
2566 fail:
2567 unlock_user(host_msg, msg, 0);
2569 return ret;
2572 #ifdef TARGET_NR_socketcall
2573 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplex the legacy socketcall(2) syscall: fetch the per-call
 * argument vector from guest memory, then dispatch to the do_* helper. */
2574 static abi_long do_socketcall(int num, abi_ulong vptr)
2576 static const unsigned ac[] = { /* number of arguments per call */
2577 [SOCKOP_socket] = 3, /* domain, type, protocol */
2578 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2579 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2580 [SOCKOP_listen] = 2, /* sockfd, backlog */
2581 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2582 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2583 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2584 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2585 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2586 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2587 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2588 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2589 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2590 [SOCKOP_shutdown] = 2, /* sockfd, how */
2591 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2592 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2593 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2594 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2595 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2596 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2598 abi_long a[6]; /* max 6 args */
2600 /* first, collect the arguments in a[] according to ac[] */
2601 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2602 unsigned i;
2603 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2604 for (i = 0; i < ac[num]; ++i) {
2605 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2606 return -TARGET_EFAULT;
2611 /* now when we have the args, actually handle the call */
2612 switch (num) {
2613 case SOCKOP_socket: /* domain, type, protocol */
2614 return do_socket(a[0], a[1], a[2]);
2615 case SOCKOP_bind: /* sockfd, addr, addrlen */
2616 return do_bind(a[0], a[1], a[2]);
2617 case SOCKOP_connect: /* sockfd, addr, addrlen */
2618 return do_connect(a[0], a[1], a[2]);
2619 case SOCKOP_listen: /* sockfd, backlog */
2620 return get_errno(listen(a[0], a[1]));
2621 case SOCKOP_accept: /* sockfd, addr, addrlen */
2622 return do_accept4(a[0], a[1], a[2], 0);
2623 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2624 return do_accept4(a[0], a[1], a[2], a[3]);
2625 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2626 return do_getsockname(a[0], a[1], a[2]);
2627 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2628 return do_getpeername(a[0], a[1], a[2]);
2629 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2630 return do_socketpair(a[0], a[1], a[2], a[3]);
2631 case SOCKOP_send: /* sockfd, msg, len, flags */
2632 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2633 case SOCKOP_recv: /* sockfd, msg, len, flags */
2634 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2635 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2636 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2637 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2638 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2639 case SOCKOP_shutdown: /* sockfd, how */
2640 return get_errno(shutdown(a[0], a[1]));
2641 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2642 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2643 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2644 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2645 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
2646 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
2647 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
2648 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
2649 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2650 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2651 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2652 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2653 default:
2654 gemu_log("Unsupported socketcall: %d\n", num);
2655 return -TARGET_ENOSYS;
2658 #endif
2660 #define N_SHM_REGIONS 32
/* Bookkeeping for guest shmat() mappings so do_shmdt() can clear the
 * corresponding page flags; fixed-size table, slots recycled via in_use. */
2662 static struct shm_region {
2663 abi_ulong start;
2664 abi_ulong size;
2665 bool in_use;
2666 } shm_regions[N_SHM_REGIONS];
/* Guest-ABI layout of struct semid_ds; padding words mirror the kernel's
 * 32-bit layout (PPC64 has no such padding). */
2668 struct target_semid_ds
2670 struct target_ipc_perm sem_perm;
2671 abi_ulong sem_otime;
2672 #if !defined(TARGET_PPC64)
2673 abi_ulong __unused1;
2674 #endif
2675 abi_ulong sem_ctime;
2676 #if !defined(TARGET_PPC64)
2677 abi_ulong __unused2;
2678 #endif
2679 abi_ulong sem_nsems;
2680 abi_ulong __unused3;
2681 abi_ulong __unused4;
/* Copy a guest struct ipc_perm (embedded in a semid_ds at target_addr)
 * into host_ip, byteswapping each field. Returns 0 or -TARGET_EFAULT. */
2684 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2685 abi_ulong target_addr)
2687 struct target_ipc_perm *target_ip;
2688 struct target_semid_ds *target_sd;
2690 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2691 return -TARGET_EFAULT;
2692 target_ip = &(target_sd->sem_perm);
2693 host_ip->__key = tswap32(target_ip->__key);
2694 host_ip->uid = tswap32(target_ip->uid);
2695 host_ip->gid = tswap32(target_ip->gid);
2696 host_ip->cuid = tswap32(target_ip->cuid);
2697 host_ip->cgid = tswap32(target_ip->cgid);
/* mode and __seq are 32-bit on some targets, 16-bit on the rest. */
2698 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2699 host_ip->mode = tswap32(target_ip->mode);
2700 #else
2701 host_ip->mode = tswap16(target_ip->mode);
2702 #endif
2703 #if defined(TARGET_PPC)
2704 host_ip->__seq = tswap32(target_ip->__seq);
2705 #else
2706 host_ip->__seq = tswap16(target_ip->__seq);
2707 #endif
2708 unlock_user_struct(target_sd, target_addr, 0);
2709 return 0;
/* Copy a host struct ipc_perm into the guest semid_ds at target_addr,
 * byteswapping each field. Returns 0 or -TARGET_EFAULT. */
2712 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2713 struct ipc_perm *host_ip)
2715 struct target_ipc_perm *target_ip;
2716 struct target_semid_ds *target_sd;
2718 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2719 return -TARGET_EFAULT;
2720 target_ip = &(target_sd->sem_perm);
2721 target_ip->__key = tswap32(host_ip->__key);
2722 target_ip->uid = tswap32(host_ip->uid);
2723 target_ip->gid = tswap32(host_ip->gid);
2724 target_ip->cuid = tswap32(host_ip->cuid);
2725 target_ip->cgid = tswap32(host_ip->cgid);
/* mode and __seq are 32-bit on some targets, 16-bit on the rest. */
2726 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2727 target_ip->mode = tswap32(host_ip->mode);
2728 #else
2729 target_ip->mode = tswap16(host_ip->mode);
2730 #endif
2731 #if defined(TARGET_PPC)
2732 target_ip->__seq = tswap32(host_ip->__seq);
2733 #else
2734 target_ip->__seq = tswap16(host_ip->__seq);
2735 #endif
2736 unlock_user_struct(target_sd, target_addr, 1);
2737 return 0;
2740 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2741 abi_ulong target_addr)
2743 struct target_semid_ds *target_sd;
2745 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2746 return -TARGET_EFAULT;
2747 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2748 return -TARGET_EFAULT;
2749 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2750 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2751 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2752 unlock_user_struct(target_sd, target_addr, 0);
2753 return 0;
2756 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2757 struct semid_ds *host_sd)
2759 struct target_semid_ds *target_sd;
2761 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2762 return -TARGET_EFAULT;
2763 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2764 return -TARGET_EFAULT;
2765 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2766 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2767 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2768 unlock_user_struct(target_sd, target_addr, 1);
2769 return 0;
/* Guest-ABI layout of struct seminfo (IPC_INFO/SEM_INFO result). */
2772 struct target_seminfo {
2773 int semmap;
2774 int semmni;
2775 int semmns;
2776 int semmnu;
2777 int semmsl;
2778 int semopm;
2779 int semume;
2780 int semusz;
2781 int semvmx;
2782 int semaem;
/* Copy a host struct seminfo out to the guest, swapping each field.
 * Returns 0 or -TARGET_EFAULT. */
2785 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2786 struct seminfo *host_seminfo)
2788 struct target_seminfo *target_seminfo;
2789 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2790 return -TARGET_EFAULT;
2791 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2792 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2793 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2794 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2795 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2796 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2797 __put_user(host_seminfo->semume, &target_seminfo->semume);
2798 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2799 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2800 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2801 unlock_user_struct(target_seminfo, target_addr, 1);
2802 return 0;
/* Host-side semctl() argument union; glibc requires callers to define it. */
2805 union semun {
2806 int val;
2807 struct semid_ds *buf;
2808 unsigned short *array;
2809 struct seminfo *__buf;
/* Guest-side view of the semctl() union: all pointer members are guest
 * addresses (abi_ulong). */
2812 union target_semun {
2813 int val;
2814 abi_ulong buf;
2815 abi_ulong array;
2816 abi_ulong __buf;
/* Read the guest's SETALL value array into a freshly-allocated host array.
 * On success *host_array is owned by the caller (freed later by
 * host_to_target_semarray). Returns 0 or a negative target errno. */
2819 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2820 abi_ulong target_addr)
2822 int nsems;
2823 unsigned short *array;
2824 union semun semun;
2825 struct semid_ds semid_ds;
2826 int i, ret;
2828 semun.buf = &semid_ds;
/* Query the semaphore set size to know how many values to copy. */
2830 ret = semctl(semid, 0, IPC_STAT, semun);
2831 if (ret == -1)
2832 return get_errno(ret);
2834 nsems = semid_ds.sem_nsems;
2836 *host_array = g_try_new(unsigned short, nsems);
2837 if (!*host_array) {
2838 return -TARGET_ENOMEM;
2840 array = lock_user(VERIFY_READ, target_addr,
2841 nsems*sizeof(unsigned short), 1);
2842 if (!array) {
2843 g_free(*host_array);
2844 return -TARGET_EFAULT;
2847 for(i=0; i<nsems; i++) {
2848 __get_user((*host_array)[i], &array[i]);
2850 unlock_user(array, target_addr, 0);
2852 return 0;
2855 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2856 unsigned short **host_array)
2858 int nsems;
2859 unsigned short *array;
2860 union semun semun;
2861 struct semid_ds semid_ds;
2862 int i, ret;
2864 semun.buf = &semid_ds;
2866 ret = semctl(semid, 0, IPC_STAT, semun);
2867 if (ret == -1)
2868 return get_errno(ret);
2870 nsems = semid_ds.sem_nsems;
2872 array = lock_user(VERIFY_WRITE, target_addr,
2873 nsems*sizeof(unsigned short), 0);
2874 if (!array)
2875 return -TARGET_EFAULT;
2877 for(i=0; i<nsems; i++) {
2878 __put_user((*host_array)[i], &array[i]);
2880 g_free(*host_array);
2881 unlock_user(array, target_addr, 1);
2883 return 0;
/* Emulate semctl(2): convert the union argument per sub-command, call the
 * host semctl, and convert results back. Returns target errnos. */
2886 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2887 abi_ulong target_arg)
2889 union target_semun target_su = { .buf = target_arg };
2890 union semun arg;
2891 struct semid_ds dsarg;
2892 unsigned short *array = NULL;
2893 struct seminfo seminfo;
2894 abi_long ret = -TARGET_EINVAL;
2895 abi_long err;
/* Strip IPC_64 and similar flag bits; we only dispatch on the command. */
2896 cmd &= 0xff;
2898 switch( cmd ) {
2899 case GETVAL:
2900 case SETVAL:
2901 /* In 64 bit cross-endian situations, we will erroneously pick up
2902 * the wrong half of the union for the "val" element. To rectify
2903 * this, the entire 8-byte structure is byteswapped, followed by
2904 * a swap of the 4 byte val field. In other cases, the data is
2905 * already in proper host byte order. */
2906 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2907 target_su.buf = tswapal(target_su.buf);
2908 arg.val = tswap32(target_su.val);
2909 } else {
2910 arg.val = target_su.val;
2912 ret = get_errno(semctl(semid, semnum, cmd, arg));
2913 break;
/* GETALL/SETALL marshal the whole value array in both directions. */
2914 case GETALL:
2915 case SETALL:
2916 err = target_to_host_semarray(semid, &array, target_su.array);
2917 if (err)
2918 return err;
2919 arg.array = array;
2920 ret = get_errno(semctl(semid, semnum, cmd, arg));
2921 err = host_to_target_semarray(semid, target_su.array, &array);
2922 if (err)
2923 return err;
2924 break;
2925 case IPC_STAT:
2926 case IPC_SET:
2927 case SEM_STAT:
2928 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2929 if (err)
2930 return err;
2931 arg.buf = &dsarg;
2932 ret = get_errno(semctl(semid, semnum, cmd, arg));
2933 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2934 if (err)
2935 return err;
2936 break;
2937 case IPC_INFO:
2938 case SEM_INFO:
2939 arg.__buf = &seminfo;
2940 ret = get_errno(semctl(semid, semnum, cmd, arg));
2941 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2942 if (err)
2943 return err;
2944 break;
/* These commands ignore the union argument entirely. */
2945 case IPC_RMID:
2946 case GETPID:
2947 case GETNCNT:
2948 case GETZCNT:
2949 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2950 break;
2953 return ret;
/* Guest-ABI layout of struct sembuf (one semop operation). */
2956 struct target_sembuf {
2957 unsigned short sem_num;
2958 short sem_op;
2959 short sem_flg;
/* Copy an array of nsops guest sembufs into host_sembuf, swapping each
 * field. Returns 0 or -TARGET_EFAULT. */
2962 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2963 abi_ulong target_addr,
2964 unsigned nsops)
2966 struct target_sembuf *target_sembuf;
2967 int i;
2969 target_sembuf = lock_user(VERIFY_READ, target_addr,
2970 nsops*sizeof(struct target_sembuf), 1);
2971 if (!target_sembuf)
2972 return -TARGET_EFAULT;
2974 for(i=0; i<nsops; i++) {
2975 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2976 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2977 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2980 unlock_user(target_sembuf, target_addr, 0);
2982 return 0;
/* Emulate semop(2): convert the guest op array and call the host semop. */
2985 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
/* NOTE(review): nsops is guest-controlled and sizes a VLA here — a huge
 * value could overflow the stack; consider bounding it (confirm against
 * the kernel's SEMOPM limit). */
2987 struct sembuf sops[nsops];
2989 if (target_to_host_sembuf(sops, ptr, nsops))
2990 return -TARGET_EFAULT;
2992 return get_errno(semop(semid, sops, nsops));
/* Guest-ABI layout of struct msqid_ds; padding words exist only on
 * 32-bit targets, matching the kernel's layout. */
2995 struct target_msqid_ds
2997 struct target_ipc_perm msg_perm;
2998 abi_ulong msg_stime;
2999 #if TARGET_ABI_BITS == 32
3000 abi_ulong __unused1;
3001 #endif
3002 abi_ulong msg_rtime;
3003 #if TARGET_ABI_BITS == 32
3004 abi_ulong __unused2;
3005 #endif
3006 abi_ulong msg_ctime;
3007 #if TARGET_ABI_BITS == 32
3008 abi_ulong __unused3;
3009 #endif
3010 abi_ulong __msg_cbytes;
3011 abi_ulong msg_qnum;
3012 abi_ulong msg_qbytes;
3013 abi_ulong msg_lspid;
3014 abi_ulong msg_lrpid;
3015 abi_ulong __unused4;
3016 abi_ulong __unused5;
/* Convert a guest struct msqid_ds at target_addr into *host_md.
 * Returns 0 or -TARGET_EFAULT. */
3019 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3020 abi_ulong target_addr)
3022 struct target_msqid_ds *target_md;
3024 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3025 return -TARGET_EFAULT;
/* NOTE(review): this error return skips unlock_user_struct(), leaking
 * the lock — same pattern as the semid_ds converters; confirm and fix. */
3026 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3027 return -TARGET_EFAULT;
3028 host_md->msg_stime = tswapal(target_md->msg_stime);
3029 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3030 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3031 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3032 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3033 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3034 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3035 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3036 unlock_user_struct(target_md, target_addr, 0);
3037 return 0;
/* Convert *host_md back into the guest struct msqid_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
3040 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3041 struct msqid_ds *host_md)
3043 struct target_msqid_ds *target_md;
3045 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3046 return -TARGET_EFAULT;
/* NOTE(review): error return without unlock_user_struct() — see the
 * matching comment in target_to_host_msqid_ds. */
3047 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3048 return -TARGET_EFAULT;
3049 target_md->msg_stime = tswapal(host_md->msg_stime);
3050 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3051 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3052 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3053 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3054 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3055 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3056 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3057 unlock_user_struct(target_md, target_addr, 1);
3058 return 0;
/* Guest-ABI layout of struct msginfo (IPC_INFO/MSG_INFO result). */
3061 struct target_msginfo {
3062 int msgpool;
3063 int msgmap;
3064 int msgmax;
3065 int msgmnb;
3066 int msgmni;
3067 int msgssz;
3068 int msgtql;
3069 unsigned short int msgseg;
/* Copy a host struct msginfo out to the guest, swapping each field.
 * Returns 0 or -TARGET_EFAULT. */
3072 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3073 struct msginfo *host_msginfo)
3075 struct target_msginfo *target_msginfo;
3076 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3077 return -TARGET_EFAULT;
3078 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3079 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3080 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3081 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3082 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3083 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3084 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3085 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3086 unlock_user_struct(target_msginfo, target_addr, 1);
3087 return 0;
/* Emulate msgctl(2): convert per-command arguments, call the host msgctl,
 * convert results back. Returns target errnos. */
3090 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3092 struct msqid_ds dsarg;
3093 struct msginfo msginfo;
3094 abi_long ret = -TARGET_EINVAL;
/* Strip IPC_64 and similar flag bits before dispatching. */
3096 cmd &= 0xff;
3098 switch (cmd) {
3099 case IPC_STAT:
3100 case IPC_SET:
3101 case MSG_STAT:
3102 if (target_to_host_msqid_ds(&dsarg,ptr))
3103 return -TARGET_EFAULT;
3104 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3105 if (host_to_target_msqid_ds(ptr,&dsarg))
3106 return -TARGET_EFAULT;
3107 break;
3108 case IPC_RMID:
3109 ret = get_errno(msgctl(msgid, cmd, NULL));
3110 break;
3111 case IPC_INFO:
3112 case MSG_INFO:
/* Kernel ABI: IPC_INFO/MSG_INFO take a msginfo via the msqid_ds arg. */
3113 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3114 if (host_to_target_msginfo(ptr, &msginfo))
3115 return -TARGET_EFAULT;
3116 break;
3119 return ret;
/* Guest-ABI layout of struct msgbuf: mtype followed by the message text
 * (mtext is a variable-length trailer, declared [1] in kernel style). */
3122 struct target_msgbuf {
3123 abi_long mtype;
3124 char mtext[1];
/* Emulate msgsnd(2): copy the guest msgbuf into a host buffer (swapping
 * mtype) and send it. Returns target errnos. */
3127 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3128 ssize_t msgsz, int msgflg)
3130 struct target_msgbuf *target_mb;
3131 struct msgbuf *host_mb;
3132 abi_long ret = 0;
3134 if (msgsz < 0) {
3135 return -TARGET_EINVAL;
3138 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3139 return -TARGET_EFAULT;
/* msgsz is guest-controlled: use g_try_malloc so OOM fails gracefully. */
3140 host_mb = g_try_malloc(msgsz + sizeof(long));
3141 if (!host_mb) {
3142 unlock_user_struct(target_mb, msgp, 0);
3143 return -TARGET_ENOMEM;
3145 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3146 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3147 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
3148 g_free(host_mb);
3149 unlock_user_struct(target_mb, msgp, 0);
3151 return ret;
3154 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3155 ssize_t msgsz, abi_long msgtyp,
3156 int msgflg)
3158 struct target_msgbuf *target_mb;
3159 char *target_mtext;
3160 struct msgbuf *host_mb;
3161 abi_long ret = 0;
3163 if (msgsz < 0) {
3164 return -TARGET_EINVAL;
3167 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3168 return -TARGET_EFAULT;
3170 host_mb = g_malloc(msgsz+sizeof(long));
3171 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3173 if (ret > 0) {
3174 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3175 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3176 if (!target_mtext) {
3177 ret = -TARGET_EFAULT;
3178 goto end;
3180 memcpy(target_mb->mtext, host_mb->mtext, ret);
3181 unlock_user(target_mtext, target_mtext_addr, ret);
3184 target_mb->mtype = tswapal(host_mb->mtype);
3186 end:
3187 if (target_mb)
3188 unlock_user_struct(target_mb, msgp, 1);
3189 g_free(host_mb);
3190 return ret;
/* Convert a guest struct shmid_ds at target_addr into *host_sd.
 * Returns 0 or -TARGET_EFAULT. */
3193 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3194 abi_ulong target_addr)
3196 struct target_shmid_ds *target_sd;
3198 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3199 return -TARGET_EFAULT;
/* NOTE(review): error return without unlock_user_struct() — same
 * lock-leak pattern as the semid_ds/msqid_ds converters; confirm. */
3200 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3201 return -TARGET_EFAULT;
3202 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3203 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3204 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3205 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3206 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3207 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3208 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3209 unlock_user_struct(target_sd, target_addr, 0);
3210 return 0;
/* Convert *host_sd back into the guest struct shmid_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
3213 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3214 struct shmid_ds *host_sd)
3216 struct target_shmid_ds *target_sd;
3218 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3219 return -TARGET_EFAULT;
/* NOTE(review): error return without unlock_user_struct() — same
 * lock-leak pattern as the other converters; confirm. */
3220 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3221 return -TARGET_EFAULT;
3222 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3223 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3224 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3225 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3226 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3227 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3228 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3229 unlock_user_struct(target_sd, target_addr, 1);
3230 return 0;
/* Guest-ABI layout of struct shminfo (IPC_INFO result). */
3233 struct target_shminfo {
3234 abi_ulong shmmax;
3235 abi_ulong shmmin;
3236 abi_ulong shmmni;
3237 abi_ulong shmseg;
3238 abi_ulong shmall;
/* Copy a host struct shminfo out to the guest, swapping each field.
 * Returns 0 or -TARGET_EFAULT. */
3241 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3242 struct shminfo *host_shminfo)
3244 struct target_shminfo *target_shminfo;
3245 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3246 return -TARGET_EFAULT;
3247 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3248 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3249 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3250 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3251 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3252 unlock_user_struct(target_shminfo, target_addr, 1);
3253 return 0;
/* Guest-ABI layout of struct shm_info (SHM_INFO result). */
3256 struct target_shm_info {
3257 int used_ids;
3258 abi_ulong shm_tot;
3259 abi_ulong shm_rss;
3260 abi_ulong shm_swp;
3261 abi_ulong swap_attempts;
3262 abi_ulong swap_successes;
/* Copy a host struct shm_info out to the guest, swapping each field.
 * Returns 0 or -TARGET_EFAULT. */
3265 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3266 struct shm_info *host_shm_info)
3268 struct target_shm_info *target_shm_info;
3269 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3270 return -TARGET_EFAULT;
3271 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3272 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3273 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3274 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3275 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3276 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3277 unlock_user_struct(target_shm_info, target_addr, 1);
3278 return 0;
/* Emulate shmctl(2): convert per-command arguments, call the host shmctl,
 * convert results back. Returns target errnos. */
3281 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3283 struct shmid_ds dsarg;
3284 struct shminfo shminfo;
3285 struct shm_info shm_info;
3286 abi_long ret = -TARGET_EINVAL;
/* Strip IPC_64 and similar flag bits before dispatching. */
3288 cmd &= 0xff;
3290 switch(cmd) {
3291 case IPC_STAT:
3292 case IPC_SET:
3293 case SHM_STAT:
3294 if (target_to_host_shmid_ds(&dsarg, buf))
3295 return -TARGET_EFAULT;
3296 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3297 if (host_to_target_shmid_ds(buf, &dsarg))
3298 return -TARGET_EFAULT;
3299 break;
/* Kernel ABI: IPC_INFO/SHM_INFO return other structs via the same arg. */
3300 case IPC_INFO:
3301 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3302 if (host_to_target_shminfo(buf, &shminfo))
3303 return -TARGET_EFAULT;
3304 break;
3305 case SHM_INFO:
3306 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3307 if (host_to_target_shm_info(buf, &shm_info))
3308 return -TARGET_EFAULT;
3309 break;
3310 case IPC_RMID:
3311 case SHM_LOCK:
3312 case SHM_UNLOCK:
3313 ret = get_errno(shmctl(shmid, cmd, NULL));
3314 break;
3317 return ret;
/* Emulate shmat(2): attach the segment at the guest-requested address, or
 * find a free guest VMA when shmaddr is 0, then record the mapping and
 * set guest page flags. Returns the guest address or a target errno. */
3320 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3322 abi_long raddr;
3323 void *host_raddr;
3324 struct shmid_ds shm_info;
3325 int i,ret;
3327 /* find out the length of the shared memory segment */
3328 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3329 if (is_error(ret)) {
3330 /* can't get length, bail out */
3331 return ret;
/* Hold mmap_lock across VMA search + attach so the range can't be
 * claimed by another thread in between. */
3334 mmap_lock();
3336 if (shmaddr)
3337 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3338 else {
3339 abi_ulong mmap_start;
3341 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3343 if (mmap_start == -1) {
3344 errno = ENOMEM;
3345 host_raddr = (void *)-1;
3346 } else
/* SHM_REMAP: the reserved VMA is already mapped; replace it. */
3347 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3350 if (host_raddr == (void *)-1) {
3351 mmap_unlock();
3352 return get_errno((long)host_raddr);
3354 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached range valid/readable (writable unless SHM_RDONLY). */
3356 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3357 PAGE_VALID | PAGE_READ |
3358 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the mapping so do_shmdt() can undo the page flags later. */
3360 for (i = 0; i < N_SHM_REGIONS; i++) {
3361 if (!shm_regions[i].in_use) {
3362 shm_regions[i].in_use = true;
3363 shm_regions[i].start = raddr;
3364 shm_regions[i].size = shm_info.shm_segsz;
3365 break;
3369 mmap_unlock();
3370 return raddr;
3374 static inline abi_long do_shmdt(abi_ulong shmaddr)
3376 int i;
3378 for (i = 0; i < N_SHM_REGIONS; ++i) {
3379 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3380 shm_regions[i].in_use = false;
3381 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3382 break;
3386 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 *
 * Dispatcher for the multiplexed SysV ipc(2) syscall used on targets that
 * have TARGET_NR_ipc instead of separate sem*/msg*/shm* syscall numbers.
 * The sub-call number is in the low 16 bits of 'call' and an ABI version
 * in the high 16 bits.
 */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: ptr points at a struct bundling the message
                 * buffer pointer and the requested message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            /* New ABI: arguments are passed directly. */
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* Attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* X-macro expansion of syscall_types.h: the first pass builds an enum of
 * STRUCT_<name> identifiers, the second pass builds the per-struct thunk
 * type-description arrays (struct_<name>_def).  STRUCT_SPECIAL entries get
 * an enum value but no generated descriptor (they are converted by hand). */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom ioctl handler; buf_temp is scratch space of
 * MAX_STRUCT_SIZE bytes provided by do_ioctl(). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table (see ioctls.h). */
struct IOCTLEntry {
    int target_cmd;            /* ioctl number as seen by the guest */
    unsigned int host_cmd;     /* corresponding host ioctl number */
    const char *name;          /* stringified command, for logging */
    int access;                /* IOC_R / IOC_W / IOC_RW data direction */
    do_ioctl_fn *do_ioctl;     /* custom handler, or 0 for generic thunking */
    const argtype arg_type[5]; /* thunk type description of the argument */
};

/* Data-direction flags for IOCTLEntry.access. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the on-stack scratch buffer used for argument conversion. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))
/* Custom handler for FS_IOC_FIEMAP: the variable-length extent array
 * cannot be described by the generic thunk machinery. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Copy in only the fixed-size fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so the size computation below cannot
     * overflow (see FIEMAP_MAX_EXTENTS). */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
3619 #endif
3621 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3622 int fd, int cmd, abi_long arg)
3624 const argtype *arg_type = ie->arg_type;
3625 int target_size;
3626 void *argptr;
3627 int ret;
3628 struct ifconf *host_ifconf;
3629 uint32_t outbufsz;
3630 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3631 int target_ifreq_size;
3632 int nb_ifreq;
3633 int free_buf = 0;
3634 int i;
3635 int target_ifc_len;
3636 abi_long target_ifc_buf;
3637 int host_ifc_len;
3638 char *host_ifc_buf;
3640 assert(arg_type[0] == TYPE_PTR);
3641 assert(ie->access == IOC_RW);
3643 arg_type++;
3644 target_size = thunk_type_size(arg_type, 0);
3646 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3647 if (!argptr)
3648 return -TARGET_EFAULT;
3649 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3650 unlock_user(argptr, arg, 0);
3652 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3653 target_ifc_len = host_ifconf->ifc_len;
3654 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3656 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3657 nb_ifreq = target_ifc_len / target_ifreq_size;
3658 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3660 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3661 if (outbufsz > MAX_STRUCT_SIZE) {
3662 /* We can't fit all the extents into the fixed size buffer.
3663 * Allocate one that is large enough and use it instead.
3665 host_ifconf = malloc(outbufsz);
3666 if (!host_ifconf) {
3667 return -TARGET_ENOMEM;
3669 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3670 free_buf = 1;
3672 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3674 host_ifconf->ifc_len = host_ifc_len;
3675 host_ifconf->ifc_buf = host_ifc_buf;
3677 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3678 if (!is_error(ret)) {
3679 /* convert host ifc_len to target ifc_len */
3681 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3682 target_ifc_len = nb_ifreq * target_ifreq_size;
3683 host_ifconf->ifc_len = target_ifc_len;
3685 /* restore target ifc_buf */
3687 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3689 /* copy struct ifconf to target user */
3691 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3692 if (!argptr)
3693 return -TARGET_EFAULT;
3694 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3695 unlock_user(argptr, arg, target_size);
3697 /* copy ifreq[] to target user */
3699 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3700 for (i = 0; i < nb_ifreq ; i++) {
3701 thunk_convert(argptr + i * target_ifreq_size,
3702 host_ifc_buf + i * sizeof(struct ifreq),
3703 ifreq_arg_type, THUNK_TARGET);
3705 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3708 if (free_buf) {
3709 free(host_ifconf);
3712 return ret;
/* Custom handler for the device-mapper ioctls: a struct dm_ioctl header
 * is followed by command-specific variable-length payload at data_start,
 * which must be converted element by element in both directions. */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Copy in the fixed dm_ioctl header from the guest. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    /* Convert the command-specific input payload into host_data.
     * NOTE(review): argptr is not NULL-checked here before the copies
     * below dereference it -- a bad guest pointer would fault QEMU
     * rather than returning -TARGET_EFAULT; confirm against upstream. */
    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* A 64-bit sector number precedes the message string. */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Walk the chain of dm_target_spec entries, converting each
         * header and copying its trailing parameter string. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): this lock_user result is also used unchecked. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of {dev, next, name} records; recompute 'next' for
             * the target layout and stop if the guest buffer fills up. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec entries plus trailing strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* A count followed (after 8 bytes) by an array of dev_t. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions records plus names. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Copy the (possibly updated) dm_ioctl header back out. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
3940 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3941 int cmd, abi_long arg)
3943 void *argptr;
3944 int target_size;
3945 const argtype *arg_type = ie->arg_type;
3946 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3947 abi_long ret;
3949 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3950 struct blkpg_partition host_part;
3952 /* Read and convert blkpg */
3953 arg_type++;
3954 target_size = thunk_type_size(arg_type, 0);
3955 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3956 if (!argptr) {
3957 ret = -TARGET_EFAULT;
3958 goto out;
3960 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3961 unlock_user(argptr, arg, 0);
3963 switch (host_blkpg->op) {
3964 case BLKPG_ADD_PARTITION:
3965 case BLKPG_DEL_PARTITION:
3966 /* payload is struct blkpg_partition */
3967 break;
3968 default:
3969 /* Unknown opcode */
3970 ret = -TARGET_EINVAL;
3971 goto out;
3974 /* Read and convert blkpg->data */
3975 arg = (abi_long)(uintptr_t)host_blkpg->data;
3976 target_size = thunk_type_size(part_arg_type, 0);
3977 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3978 if (!argptr) {
3979 ret = -TARGET_EFAULT;
3980 goto out;
3982 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3983 unlock_user(argptr, arg, 0);
3985 /* Swizzle the data pointer to our local copy and call! */
3986 host_blkpg->data = &host_part;
3987 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3989 out:
3990 return ret;
3993 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3994 int fd, int cmd, abi_long arg)
3996 const argtype *arg_type = ie->arg_type;
3997 const StructEntry *se;
3998 const argtype *field_types;
3999 const int *dst_offsets, *src_offsets;
4000 int target_size;
4001 void *argptr;
4002 abi_ulong *target_rt_dev_ptr;
4003 unsigned long *host_rt_dev_ptr;
4004 abi_long ret;
4005 int i;
4007 assert(ie->access == IOC_W);
4008 assert(*arg_type == TYPE_PTR);
4009 arg_type++;
4010 assert(*arg_type == TYPE_STRUCT);
4011 target_size = thunk_type_size(arg_type, 0);
4012 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4013 if (!argptr) {
4014 return -TARGET_EFAULT;
4016 arg_type++;
4017 assert(*arg_type == (int)STRUCT_rtentry);
4018 se = struct_entries + *arg_type++;
4019 assert(se->convert[0] == NULL);
4020 /* convert struct here to be able to catch rt_dev string */
4021 field_types = se->field_types;
4022 dst_offsets = se->field_offsets[THUNK_HOST];
4023 src_offsets = se->field_offsets[THUNK_TARGET];
4024 for (i = 0; i < se->nb_fields; i++) {
4025 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4026 assert(*field_types == TYPE_PTRVOID);
4027 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4028 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4029 if (*target_rt_dev_ptr != 0) {
4030 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4031 tswapal(*target_rt_dev_ptr));
4032 if (!*host_rt_dev_ptr) {
4033 unlock_user(argptr, arg, 0);
4034 return -TARGET_EFAULT;
4036 } else {
4037 *host_rt_dev_ptr = 0;
4039 field_types++;
4040 continue;
4042 field_types = thunk_convert(buf_temp + dst_offsets[i],
4043 argptr + src_offsets[i],
4044 field_types, THUNK_HOST);
4046 unlock_user(argptr, arg, 0);
4048 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4049 if (*host_rt_dev_ptr != 0) {
4050 unlock_user((void *)*host_rt_dev_ptr,
4051 *target_rt_dev_ptr, 0);
4053 return ret;
4056 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4057 int fd, int cmd, abi_long arg)
4059 int sig = target_to_host_signal(arg);
4060 return get_errno(ioctl(fd, ie->host_cmd, sig));
/* The ioctl translation table, generated from ioctls.h.  IOCTL entries
 * use the generic thunk machinery in do_ioctl(); IOCTL_SPECIAL entries
 * name a custom handler function.  Terminated by a zero target_cmd. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos.
 *
 * Generic ioctl dispatcher: looks the target command up in
 * ioctl_entries[], then either delegates to a custom handler or uses the
 * thunk machinery to convert the argument according to the entry's
 * IOC_R/IOC_W/IOC_RW data direction.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the table for the target command number. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Entries with a custom handler bypass the generic conversion. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Pass the argument value through unchanged. */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Output-only: call first, then copy the result out to the
             * guest on success. */
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Input-only: copy the argument in from the guest, then call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* In/out: copy in, call, and copy back out on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Translation tables between target and host termios flag bits, used by
 * the termios conversion functions below.  Each row is
 * { target_mask, target_bits, host_mask, host_bits }; a zero row
 * terminates the table. */

/* Input-mode (c_iflag) bits. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};

/* Output-mode (c_oflag) bits, including the multi-bit delay fields. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

/* Control-mode (c_cflag) bits: baud rates, character size and flags. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

/* Local-mode (c_lflag) bits. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
/* Convert a guest struct target_termios (src) into a host struct
 * host_termios (dst): flag words go through the bitmask tables above
 * (with byte-swapping) and the control characters are remapped entry
 * by entry between the two V* index layouts. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Clear first: host c_cc slots with no target equivalent stay 0. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios(): convert a host struct
 * host_termios (src) into a guest struct target_termios (dst). */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Clear first: target c_cc slots with no host equivalent stay 0. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: converted by the two hand-written
 * functions above rather than by generic field-by-field thunking. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
/* Translation table for mmap() flag bits between target and host.
 * Same row format as the termios tables above. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
      MAP_NORESERVE },
    { 0, 0, 0, 0 }
};
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* Copy up to bytecount bytes of the emulated LDT out to guest memory at
 * ptr.  Returns the number of bytes copied, 0 if no LDT has been
 * allocated yet, or -TARGET_EFAULT if the guest buffer is unwritable. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/* modify_ldt(func=1 or 0x11): install one LDT descriptor from the guest's
 * struct user_desc at 'ptr'.  'oldmode' selects the legacy encoding (no
 * 'useable' bit, no 64-bit flag).  Mirrors the Linux kernel's write_ldt().
 * Returns 0 on success or a -TARGET_E* error code. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the packed 'flags' word (same bit layout as the kernel's
     * struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Build the two 32-bit halves of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
4465 /* specific and weird i386 syscalls */
4466 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4467 unsigned long bytecount)
4469 abi_long ret;
4471 switch (func) {
4472 case 0:
4473 ret = read_ldt(ptr, bytecount);
4474 break;
4475 case 1:
4476 ret = write_ldt(env, ptr, bytecount, 1);
4477 break;
4478 case 0x11:
4479 ret = write_ldt(env, ptr, bytecount, 0);
4480 break;
4481 default:
4482 ret = -TARGET_ENOSYS;
4483 break;
4485 return ret;
4488 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor into the GDT from the
 * guest's struct user_desc at 'ptr'.  An entry_number of -1 asks us to
 * pick a free TLS slot and report it back to the guest.  Returns 0 or a
 * -TARGET_E* error code. */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and tell the guest which one we chose. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the packed 'flags' word (same layout as the kernel's
     * struct user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1   &&
             seg_32bit == 0        &&
             limit_in_pages == 0   &&
             seg_not_present == 1  &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Build the two 32-bit halves of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* get_thread_area(2): read a TLS descriptor out of the GDT and convert
 * it back into the guest's struct user_desc at 'ptr'.  The inverse of
 * do_set_thread_area(). */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits (inverse of the encoding in
     * do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
4620 #endif /* TARGET_I386 && TARGET_ABI32 */
4622 #ifndef TARGET_ABI32
4623 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4625 abi_long ret = 0;
4626 abi_ulong val;
4627 int idx;
4629 switch(code) {
4630 case TARGET_ARCH_SET_GS:
4631 case TARGET_ARCH_SET_FS:
4632 if (code == TARGET_ARCH_SET_GS)
4633 idx = R_GS;
4634 else
4635 idx = R_FS;
4636 cpu_x86_load_seg(env, idx, 0);
4637 env->segs[idx].base = addr;
4638 break;
4639 case TARGET_ARCH_GET_GS:
4640 case TARGET_ARCH_GET_FS:
4641 if (code == TARGET_ARCH_GET_GS)
4642 idx = R_GS;
4643 else
4644 idx = R_FS;
4645 val = env->segs[idx].base;
4646 if (put_user(val, addr, abi_ulong))
4647 ret = -TARGET_EFAULT;
4648 break;
4649 default:
4650 ret = -TARGET_EINVAL;
4651 break;
4653 return ret;
4655 #endif
4657 #endif /* defined(TARGET_I386) */
4659 #define NEW_STACK_SIZE 0x40000
/* Held by the parent across thread setup in do_fork(); the new thread
 * takes and drops it in clone_func() to wait until setup is complete. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Parent/child handshake state for a CLONE_VM clone (thread creation). */
typedef struct {
    CPUArchState *env;       /* CPU state for the new thread */
    pthread_mutex_t mutex;   /* protects the 'cond' handshake */
    pthread_cond_t cond;     /* child signals readiness on this */
    pthread_t thread;
    uint32_t tid;            /* child's host TID, filled in by the child */
    abi_ulong child_tidptr;  /* guest address for CLONE_CHILD_SETTID */
    abi_ulong parent_tidptr; /* guest address for CLONE_PARENT_SETTID */
    sigset_t sigmask;        /* signal mask the child restores once ready */
} new_thread_info;
/* Entry point of a new guest thread created via CLONE_VM.  Publishes the
 * TID, signals the parent through info->cond, waits on clone_lock for the
 * parent to finish TLS setup, then enters the CPU loop (never returns). */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Implement clone()/fork()/vfork().  CLONE_VM clones become host pthreads
 * sharing the address space; anything else is emulated with fork().
 * Returns the child TID/PID on success, -1 or a target errno on failure. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        /* NOTE(review): the return values of the pthread_attr_* calls are
         * overwritten without being checked; only pthread_create's result
         * is actually used. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Translate a target fcntl() command number to the host's value.
 * Returns -TARGET_EINVAL for commands we do not support. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        /* These command numbers are identical on all Linux ports. */
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK;
    case TARGET_F_SETLK:
        return F_SETLK;
    case TARGET_F_SETLKW:
        return F_SETLKW;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* not reached: the default case above returns */
    return -TARGET_EINVAL;
}
/* Translation table for flock l_type values.  The -1 masks presumably
 * make the bitmask conversion match the value exactly — confirm against
 * target_to_host_bitmask(). */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/* Emulate fcntl(2): convert the struct argument (flock, flock64,
 * f_owner_ex or flag bitmask, depending on the command) between target
 * and host layouts around the host fcntl() call. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* Copy in the guest flock, query the host, copy the result back. */
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the '>> 1' on l_type here looks suspicious — it is
         * not applied in the 32-bit flock cases above; verify against the
         * target_flock64 l_type encoding. */
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Convert the returned O_* flag bits back to target values. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Plain integer argument: no conversion needed. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit ABI range (overflow maps to 65534,
 * the kernel's "overflowuid" convention). */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}

/* Widen a 16-bit uid, preserving the special -1 ("no change") value. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}

/* Byte-swap an id at the ABI's id width. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit id ABIs: no narrowing or widening required. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* One-time syscall-layer initialization: register thunk struct
 * descriptors, build the target->host errno table, and patch ioctl
 * command sizes whose size field is the "compute at runtime" sentinel. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);
/* Register every struct listed in syscall_types.h with the thunk layer. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit value passed by the guest as a pair of 32-bit
 * syscall arguments; word order depends on target endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t hi = word0, lo = word1;
#else
    uint64_t hi = word1, lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole value in the first argument. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
5166 #ifdef TARGET_NR_truncate64
5167 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5168 abi_long arg2,
5169 abi_long arg3,
5170 abi_long arg4)
5172 if (regpairs_aligned(cpu_env)) {
5173 arg2 = arg3;
5174 arg3 = arg4;
5176 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5178 #endif
5180 #ifdef TARGET_NR_ftruncate64
5181 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5182 abi_long arg2,
5183 abi_long arg3,
5184 abi_long arg4)
5186 if (regpairs_aligned(cpu_env)) {
5187 arg2 = arg3;
5188 arg3 = arg4;
5190 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
5192 #endif
/* Copy a guest struct timespec at 'target_addr' into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Copy *host_ts out to the guest struct timespec at 'target_addr'.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
5220 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5221 abi_ulong target_addr)
5223 struct target_itimerspec *target_itspec;
5225 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5226 return -TARGET_EFAULT;
5229 host_itspec->it_interval.tv_sec =
5230 tswapal(target_itspec->it_interval.tv_sec);
5231 host_itspec->it_interval.tv_nsec =
5232 tswapal(target_itspec->it_interval.tv_nsec);
5233 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5234 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5236 unlock_user_struct(target_itspec, target_addr, 1);
5237 return 0;
5240 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5241 struct itimerspec *host_its)
5243 struct target_itimerspec *target_itspec;
5245 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5246 return -TARGET_EFAULT;
5249 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5250 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5252 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5253 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5255 unlock_user_struct(target_itspec, target_addr, 0);
5256 return 0;
5259 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5260 abi_ulong target_addr)
5262 struct target_sigevent *target_sevp;
5264 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5265 return -TARGET_EFAULT;
5268 /* This union is awkward on 64 bit systems because it has a 32 bit
5269 * integer and a pointer in it; we follow the conversion approach
5270 * used for handling sigval types in signal.c so the guest should get
5271 * the correct value back even if we did a 64 bit byteswap and it's
5272 * using the 32 bit integer.
5274 host_sevp->sigev_value.sival_ptr =
5275 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5276 host_sevp->sigev_signo =
5277 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5278 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5279 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5281 unlock_user_struct(target_sevp, target_addr, 1);
5282 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate target mlockall() MCL_* flag bits to host values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
/* Copy a host struct stat into the guest's stat64 (or plain stat) layout
 * at 'target_addr'.  ARM EABI guests have their own dedicated layout.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
5363 /* ??? Using host futex calls even when target atomic operations
5364 are not really atomic probably breaks things. However implementing
5365 futexes locally would make futexes shared between multiple processes
5366 tricky. However they're probably useless because guest atomic
5367 operations won't work either. */
5368 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5369 target_ulong uaddr2, int val3)
5371 struct timespec ts, *pts;
5372 int base_op;
5374 /* ??? We assume FUTEX_* constants are the same on both host
5375 and target. */
5376 #ifdef FUTEX_CMD_MASK
5377 base_op = op & FUTEX_CMD_MASK;
5378 #else
5379 base_op = op;
5380 #endif
5381 switch (base_op) {
5382 case FUTEX_WAIT:
5383 case FUTEX_WAIT_BITSET:
5384 if (timeout) {
5385 pts = &ts;
5386 target_to_host_timespec(pts, timeout);
5387 } else {
5388 pts = NULL;
5390 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
5391 pts, NULL, val3));
5392 case FUTEX_WAKE:
5393 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5394 case FUTEX_FD:
5395 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5396 case FUTEX_REQUEUE:
5397 case FUTEX_CMP_REQUEUE:
5398 case FUTEX_WAKE_OP:
5399 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5400 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5401 But the prototype takes a `struct timespec *'; insert casts
5402 to satisfy the compiler. We do not need to tswap TIMEOUT
5403 since it's not compared to guest memory. */
5404 pts = (struct timespec *)(uintptr_t) timeout;
5405 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
5406 g2h(uaddr2),
5407 (base_op == FUTEX_CMP_REQUEUE
5408 ? tswap32(val3)
5409 : val3)));
5410 default:
5411 return -TARGET_ENOSYS;
5414 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): convert the guest file_handle buffer,
 * call the host syscall, and write the (mostly opaque) handle plus the
 * mount id back to guest memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): 'size' is guest-controlled; total_size could wrap for
     * huge values — verify lock_user rejects such sizes. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
5466 #endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's file_handle into a
 * host-order duplicate, convert the open flags, and invoke the host
 * syscall.  Returns a host fd or a -TARGET_* error.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest struct file_handle */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    /* Guard against unsigned wraparound of total_size for huge
     * guest-supplied sizes (would under-size the lock/g_memdup below).
     */
    if (total_size < size) {
        return -TARGET_EINVAL;
    }

    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
5502 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5504 /* signalfd siginfo conversion */
/* Byteswap one signalfd_siginfo record from host to target order.
 * tinfo may alias info: the only caller converts records in place.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Inspect the source struct, not the destination: the original code
     * read tinfo->ssi_signo/ssi_code before writing them, which only
     * worked because tinfo and info alias at the call site.
     */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* was tswap32(tinfo->ssi_errno): swap the *source* value */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
5544 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
5546 int i;
5548 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
5549 host_to_target_signalfd_siginfo(buf + i, buf + i);
5552 return len;
/* fd translation entry registered on signalfd file descriptors so that
 * data read from them gets byteswapped into target order.
 */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
5559 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
5561 int host_flags;
5562 target_sigset_t *target_mask;
5563 sigset_t host_mask;
5564 abi_long ret;
5566 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
5567 return -TARGET_EINVAL;
5569 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
5570 return -TARGET_EFAULT;
5573 target_to_host_sigset(&host_mask, target_mask);
5575 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
5577 ret = get_errno(signalfd(fd, &host_mask, host_flags));
5578 if (ret >= 0) {
5579 fd_trans_register(ret, &target_signalfd_trans);
5582 unlock_user_struct(target_mask, mask, 0);
5584 return ret;
5586 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits of the status. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal occupies bits 8..15 of the status. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
/* Synthesize /proc/self/cmdline for the guest: copy the host file into
 * fd, skipping the first NUL-terminated string (the qemu binary path)
 * so the guest sees its own argv[0] first.  Returns 0 on success,
 * -1 with errno set on failure.  cpu_env is unused.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;   /* report the read error, not close's */
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            /* Search only the nb_read bytes actually read: scanning the
             * whole buffer (as before) could match a stale NUL beyond
             * the data and make nb_read go negative below.
             */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5652 static int open_self_maps(void *cpu_env, int fd)
5654 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5655 TaskState *ts = cpu->opaque;
5656 FILE *fp;
5657 char *line = NULL;
5658 size_t len = 0;
5659 ssize_t read;
5661 fp = fopen("/proc/self/maps", "r");
5662 if (fp == NULL) {
5663 return -1;
5666 while ((read = getline(&line, &len, fp)) != -1) {
5667 int fields, dev_maj, dev_min, inode;
5668 uint64_t min, max, offset;
5669 char flag_r, flag_w, flag_x, flag_p;
5670 char path[512] = "";
5671 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5672 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5673 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5675 if ((fields < 10) || (fields > 11)) {
5676 continue;
5678 if (h2g_valid(min)) {
5679 int flags = page_get_flags(h2g(min));
5680 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5681 if (page_check_range(h2g(min), max - min, flags) == -1) {
5682 continue;
5684 if (h2g(min) == ts->info->stack_limit) {
5685 pstrcpy(path, sizeof(path), " [stack]");
5687 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5688 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5689 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5690 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5691 path[0] ? " " : "", path);
5695 free(line);
5696 fclose(fp);
5698 return 0;
5701 static int open_self_stat(void *cpu_env, int fd)
5703 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5704 TaskState *ts = cpu->opaque;
5705 abi_ulong start_stack = ts->info->start_stack;
5706 int i;
5708 for (i = 0; i < 44; i++) {
5709 char buf[128];
5710 int len;
5711 uint64_t val = 0;
5713 if (i == 0) {
5714 /* pid */
5715 val = getpid();
5716 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5717 } else if (i == 1) {
5718 /* app name */
5719 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5720 } else if (i == 27) {
5721 /* stack bottom */
5722 val = start_stack;
5723 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5724 } else {
5725 /* for the rest, there is MasterCard */
5726 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5729 len = strlen(buf);
5730 if (write(fd, buf, len) != len) {
5731 return -1;
5735 return 0;
/* Synthesize /proc/self/auxv for the guest: dump the auxiliary vector
 * that was saved on the guest stack at exec time into fd, then rewind
 * the fd.  Always returns 0, even on lock or write failure.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                /* best-effort: stop on error/EOF, still return 0 below */
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced by the loop, so this
         * unlocks with a moved pointer and reduced length.  Harmless for
         * a VERIFY_READ lock (no copy-back) — but confirm against
         * lock_user's contract.
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 iff filename names this process's own /proc entry for
 * `entry`, i.e. "/proc/self/<entry>" or "/proc/<own-pid>/<entry>";
 * 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest;

    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest = filename + strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric pid: must match our own pid exactly ("<pid>/"). */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
5792 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator for fake_open entries that list a full
 * /proc path (unlike is_proc_myself, which matches per-process entries).
 */
static int is_proc(const char *filename, const char *entry)
{
    if (strcmp(filename, entry) != 0) {
        return 0;
    }
    return 1;
}
5798 static int open_net_route(void *cpu_env, int fd)
5800 FILE *fp;
5801 char *line = NULL;
5802 size_t len = 0;
5803 ssize_t read;
5805 fp = fopen("/proc/net/route", "r");
5806 if (fp == NULL) {
5807 return -1;
5810 /* read header */
5812 read = getline(&line, &len, fp);
5813 dprintf(fd, "%s", line);
5815 /* read routes */
5817 while ((read = getline(&line, &len, fp)) != -1) {
5818 char iface[16];
5819 uint32_t dest, gw, mask;
5820 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5821 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5822 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5823 &mask, &mtu, &window, &irtt);
5824 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5825 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5826 metric, tswap32(mask), mtu, window, irtt);
5829 free(line);
5830 fclose(fp);
5832 return 0;
5834 #endif
5836 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5838 struct fake_open {
5839 const char *filename;
5840 int (*fill)(void *cpu_env, int fd);
5841 int (*cmp)(const char *s1, const char *s2);
5843 const struct fake_open *fake_open;
5844 static const struct fake_open fakes[] = {
5845 { "maps", open_self_maps, is_proc_myself },
5846 { "stat", open_self_stat, is_proc_myself },
5847 { "auxv", open_self_auxv, is_proc_myself },
5848 { "cmdline", open_self_cmdline, is_proc_myself },
5849 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5850 { "/proc/net/route", open_net_route, is_proc },
5851 #endif
5852 { NULL, NULL, NULL }
5855 if (is_proc_myself(pathname, "exe")) {
5856 int execfd = qemu_getauxval(AT_EXECFD);
5857 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
5860 for (fake_open = fakes; fake_open->filename; fake_open++) {
5861 if (fake_open->cmp(pathname, fake_open->filename)) {
5862 break;
5866 if (fake_open->filename) {
5867 const char *tmpdir;
5868 char filename[PATH_MAX];
5869 int fd, r;
5871 /* create temporary file to map stat to */
5872 tmpdir = getenv("TMPDIR");
5873 if (!tmpdir)
5874 tmpdir = "/tmp";
5875 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5876 fd = mkstemp(filename);
5877 if (fd < 0) {
5878 return fd;
5880 unlink(filename);
5882 if ((r = fake_open->fill(cpu_env, fd))) {
5883 int e = errno;
5884 close(fd);
5885 errno = e;
5886 return r;
5888 lseek(fd, 0, SEEK_SET);
5890 return fd;
5893 return safe_openat(dirfd, path(pathname), flags, mode);
5896 #define TIMER_MAGIC 0x0caf0000
5897 #define TIMER_MAGIC_MASK 0xffff0000
5899 /* Convert QEMU provided timer ID back to internal 16bit index format */
5900 static target_timer_t get_timer_id(abi_long arg)
5902 target_timer_t timerid = arg;
5904 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5905 return -TARGET_EINVAL;
5908 timerid &= 0xffff;
5910 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5911 return -TARGET_EINVAL;
5914 return timerid;
5917 /* do_syscall() should always have a single exit point at the end so
5918 that actions, such as logging of syscall results, can be performed.
5919 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5920 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5921 abi_long arg2, abi_long arg3, abi_long arg4,
5922 abi_long arg5, abi_long arg6, abi_long arg7,
5923 abi_long arg8)
5925 CPUState *cpu = ENV_GET_CPU(cpu_env);
5926 abi_long ret;
5927 struct stat st;
5928 struct statfs stfs;
5929 void *p;
5931 #if defined(DEBUG_ERESTARTSYS)
5932 /* Debug-only code for exercising the syscall-restart code paths
5933 * in the per-architecture cpu main loops: restart every syscall
5934 * the guest makes once before letting it through.
5937 static int flag;
5939 flag = !flag;
5940 if (flag) {
5941 return -TARGET_ERESTARTSYS;
5944 #endif
5946 #ifdef DEBUG
5947 gemu_log("syscall %d", num);
5948 #endif
5949 if(do_strace)
5950 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5952 switch(num) {
5953 case TARGET_NR_exit:
5954 /* In old applications this may be used to implement _exit(2).
5955 However in threaded applictions it is used for thread termination,
5956 and _exit_group is used for application termination.
5957 Do thread termination if we have more then one thread. */
5958 /* FIXME: This probably breaks if a signal arrives. We should probably
5959 be disabling signals. */
5960 if (CPU_NEXT(first_cpu)) {
5961 TaskState *ts;
5963 cpu_list_lock();
5964 /* Remove the CPU from the list. */
5965 QTAILQ_REMOVE(&cpus, cpu, node);
5966 cpu_list_unlock();
5967 ts = cpu->opaque;
5968 if (ts->child_tidptr) {
5969 put_user_u32(0, ts->child_tidptr);
5970 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5971 NULL, NULL, 0);
5973 thread_cpu = NULL;
5974 object_unref(OBJECT(cpu));
5975 g_free(ts);
5976 rcu_unregister_thread();
5977 pthread_exit(NULL);
5979 #ifdef TARGET_GPROF
5980 _mcleanup();
5981 #endif
5982 gdb_exit(cpu_env, arg1);
5983 _exit(arg1);
5984 ret = 0; /* avoid warning */
5985 break;
5986 case TARGET_NR_read:
5987 if (arg3 == 0)
5988 ret = 0;
5989 else {
5990 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5991 goto efault;
5992 ret = get_errno(safe_read(arg1, p, arg3));
5993 if (ret >= 0 &&
5994 fd_trans_host_to_target_data(arg1)) {
5995 ret = fd_trans_host_to_target_data(arg1)(p, ret);
5997 unlock_user(p, arg2, ret);
5999 break;
6000 case TARGET_NR_write:
6001 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6002 goto efault;
6003 ret = get_errno(safe_write(arg1, p, arg3));
6004 unlock_user(p, arg2, 0);
6005 break;
6006 #ifdef TARGET_NR_open
6007 case TARGET_NR_open:
6008 if (!(p = lock_user_string(arg1)))
6009 goto efault;
6010 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6011 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6012 arg3));
6013 fd_trans_unregister(ret);
6014 unlock_user(p, arg1, 0);
6015 break;
6016 #endif
6017 case TARGET_NR_openat:
6018 if (!(p = lock_user_string(arg2)))
6019 goto efault;
6020 ret = get_errno(do_openat(cpu_env, arg1, p,
6021 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6022 arg4));
6023 fd_trans_unregister(ret);
6024 unlock_user(p, arg2, 0);
6025 break;
6026 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6027 case TARGET_NR_name_to_handle_at:
6028 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6029 break;
6030 #endif
6031 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6032 case TARGET_NR_open_by_handle_at:
6033 ret = do_open_by_handle_at(arg1, arg2, arg3);
6034 fd_trans_unregister(ret);
6035 break;
6036 #endif
6037 case TARGET_NR_close:
6038 fd_trans_unregister(arg1);
6039 ret = get_errno(close(arg1));
6040 break;
6041 case TARGET_NR_brk:
6042 ret = do_brk(arg1);
6043 break;
6044 #ifdef TARGET_NR_fork
6045 case TARGET_NR_fork:
6046 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6047 break;
6048 #endif
6049 #ifdef TARGET_NR_waitpid
6050 case TARGET_NR_waitpid:
6052 int status;
6053 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6054 if (!is_error(ret) && arg2 && ret
6055 && put_user_s32(host_to_target_waitstatus(status), arg2))
6056 goto efault;
6058 break;
6059 #endif
6060 #ifdef TARGET_NR_waitid
6061 case TARGET_NR_waitid:
6063 siginfo_t info;
6064 info.si_pid = 0;
6065 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6066 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6067 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6068 goto efault;
6069 host_to_target_siginfo(p, &info);
6070 unlock_user(p, arg3, sizeof(target_siginfo_t));
6073 break;
6074 #endif
6075 #ifdef TARGET_NR_creat /* not on alpha */
6076 case TARGET_NR_creat:
6077 if (!(p = lock_user_string(arg1)))
6078 goto efault;
6079 ret = get_errno(creat(p, arg2));
6080 fd_trans_unregister(ret);
6081 unlock_user(p, arg1, 0);
6082 break;
6083 #endif
6084 #ifdef TARGET_NR_link
6085 case TARGET_NR_link:
6087 void * p2;
6088 p = lock_user_string(arg1);
6089 p2 = lock_user_string(arg2);
6090 if (!p || !p2)
6091 ret = -TARGET_EFAULT;
6092 else
6093 ret = get_errno(link(p, p2));
6094 unlock_user(p2, arg2, 0);
6095 unlock_user(p, arg1, 0);
6097 break;
6098 #endif
6099 #if defined(TARGET_NR_linkat)
6100 case TARGET_NR_linkat:
6102 void * p2 = NULL;
6103 if (!arg2 || !arg4)
6104 goto efault;
6105 p = lock_user_string(arg2);
6106 p2 = lock_user_string(arg4);
6107 if (!p || !p2)
6108 ret = -TARGET_EFAULT;
6109 else
6110 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6111 unlock_user(p, arg2, 0);
6112 unlock_user(p2, arg4, 0);
6114 break;
6115 #endif
6116 #ifdef TARGET_NR_unlink
6117 case TARGET_NR_unlink:
6118 if (!(p = lock_user_string(arg1)))
6119 goto efault;
6120 ret = get_errno(unlink(p));
6121 unlock_user(p, arg1, 0);
6122 break;
6123 #endif
6124 #if defined(TARGET_NR_unlinkat)
6125 case TARGET_NR_unlinkat:
6126 if (!(p = lock_user_string(arg2)))
6127 goto efault;
6128 ret = get_errno(unlinkat(arg1, p, arg3));
6129 unlock_user(p, arg2, 0);
6130 break;
6131 #endif
6132 case TARGET_NR_execve:
6134 char **argp, **envp;
6135 int argc, envc;
6136 abi_ulong gp;
6137 abi_ulong guest_argp;
6138 abi_ulong guest_envp;
6139 abi_ulong addr;
6140 char **q;
6141 int total_size = 0;
6143 argc = 0;
6144 guest_argp = arg2;
6145 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6146 if (get_user_ual(addr, gp))
6147 goto efault;
6148 if (!addr)
6149 break;
6150 argc++;
6152 envc = 0;
6153 guest_envp = arg3;
6154 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6155 if (get_user_ual(addr, gp))
6156 goto efault;
6157 if (!addr)
6158 break;
6159 envc++;
6162 argp = alloca((argc + 1) * sizeof(void *));
6163 envp = alloca((envc + 1) * sizeof(void *));
6165 for (gp = guest_argp, q = argp; gp;
6166 gp += sizeof(abi_ulong), q++) {
6167 if (get_user_ual(addr, gp))
6168 goto execve_efault;
6169 if (!addr)
6170 break;
6171 if (!(*q = lock_user_string(addr)))
6172 goto execve_efault;
6173 total_size += strlen(*q) + 1;
6175 *q = NULL;
6177 for (gp = guest_envp, q = envp; gp;
6178 gp += sizeof(abi_ulong), q++) {
6179 if (get_user_ual(addr, gp))
6180 goto execve_efault;
6181 if (!addr)
6182 break;
6183 if (!(*q = lock_user_string(addr)))
6184 goto execve_efault;
6185 total_size += strlen(*q) + 1;
6187 *q = NULL;
6189 if (!(p = lock_user_string(arg1)))
6190 goto execve_efault;
6191 /* Although execve() is not an interruptible syscall it is
6192 * a special case where we must use the safe_syscall wrapper:
6193 * if we allow a signal to happen before we make the host
6194 * syscall then we will 'lose' it, because at the point of
6195 * execve the process leaves QEMU's control. So we use the
6196 * safe syscall wrapper to ensure that we either take the
6197 * signal as a guest signal, or else it does not happen
6198 * before the execve completes and makes it the other
6199 * program's problem.
6201 ret = get_errno(safe_execve(p, argp, envp));
6202 unlock_user(p, arg1, 0);
6204 goto execve_end;
6206 execve_efault:
6207 ret = -TARGET_EFAULT;
6209 execve_end:
6210 for (gp = guest_argp, q = argp; *q;
6211 gp += sizeof(abi_ulong), q++) {
6212 if (get_user_ual(addr, gp)
6213 || !addr)
6214 break;
6215 unlock_user(*q, addr, 0);
6217 for (gp = guest_envp, q = envp; *q;
6218 gp += sizeof(abi_ulong), q++) {
6219 if (get_user_ual(addr, gp)
6220 || !addr)
6221 break;
6222 unlock_user(*q, addr, 0);
6225 break;
6226 case TARGET_NR_chdir:
6227 if (!(p = lock_user_string(arg1)))
6228 goto efault;
6229 ret = get_errno(chdir(p));
6230 unlock_user(p, arg1, 0);
6231 break;
6232 #ifdef TARGET_NR_time
6233 case TARGET_NR_time:
6235 time_t host_time;
6236 ret = get_errno(time(&host_time));
6237 if (!is_error(ret)
6238 && arg1
6239 && put_user_sal(host_time, arg1))
6240 goto efault;
6242 break;
6243 #endif
6244 #ifdef TARGET_NR_mknod
6245 case TARGET_NR_mknod:
6246 if (!(p = lock_user_string(arg1)))
6247 goto efault;
6248 ret = get_errno(mknod(p, arg2, arg3));
6249 unlock_user(p, arg1, 0);
6250 break;
6251 #endif
6252 #if defined(TARGET_NR_mknodat)
6253 case TARGET_NR_mknodat:
6254 if (!(p = lock_user_string(arg2)))
6255 goto efault;
6256 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6257 unlock_user(p, arg2, 0);
6258 break;
6259 #endif
6260 #ifdef TARGET_NR_chmod
6261 case TARGET_NR_chmod:
6262 if (!(p = lock_user_string(arg1)))
6263 goto efault;
6264 ret = get_errno(chmod(p, arg2));
6265 unlock_user(p, arg1, 0);
6266 break;
6267 #endif
6268 #ifdef TARGET_NR_break
6269 case TARGET_NR_break:
6270 goto unimplemented;
6271 #endif
6272 #ifdef TARGET_NR_oldstat
6273 case TARGET_NR_oldstat:
6274 goto unimplemented;
6275 #endif
6276 case TARGET_NR_lseek:
6277 ret = get_errno(lseek(arg1, arg2, arg3));
6278 break;
6279 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6280 /* Alpha specific */
6281 case TARGET_NR_getxpid:
6282 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6283 ret = get_errno(getpid());
6284 break;
6285 #endif
6286 #ifdef TARGET_NR_getpid
6287 case TARGET_NR_getpid:
6288 ret = get_errno(getpid());
6289 break;
6290 #endif
6291 case TARGET_NR_mount:
6293 /* need to look at the data field */
6294 void *p2, *p3;
6296 if (arg1) {
6297 p = lock_user_string(arg1);
6298 if (!p) {
6299 goto efault;
6301 } else {
6302 p = NULL;
6305 p2 = lock_user_string(arg2);
6306 if (!p2) {
6307 if (arg1) {
6308 unlock_user(p, arg1, 0);
6310 goto efault;
6313 if (arg3) {
6314 p3 = lock_user_string(arg3);
6315 if (!p3) {
6316 if (arg1) {
6317 unlock_user(p, arg1, 0);
6319 unlock_user(p2, arg2, 0);
6320 goto efault;
6322 } else {
6323 p3 = NULL;
6326 /* FIXME - arg5 should be locked, but it isn't clear how to
6327 * do that since it's not guaranteed to be a NULL-terminated
6328 * string.
6330 if (!arg5) {
6331 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6332 } else {
6333 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6335 ret = get_errno(ret);
6337 if (arg1) {
6338 unlock_user(p, arg1, 0);
6340 unlock_user(p2, arg2, 0);
6341 if (arg3) {
6342 unlock_user(p3, arg3, 0);
6345 break;
6346 #ifdef TARGET_NR_umount
6347 case TARGET_NR_umount:
6348 if (!(p = lock_user_string(arg1)))
6349 goto efault;
6350 ret = get_errno(umount(p));
6351 unlock_user(p, arg1, 0);
6352 break;
6353 #endif
6354 #ifdef TARGET_NR_stime /* not on alpha */
6355 case TARGET_NR_stime:
6357 time_t host_time;
6358 if (get_user_sal(host_time, arg1))
6359 goto efault;
6360 ret = get_errno(stime(&host_time));
6362 break;
6363 #endif
6364 case TARGET_NR_ptrace:
6365 goto unimplemented;
6366 #ifdef TARGET_NR_alarm /* not on alpha */
6367 case TARGET_NR_alarm:
6368 ret = alarm(arg1);
6369 break;
6370 #endif
6371 #ifdef TARGET_NR_oldfstat
6372 case TARGET_NR_oldfstat:
6373 goto unimplemented;
6374 #endif
6375 #ifdef TARGET_NR_pause /* not on alpha */
6376 case TARGET_NR_pause:
6377 ret = get_errno(pause());
6378 break;
6379 #endif
6380 #ifdef TARGET_NR_utime
6381 case TARGET_NR_utime:
6383 struct utimbuf tbuf, *host_tbuf;
6384 struct target_utimbuf *target_tbuf;
6385 if (arg2) {
6386 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6387 goto efault;
6388 tbuf.actime = tswapal(target_tbuf->actime);
6389 tbuf.modtime = tswapal(target_tbuf->modtime);
6390 unlock_user_struct(target_tbuf, arg2, 0);
6391 host_tbuf = &tbuf;
6392 } else {
6393 host_tbuf = NULL;
6395 if (!(p = lock_user_string(arg1)))
6396 goto efault;
6397 ret = get_errno(utime(p, host_tbuf));
6398 unlock_user(p, arg1, 0);
6400 break;
6401 #endif
6402 #ifdef TARGET_NR_utimes
6403 case TARGET_NR_utimes:
6405 struct timeval *tvp, tv[2];
6406 if (arg2) {
6407 if (copy_from_user_timeval(&tv[0], arg2)
6408 || copy_from_user_timeval(&tv[1],
6409 arg2 + sizeof(struct target_timeval)))
6410 goto efault;
6411 tvp = tv;
6412 } else {
6413 tvp = NULL;
6415 if (!(p = lock_user_string(arg1)))
6416 goto efault;
6417 ret = get_errno(utimes(p, tvp));
6418 unlock_user(p, arg1, 0);
6420 break;
6421 #endif
6422 #if defined(TARGET_NR_futimesat)
6423 case TARGET_NR_futimesat:
6425 struct timeval *tvp, tv[2];
6426 if (arg3) {
6427 if (copy_from_user_timeval(&tv[0], arg3)
6428 || copy_from_user_timeval(&tv[1],
6429 arg3 + sizeof(struct target_timeval)))
6430 goto efault;
6431 tvp = tv;
6432 } else {
6433 tvp = NULL;
6435 if (!(p = lock_user_string(arg2)))
6436 goto efault;
6437 ret = get_errno(futimesat(arg1, path(p), tvp));
6438 unlock_user(p, arg2, 0);
6440 break;
6441 #endif
6442 #ifdef TARGET_NR_stty
6443 case TARGET_NR_stty:
6444 goto unimplemented;
6445 #endif
6446 #ifdef TARGET_NR_gtty
6447 case TARGET_NR_gtty:
6448 goto unimplemented;
6449 #endif
6450 #ifdef TARGET_NR_access
6451 case TARGET_NR_access:
6452 if (!(p = lock_user_string(arg1)))
6453 goto efault;
6454 ret = get_errno(access(path(p), arg2));
6455 unlock_user(p, arg1, 0);
6456 break;
6457 #endif
6458 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6459 case TARGET_NR_faccessat:
6460 if (!(p = lock_user_string(arg2)))
6461 goto efault;
6462 ret = get_errno(faccessat(arg1, p, arg3, 0));
6463 unlock_user(p, arg2, 0);
6464 break;
6465 #endif
6466 #ifdef TARGET_NR_nice /* not on alpha */
6467 case TARGET_NR_nice:
6468 ret = get_errno(nice(arg1));
6469 break;
6470 #endif
6471 #ifdef TARGET_NR_ftime
6472 case TARGET_NR_ftime:
6473 goto unimplemented;
6474 #endif
6475 case TARGET_NR_sync:
6476 sync();
6477 ret = 0;
6478 break;
6479 case TARGET_NR_kill:
6480 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6481 break;
6482 #ifdef TARGET_NR_rename
6483 case TARGET_NR_rename:
6485 void *p2;
6486 p = lock_user_string(arg1);
6487 p2 = lock_user_string(arg2);
6488 if (!p || !p2)
6489 ret = -TARGET_EFAULT;
6490 else
6491 ret = get_errno(rename(p, p2));
6492 unlock_user(p2, arg2, 0);
6493 unlock_user(p, arg1, 0);
6495 break;
6496 #endif
6497 #if defined(TARGET_NR_renameat)
6498 case TARGET_NR_renameat:
6500 void *p2;
6501 p = lock_user_string(arg2);
6502 p2 = lock_user_string(arg4);
6503 if (!p || !p2)
6504 ret = -TARGET_EFAULT;
6505 else
6506 ret = get_errno(renameat(arg1, p, arg3, p2));
6507 unlock_user(p2, arg4, 0);
6508 unlock_user(p, arg2, 0);
6510 break;
6511 #endif
6512 #ifdef TARGET_NR_mkdir
6513 case TARGET_NR_mkdir:
6514 if (!(p = lock_user_string(arg1)))
6515 goto efault;
6516 ret = get_errno(mkdir(p, arg2));
6517 unlock_user(p, arg1, 0);
6518 break;
6519 #endif
6520 #if defined(TARGET_NR_mkdirat)
6521 case TARGET_NR_mkdirat:
6522 if (!(p = lock_user_string(arg2)))
6523 goto efault;
6524 ret = get_errno(mkdirat(arg1, p, arg3));
6525 unlock_user(p, arg2, 0);
6526 break;
6527 #endif
6528 #ifdef TARGET_NR_rmdir
6529 case TARGET_NR_rmdir:
6530 if (!(p = lock_user_string(arg1)))
6531 goto efault;
6532 ret = get_errno(rmdir(p));
6533 unlock_user(p, arg1, 0);
6534 break;
6535 #endif
6536 case TARGET_NR_dup:
6537 ret = get_errno(dup(arg1));
6538 if (ret >= 0) {
6539 fd_trans_dup(arg1, ret);
6541 break;
6542 #ifdef TARGET_NR_pipe
6543 case TARGET_NR_pipe:
6544 ret = do_pipe(cpu_env, arg1, 0, 0);
6545 break;
6546 #endif
6547 #ifdef TARGET_NR_pipe2
6548 case TARGET_NR_pipe2:
6549 ret = do_pipe(cpu_env, arg1,
6550 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6551 break;
6552 #endif
6553 case TARGET_NR_times:
6555 struct target_tms *tmsp;
6556 struct tms tms;
6557 ret = get_errno(times(&tms));
6558 if (arg1) {
6559 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6560 if (!tmsp)
6561 goto efault;
6562 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6563 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6564 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6565 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6567 if (!is_error(ret))
6568 ret = host_to_target_clock_t(ret);
6570 break;
6571 #ifdef TARGET_NR_prof
6572 case TARGET_NR_prof:
6573 goto unimplemented;
6574 #endif
6575 #ifdef TARGET_NR_signal
6576 case TARGET_NR_signal:
6577 goto unimplemented;
6578 #endif
6579 case TARGET_NR_acct:
6580 if (arg1 == 0) {
6581 ret = get_errno(acct(NULL));
6582 } else {
6583 if (!(p = lock_user_string(arg1)))
6584 goto efault;
6585 ret = get_errno(acct(path(p)));
6586 unlock_user(p, arg1, 0);
6588 break;
6589 #ifdef TARGET_NR_umount2
6590 case TARGET_NR_umount2:
6591 if (!(p = lock_user_string(arg1)))
6592 goto efault;
6593 ret = get_errno(umount2(p, arg2));
6594 unlock_user(p, arg1, 0);
6595 break;
6596 #endif
6597 #ifdef TARGET_NR_lock
6598 case TARGET_NR_lock:
6599 goto unimplemented;
6600 #endif
6601 case TARGET_NR_ioctl:
6602 ret = do_ioctl(arg1, arg2, arg3);
6603 break;
6604 case TARGET_NR_fcntl:
6605 ret = do_fcntl(arg1, arg2, arg3);
6606 break;
6607 #ifdef TARGET_NR_mpx
6608 case TARGET_NR_mpx:
6609 goto unimplemented;
6610 #endif
6611 case TARGET_NR_setpgid:
6612 ret = get_errno(setpgid(arg1, arg2));
6613 break;
6614 #ifdef TARGET_NR_ulimit
6615 case TARGET_NR_ulimit:
6616 goto unimplemented;
6617 #endif
6618 #ifdef TARGET_NR_oldolduname
6619 case TARGET_NR_oldolduname:
6620 goto unimplemented;
6621 #endif
6622 case TARGET_NR_umask:
6623 ret = get_errno(umask(arg1));
6624 break;
6625 case TARGET_NR_chroot:
6626 if (!(p = lock_user_string(arg1)))
6627 goto efault;
6628 ret = get_errno(chroot(p));
6629 unlock_user(p, arg1, 0);
6630 break;
6631 #ifdef TARGET_NR_ustat
6632 case TARGET_NR_ustat:
6633 goto unimplemented;
6634 #endif
6635 #ifdef TARGET_NR_dup2
6636 case TARGET_NR_dup2:
6637 ret = get_errno(dup2(arg1, arg2));
6638 if (ret >= 0) {
6639 fd_trans_dup(arg1, arg2);
6641 break;
6642 #endif
6643 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6644 case TARGET_NR_dup3:
6645 ret = get_errno(dup3(arg1, arg2, arg3));
6646 if (ret >= 0) {
6647 fd_trans_dup(arg1, arg2);
6649 break;
6650 #endif
6651 #ifdef TARGET_NR_getppid /* not on alpha */
6652 case TARGET_NR_getppid:
6653 ret = get_errno(getppid());
6654 break;
6655 #endif
6656 #ifdef TARGET_NR_getpgrp
6657 case TARGET_NR_getpgrp:
6658 ret = get_errno(getpgrp());
6659 break;
6660 #endif
6661 case TARGET_NR_setsid:
6662 ret = get_errno(setsid());
6663 break;
6664 #ifdef TARGET_NR_sigaction
6665 case TARGET_NR_sigaction:
6667 #if defined(TARGET_ALPHA)
6668 struct target_sigaction act, oact, *pact = 0;
6669 struct target_old_sigaction *old_act;
6670 if (arg2) {
6671 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6672 goto efault;
6673 act._sa_handler = old_act->_sa_handler;
6674 target_siginitset(&act.sa_mask, old_act->sa_mask);
6675 act.sa_flags = old_act->sa_flags;
6676 act.sa_restorer = 0;
6677 unlock_user_struct(old_act, arg2, 0);
6678 pact = &act;
6680 ret = get_errno(do_sigaction(arg1, pact, &oact));
6681 if (!is_error(ret) && arg3) {
6682 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6683 goto efault;
6684 old_act->_sa_handler = oact._sa_handler;
6685 old_act->sa_mask = oact.sa_mask.sig[0];
6686 old_act->sa_flags = oact.sa_flags;
6687 unlock_user_struct(old_act, arg3, 1);
6689 #elif defined(TARGET_MIPS)
6690 struct target_sigaction act, oact, *pact, *old_act;
6692 if (arg2) {
6693 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6694 goto efault;
6695 act._sa_handler = old_act->_sa_handler;
6696 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6697 act.sa_flags = old_act->sa_flags;
6698 unlock_user_struct(old_act, arg2, 0);
6699 pact = &act;
6700 } else {
6701 pact = NULL;
6704 ret = get_errno(do_sigaction(arg1, pact, &oact));
6706 if (!is_error(ret) && arg3) {
6707 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6708 goto efault;
6709 old_act->_sa_handler = oact._sa_handler;
6710 old_act->sa_flags = oact.sa_flags;
6711 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6712 old_act->sa_mask.sig[1] = 0;
6713 old_act->sa_mask.sig[2] = 0;
6714 old_act->sa_mask.sig[3] = 0;
6715 unlock_user_struct(old_act, arg3, 1);
6717 #else
6718 struct target_old_sigaction *old_act;
6719 struct target_sigaction act, oact, *pact;
6720 if (arg2) {
6721 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6722 goto efault;
6723 act._sa_handler = old_act->_sa_handler;
6724 target_siginitset(&act.sa_mask, old_act->sa_mask);
6725 act.sa_flags = old_act->sa_flags;
6726 act.sa_restorer = old_act->sa_restorer;
6727 unlock_user_struct(old_act, arg2, 0);
6728 pact = &act;
6729 } else {
6730 pact = NULL;
6732 ret = get_errno(do_sigaction(arg1, pact, &oact));
6733 if (!is_error(ret) && arg3) {
6734 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6735 goto efault;
6736 old_act->_sa_handler = oact._sa_handler;
6737 old_act->sa_mask = oact.sa_mask.sig[0];
6738 old_act->sa_flags = oact.sa_flags;
6739 old_act->sa_restorer = oact.sa_restorer;
6740 unlock_user_struct(old_act, arg3, 1);
6742 #endif
6744 break;
6745 #endif
6746 case TARGET_NR_rt_sigaction:
6748 #if defined(TARGET_ALPHA)
6749 struct target_sigaction act, oact, *pact = 0;
6750 struct target_rt_sigaction *rt_act;
6751 /* ??? arg4 == sizeof(sigset_t). */
6752 if (arg2) {
6753 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6754 goto efault;
6755 act._sa_handler = rt_act->_sa_handler;
6756 act.sa_mask = rt_act->sa_mask;
6757 act.sa_flags = rt_act->sa_flags;
6758 act.sa_restorer = arg5;
6759 unlock_user_struct(rt_act, arg2, 0);
6760 pact = &act;
6762 ret = get_errno(do_sigaction(arg1, pact, &oact));
6763 if (!is_error(ret) && arg3) {
6764 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6765 goto efault;
6766 rt_act->_sa_handler = oact._sa_handler;
6767 rt_act->sa_mask = oact.sa_mask;
6768 rt_act->sa_flags = oact.sa_flags;
6769 unlock_user_struct(rt_act, arg3, 1);
6771 #else
6772 struct target_sigaction *act;
6773 struct target_sigaction *oact;
6775 if (arg2) {
6776 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6777 goto efault;
6778 } else
6779 act = NULL;
6780 if (arg3) {
6781 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6782 ret = -TARGET_EFAULT;
6783 goto rt_sigaction_fail;
6785 } else
6786 oact = NULL;
6787 ret = get_errno(do_sigaction(arg1, act, oact));
6788 rt_sigaction_fail:
6789 if (act)
6790 unlock_user_struct(act, arg2, 0);
6791 if (oact)
6792 unlock_user_struct(oact, arg3, 1);
6793 #endif
6795 break;
6796 #ifdef TARGET_NR_sgetmask /* not on alpha */
6797 case TARGET_NR_sgetmask:
6799 sigset_t cur_set;
6800 abi_ulong target_set;
6801 do_sigprocmask(0, NULL, &cur_set);
6802 host_to_target_old_sigset(&target_set, &cur_set);
6803 ret = target_set;
6805 break;
6806 #endif
6807 #ifdef TARGET_NR_ssetmask /* not on alpha */
6808 case TARGET_NR_ssetmask:
6810 sigset_t set, oset, cur_set;
6811 abi_ulong target_set = arg1;
6812 do_sigprocmask(0, NULL, &cur_set);
6813 target_to_host_old_sigset(&set, &target_set);
6814 sigorset(&set, &set, &cur_set);
6815 do_sigprocmask(SIG_SETMASK, &set, &oset);
6816 host_to_target_old_sigset(&target_set, &oset);
6817 ret = target_set;
6819 break;
6820 #endif
6821 #ifdef TARGET_NR_sigprocmask
6822 case TARGET_NR_sigprocmask:
6824 #if defined(TARGET_ALPHA)
6825 sigset_t set, oldset;
6826 abi_ulong mask;
6827 int how;
6829 switch (arg1) {
6830 case TARGET_SIG_BLOCK:
6831 how = SIG_BLOCK;
6832 break;
6833 case TARGET_SIG_UNBLOCK:
6834 how = SIG_UNBLOCK;
6835 break;
6836 case TARGET_SIG_SETMASK:
6837 how = SIG_SETMASK;
6838 break;
6839 default:
6840 ret = -TARGET_EINVAL;
6841 goto fail;
6843 mask = arg2;
6844 target_to_host_old_sigset(&set, &mask);
6846 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6847 if (!is_error(ret)) {
6848 host_to_target_old_sigset(&mask, &oldset);
6849 ret = mask;
6850 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6852 #else
6853 sigset_t set, oldset, *set_ptr;
6854 int how;
6856 if (arg2) {
6857 switch (arg1) {
6858 case TARGET_SIG_BLOCK:
6859 how = SIG_BLOCK;
6860 break;
6861 case TARGET_SIG_UNBLOCK:
6862 how = SIG_UNBLOCK;
6863 break;
6864 case TARGET_SIG_SETMASK:
6865 how = SIG_SETMASK;
6866 break;
6867 default:
6868 ret = -TARGET_EINVAL;
6869 goto fail;
6871 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6872 goto efault;
6873 target_to_host_old_sigset(&set, p);
6874 unlock_user(p, arg2, 0);
6875 set_ptr = &set;
6876 } else {
6877 how = 0;
6878 set_ptr = NULL;
6880 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6881 if (!is_error(ret) && arg3) {
6882 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6883 goto efault;
6884 host_to_target_old_sigset(p, &oldset);
6885 unlock_user(p, arg3, sizeof(target_sigset_t));
6887 #endif
6889 break;
6890 #endif
6891 case TARGET_NR_rt_sigprocmask:
6893 int how = arg1;
6894 sigset_t set, oldset, *set_ptr;
6896 if (arg2) {
6897 switch(how) {
6898 case TARGET_SIG_BLOCK:
6899 how = SIG_BLOCK;
6900 break;
6901 case TARGET_SIG_UNBLOCK:
6902 how = SIG_UNBLOCK;
6903 break;
6904 case TARGET_SIG_SETMASK:
6905 how = SIG_SETMASK;
6906 break;
6907 default:
6908 ret = -TARGET_EINVAL;
6909 goto fail;
6911 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6912 goto efault;
6913 target_to_host_sigset(&set, p);
6914 unlock_user(p, arg2, 0);
6915 set_ptr = &set;
6916 } else {
6917 how = 0;
6918 set_ptr = NULL;
6920 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6921 if (!is_error(ret) && arg3) {
6922 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6923 goto efault;
6924 host_to_target_sigset(p, &oldset);
6925 unlock_user(p, arg3, sizeof(target_sigset_t));
6928 break;
6929 #ifdef TARGET_NR_sigpending
6930 case TARGET_NR_sigpending:
6932 sigset_t set;
6933 ret = get_errno(sigpending(&set));
6934 if (!is_error(ret)) {
6935 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6936 goto efault;
6937 host_to_target_old_sigset(p, &set);
6938 unlock_user(p, arg1, sizeof(target_sigset_t));
6941 break;
6942 #endif
6943 case TARGET_NR_rt_sigpending:
6945 sigset_t set;
6946 ret = get_errno(sigpending(&set));
6947 if (!is_error(ret)) {
6948 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6949 goto efault;
6950 host_to_target_sigset(p, &set);
6951 unlock_user(p, arg1, sizeof(target_sigset_t));
6954 break;
6955 #ifdef TARGET_NR_sigsuspend
6956 case TARGET_NR_sigsuspend:
6958 sigset_t set;
6959 #if defined(TARGET_ALPHA)
6960 abi_ulong mask = arg1;
6961 target_to_host_old_sigset(&set, &mask);
6962 #else
6963 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6964 goto efault;
6965 target_to_host_old_sigset(&set, p);
6966 unlock_user(p, arg1, 0);
6967 #endif
6968 ret = get_errno(sigsuspend(&set));
6970 break;
6971 #endif
6972 case TARGET_NR_rt_sigsuspend:
6974 sigset_t set;
6975 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6976 goto efault;
6977 target_to_host_sigset(&set, p);
6978 unlock_user(p, arg1, 0);
6979 ret = get_errno(sigsuspend(&set));
6981 break;
6982 case TARGET_NR_rt_sigtimedwait:
6984 sigset_t set;
6985 struct timespec uts, *puts;
6986 siginfo_t uinfo;
6988 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6989 goto efault;
6990 target_to_host_sigset(&set, p);
6991 unlock_user(p, arg1, 0);
6992 if (arg3) {
6993 puts = &uts;
6994 target_to_host_timespec(puts, arg3);
6995 } else {
6996 puts = NULL;
6998 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6999 if (!is_error(ret)) {
7000 if (arg2) {
7001 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
7003 if (!p) {
7004 goto efault;
7006 host_to_target_siginfo(p, &uinfo);
7007 unlock_user(p, arg2, sizeof(target_siginfo_t));
7009 ret = host_to_target_signal(ret);
7012 break;
7013 case TARGET_NR_rt_sigqueueinfo:
7015 siginfo_t uinfo;
7016 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
7017 goto efault;
7018 target_to_host_siginfo(&uinfo, p);
7019 unlock_user(p, arg1, 0);
7020 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7022 break;
7023 #ifdef TARGET_NR_sigreturn
7024 case TARGET_NR_sigreturn:
     /* Restore the guest CPU state from the signal frame; the
      * per-architecture helper does all the work. */
7025 ret = do_sigreturn(cpu_env);
7026 break;
7027 #endif
7028 case TARGET_NR_rt_sigreturn:
7029 ret = do_rt_sigreturn(cpu_env);
7030 break;
7031 case TARGET_NR_sethostname:
     /* arg1 = guest pointer to name, arg2 = length. */
7032 if (!(p = lock_user_string(arg1)))
7033 goto efault;
7034 ret = get_errno(sethostname(p, arg2));
7035 unlock_user(p, arg1, 0);
7036 break;
7037 case TARGET_NR_setrlimit:
7039 int resource = target_to_host_resource(arg1);
7040 struct target_rlimit *target_rlim;
7041 struct rlimit rlim;
7042 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7043 goto efault;
7044 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7045 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7046 unlock_user_struct(target_rlim, arg2, 0);
7047 ret = get_errno(setrlimit(resource, &rlim));
7049 break;
7050 case TARGET_NR_getrlimit:
7052 int resource = target_to_host_resource(arg1);
7053 struct target_rlimit *target_rlim;
7054 struct rlimit rlim;
7056 ret = get_errno(getrlimit(resource, &rlim));
7057 if (!is_error(ret)) {
7058 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7059 goto efault;
7060 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7061 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7062 unlock_user_struct(target_rlim, arg2, 1);
7065 break;
7066 case TARGET_NR_getrusage:
7068 struct rusage rusage;
7069 ret = get_errno(getrusage(arg1, &rusage));
7070 if (!is_error(ret)) {
7071 ret = host_to_target_rusage(arg2, &rusage);
7074 break;
7075 case TARGET_NR_gettimeofday:
7077 struct timeval tv;
7078 ret = get_errno(gettimeofday(&tv, NULL));
7079 if (!is_error(ret)) {
7080 if (copy_to_user_timeval(arg1, &tv))
7081 goto efault;
7084 break;
7085 case TARGET_NR_settimeofday:
7087 struct timeval tv, *ptv = NULL;
7088 struct timezone tz, *ptz = NULL;
7090 if (arg1) {
7091 if (copy_from_user_timeval(&tv, arg1)) {
7092 goto efault;
7094 ptv = &tv;
7097 if (arg2) {
7098 if (copy_from_user_timezone(&tz, arg2)) {
7099 goto efault;
7101 ptz = &tz;
7104 ret = get_errno(settimeofday(ptv, ptz));
7106 break;
7107 #if defined(TARGET_NR_select)
7108 case TARGET_NR_select:
7109 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7110 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7111 #else
7113 struct target_sel_arg_struct *sel;
7114 abi_ulong inp, outp, exp, tvp;
7115 long nsel;
7117 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7118 goto efault;
7119 nsel = tswapal(sel->n);
7120 inp = tswapal(sel->inp);
7121 outp = tswapal(sel->outp);
7122 exp = tswapal(sel->exp);
7123 tvp = tswapal(sel->tvp);
7124 unlock_user_struct(sel, arg1, 0);
7125 ret = do_select(nsel, inp, outp, exp, tvp);
7127 #endif
7128 break;
7129 #endif
7130 #ifdef TARGET_NR_pselect6
7131 case TARGET_NR_pselect6:
7133 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7134 fd_set rfds, wfds, efds;
7135 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7136 struct timespec ts, *ts_ptr;
7139 * The 6th arg is actually two args smashed together,
7140 * so we cannot use the C library.
7142 sigset_t set;
7143 struct {
7144 sigset_t *set;
7145 size_t size;
7146 } sig, *sig_ptr;
7148 abi_ulong arg_sigset, arg_sigsize, *arg7;
7149 target_sigset_t *target_sigset;
7151 n = arg1;
7152 rfd_addr = arg2;
7153 wfd_addr = arg3;
7154 efd_addr = arg4;
7155 ts_addr = arg5;
7157 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7158 if (ret) {
7159 goto fail;
7161 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7162 if (ret) {
7163 goto fail;
7165 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7166 if (ret) {
7167 goto fail;
7171 * This takes a timespec, and not a timeval, so we cannot
7172 * use the do_select() helper ...
7174 if (ts_addr) {
7175 if (target_to_host_timespec(&ts, ts_addr)) {
7176 goto efault;
7178 ts_ptr = &ts;
7179 } else {
7180 ts_ptr = NULL;
7183 /* Extract the two packed args for the sigset */
7184 if (arg6) {
7185 sig_ptr = &sig;
7186 sig.size = _NSIG / 8;
7188 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7189 if (!arg7) {
7190 goto efault;
7192 arg_sigset = tswapal(arg7[0]);
7193 arg_sigsize = tswapal(arg7[1]);
7194 unlock_user(arg7, arg6, 0);
7196 if (arg_sigset) {
7197 sig.set = &set;
7198 if (arg_sigsize != sizeof(*target_sigset)) {
7199 /* Like the kernel, we enforce correct size sigsets */
7200 ret = -TARGET_EINVAL;
7201 goto fail;
7203 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7204 sizeof(*target_sigset), 1);
7205 if (!target_sigset) {
7206 goto efault;
7208 target_to_host_sigset(&set, target_sigset);
7209 unlock_user(target_sigset, arg_sigset, 0);
7210 } else {
7211 sig.set = NULL;
7213 } else {
7214 sig_ptr = NULL;
7217 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7218 ts_ptr, sig_ptr));
7220 if (!is_error(ret)) {
7221 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7222 goto efault;
7223 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7224 goto efault;
7225 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7226 goto efault;
7228 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7229 goto efault;
7232 break;
7233 #endif
7234 #ifdef TARGET_NR_symlink
7235 case TARGET_NR_symlink:
7237 void *p2;
7238 p = lock_user_string(arg1);
7239 p2 = lock_user_string(arg2);
7240 if (!p || !p2)
7241 ret = -TARGET_EFAULT;
7242 else
7243 ret = get_errno(symlink(p, p2));
7244 unlock_user(p2, arg2, 0);
7245 unlock_user(p, arg1, 0);
7247 break;
7248 #endif
7249 #if defined(TARGET_NR_symlinkat)
7250 case TARGET_NR_symlinkat:
7252 void *p2;
7253 p = lock_user_string(arg1);
7254 p2 = lock_user_string(arg3);
7255 if (!p || !p2)
7256 ret = -TARGET_EFAULT;
7257 else
7258 ret = get_errno(symlinkat(p, arg2, p2));
7259 unlock_user(p2, arg3, 0);
7260 unlock_user(p, arg1, 0);
7262 break;
7263 #endif
7264 #ifdef TARGET_NR_oldlstat
7265 case TARGET_NR_oldlstat:
7266 goto unimplemented;
7267 #endif
7268 #ifdef TARGET_NR_readlink
7269 case TARGET_NR_readlink:
7271 void *p2;
7272 p = lock_user_string(arg1);
7273 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7274 if (!p || !p2) {
7275 ret = -TARGET_EFAULT;
7276 } else if (!arg3) {
7277 /* Short circuit this for the magic exe check. */
7278 ret = -TARGET_EINVAL;
7279 } else if (is_proc_myself((const char *)p, "exe")) {
7280 char real[PATH_MAX], *temp;
7281 temp = realpath(exec_path, real);
7282 /* Return value is # of bytes that we wrote to the buffer. */
7283 if (temp == NULL) {
7284 ret = get_errno(-1);
7285 } else {
7286 /* Don't worry about sign mismatch as earlier mapping
7287 * logic would have thrown a bad address error. */
7288 ret = MIN(strlen(real), arg3);
7289 /* We cannot NUL terminate the string. */
7290 memcpy(p2, real, ret);
7292 } else {
7293 ret = get_errno(readlink(path(p), p2, arg3));
7295 unlock_user(p2, arg2, ret);
7296 unlock_user(p, arg1, 0);
7298 break;
7299 #endif
7300 #if defined(TARGET_NR_readlinkat)
7301 case TARGET_NR_readlinkat:
7303 void *p2;
7304 p = lock_user_string(arg2);
7305 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7306 if (!p || !p2) {
7307 ret = -TARGET_EFAULT;
7308 } else if (is_proc_myself((const char *)p, "exe")) {
7309 char real[PATH_MAX], *temp;
7310 temp = realpath(exec_path, real);
7311 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
7312 snprintf((char *)p2, arg4, "%s", real);
7313 } else {
7314 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
7316 unlock_user(p2, arg3, ret);
7317 unlock_user(p, arg2, 0);
7319 break;
7320 #endif
7321 #ifdef TARGET_NR_uselib
7322 case TARGET_NR_uselib:
7323 goto unimplemented;
7324 #endif
7325 #ifdef TARGET_NR_swapon
7326 case TARGET_NR_swapon:
7327 if (!(p = lock_user_string(arg1)))
7328 goto efault;
7329 ret = get_errno(swapon(p, arg2));
7330 unlock_user(p, arg1, 0);
7331 break;
7332 #endif
7333 case TARGET_NR_reboot:
7334 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7335 /* arg4 must be ignored in all other cases */
7336 p = lock_user_string(arg4);
7337 if (!p) {
7338 goto efault;
7340 ret = get_errno(reboot(arg1, arg2, arg3, p));
7341 unlock_user(p, arg4, 0);
7342 } else {
7343 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7345 break;
7346 #ifdef TARGET_NR_readdir
7347 case TARGET_NR_readdir:
7348 goto unimplemented;
7349 #endif
7350 #ifdef TARGET_NR_mmap
7351 case TARGET_NR_mmap:
7352 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7353 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7354 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7355 || defined(TARGET_S390X)
7357 abi_ulong *v;
7358 abi_ulong v1, v2, v3, v4, v5, v6;
7359 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7360 goto efault;
7361 v1 = tswapal(v[0]);
7362 v2 = tswapal(v[1]);
7363 v3 = tswapal(v[2]);
7364 v4 = tswapal(v[3]);
7365 v5 = tswapal(v[4]);
7366 v6 = tswapal(v[5]);
7367 unlock_user(v, arg1, 0);
7368 ret = get_errno(target_mmap(v1, v2, v3,
7369 target_to_host_bitmask(v4, mmap_flags_tbl),
7370 v5, v6));
7372 #else
7373 ret = get_errno(target_mmap(arg1, arg2, arg3,
7374 target_to_host_bitmask(arg4, mmap_flags_tbl),
7375 arg5,
7376 arg6));
7377 #endif
7378 break;
7379 #endif
7380 #ifdef TARGET_NR_mmap2
7381 case TARGET_NR_mmap2:
7382 #ifndef MMAP_SHIFT
7383 #define MMAP_SHIFT 12
7384 #endif
7385 ret = get_errno(target_mmap(arg1, arg2, arg3,
7386 target_to_host_bitmask(arg4, mmap_flags_tbl),
7387 arg5,
7388 arg6 << MMAP_SHIFT));
7389 break;
7390 #endif
7391 case TARGET_NR_munmap:
7392 ret = get_errno(target_munmap(arg1, arg2));
7393 break;
7394 case TARGET_NR_mprotect:
7396 TaskState *ts = cpu->opaque;
7397 /* Special hack to detect libc making the stack executable. */
7398 if ((arg3 & PROT_GROWSDOWN)
7399 && arg1 >= ts->info->stack_limit
7400 && arg1 <= ts->info->start_stack) {
7401 arg3 &= ~PROT_GROWSDOWN;
7402 arg2 = arg2 + arg1 - ts->info->stack_limit;
7403 arg1 = ts->info->stack_limit;
7406 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7407 break;
7408 #ifdef TARGET_NR_mremap
7409 case TARGET_NR_mremap:
     /* Guest-address-space aware remap; target_mremap() keeps the
      * QEMU page tables in sync with the host mapping. */
7410 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7411 break;
7412 #endif
7413 /* ??? msync/mlock/munlock are broken for softmmu. */
7414 #ifdef TARGET_NR_msync
7415 case TARGET_NR_msync:
     /* g2h() converts the guest virtual address to the host address of
      * the backing mapping; the host syscall then acts on it directly. */
7416 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7417 break;
7418 #endif
7419 #ifdef TARGET_NR_mlock
7420 case TARGET_NR_mlock:
7421 ret = get_errno(mlock(g2h(arg1), arg2));
7422 break;
7423 #endif
7424 #ifdef TARGET_NR_munlock
7425 case TARGET_NR_munlock:
7426 ret = get_errno(munlock(g2h(arg1), arg2));
7427 break;
7428 #endif
7429 #ifdef TARGET_NR_mlockall
7430 case TARGET_NR_mlockall:
     /* MCL_* flag values differ between targets; translate first. */
7431 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7432 break;
7433 #endif
7434 #ifdef TARGET_NR_munlockall
7435 case TARGET_NR_munlockall:
7436 ret = get_errno(munlockall());
7437 break;
7438 #endif
7439 case TARGET_NR_truncate:
     /* Path-based truncate: copy in the guest path string first. */
7440 if (!(p = lock_user_string(arg1)))
7441 goto efault;
7442 ret = get_errno(truncate(p, arg2));
7443 unlock_user(p, arg1, 0);
7444 break;
7445 case TARGET_NR_ftruncate:
     /* fd-based variants take no guest pointers; pass through. */
7446 ret = get_errno(ftruncate(arg1, arg2));
7447 break;
7448 case TARGET_NR_fchmod:
7449 ret = get_errno(fchmod(arg1, arg2));
7450 break;
7451 #if defined(TARGET_NR_fchmodat)
7452 case TARGET_NR_fchmodat:
7453 if (!(p = lock_user_string(arg2)))
7454 goto efault;
     /* flags forced to 0: the kernel fchmodat syscall takes no flags
      * argument (AT_SYMLINK_NOFOLLOW is a libc-level extension). */
7455 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7456 unlock_user(p, arg2, 0);
7457 break;
7458 #endif
7459 case TARGET_NR_getpriority:
7460 /* Note that negative values are valid for getpriority, so we must
7461 differentiate based on errno settings. */
7462 errno = 0;
7463 ret = getpriority(arg1, arg2);
7464 if (ret == -1 && errno != 0) {
7465 ret = -host_to_target_errno(errno);
7466 break;
7468 #ifdef TARGET_ALPHA
7469 /* Return value is the unbiased priority. Signal no error. */
7470 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7471 #else
7472 /* Return value is a biased priority to avoid negative numbers. */
7473 ret = 20 - ret;
7474 #endif
7475 break;
7476 case TARGET_NR_setpriority:
7477 ret = get_errno(setpriority(arg1, arg2, arg3));
7478 break;
7479 #ifdef TARGET_NR_profil
7480 case TARGET_NR_profil:
7481 goto unimplemented;
7482 #endif
7483 case TARGET_NR_statfs:
7484 if (!(p = lock_user_string(arg1)))
7485 goto efault;
7486 ret = get_errno(statfs(path(p), &stfs));
7487 unlock_user(p, arg1, 0);
7488 convert_statfs:
7489 if (!is_error(ret)) {
7490 struct target_statfs *target_stfs;
7492 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7493 goto efault;
7494 __put_user(stfs.f_type, &target_stfs->f_type);
7495 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7496 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7497 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7498 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7499 __put_user(stfs.f_files, &target_stfs->f_files);
7500 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7501 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7502 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7503 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7504 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7505 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7506 unlock_user_struct(target_stfs, arg2, 1);
7508 break;
7509 case TARGET_NR_fstatfs:
7510 ret = get_errno(fstatfs(arg1, &stfs));
7511 goto convert_statfs;
7512 #ifdef TARGET_NR_statfs64
7513 case TARGET_NR_statfs64:
7514 if (!(p = lock_user_string(arg1)))
7515 goto efault;
7516 ret = get_errno(statfs(path(p), &stfs));
7517 unlock_user(p, arg1, 0);
7518 convert_statfs64:
7519 if (!is_error(ret)) {
7520 struct target_statfs64 *target_stfs;
7522 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7523 goto efault;
7524 __put_user(stfs.f_type, &target_stfs->f_type);
7525 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7526 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7527 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7528 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7529 __put_user(stfs.f_files, &target_stfs->f_files);
7530 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7531 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7532 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7533 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7534 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7535 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7536 unlock_user_struct(target_stfs, arg3, 1);
7538 break;
7539 case TARGET_NR_fstatfs64:
7540 ret = get_errno(fstatfs(arg1, &stfs));
7541 goto convert_statfs64;
7542 #endif
7543 #ifdef TARGET_NR_ioperm
7544 case TARGET_NR_ioperm:
7545 goto unimplemented;
7546 #endif
     /* Socket-family syscalls. Each case dispatches to a do_* helper
      * defined elsewhere in this file; the helpers are presumed to
      * perform the guest/host sockaddr and option translation and to
      * return a target errno on failure. */
7547 #ifdef TARGET_NR_socketcall
7548 case TARGET_NR_socketcall:
     /* Multiplexed socket entry point (older 32-bit ABIs): arg1 is the
      * sub-call number, arg2 points to the packed argument array. */
7549 ret = do_socketcall(arg1, arg2);
7550 break;
7551 #endif
7552 #ifdef TARGET_NR_accept
7553 case TARGET_NR_accept:
     /* Plain accept == accept4 with flags 0. */
7554 ret = do_accept4(arg1, arg2, arg3, 0);
7555 break;
7556 #endif
7557 #ifdef TARGET_NR_accept4
7558 case TARGET_NR_accept4:
7559 #ifdef CONFIG_ACCEPT4
7560 ret = do_accept4(arg1, arg2, arg3, arg4);
7561 #else
7562 goto unimplemented;
7563 #endif
7564 break;
7565 #endif
7566 #ifdef TARGET_NR_bind
7567 case TARGET_NR_bind:
7568 ret = do_bind(arg1, arg2, arg3);
7569 break;
7570 #endif
7571 #ifdef TARGET_NR_connect
7572 case TARGET_NR_connect:
7573 ret = do_connect(arg1, arg2, arg3);
7574 break;
7575 #endif
7576 #ifdef TARGET_NR_getpeername
7577 case TARGET_NR_getpeername:
7578 ret = do_getpeername(arg1, arg2, arg3);
7579 break;
7580 #endif
7581 #ifdef TARGET_NR_getsockname
7582 case TARGET_NR_getsockname:
7583 ret = do_getsockname(arg1, arg2, arg3);
7584 break;
7585 #endif
7586 #ifdef TARGET_NR_getsockopt
7587 case TARGET_NR_getsockopt:
7588 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7589 break;
7590 #endif
7591 #ifdef TARGET_NR_listen
7592 case TARGET_NR_listen:
     /* listen() takes only integers; call the host directly. */
7593 ret = get_errno(listen(arg1, arg2));
7594 break;
7595 #endif
7596 #ifdef TARGET_NR_recv
7597 case TARGET_NR_recv:
7598 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7599 break;
7600 #endif
7601 #ifdef TARGET_NR_recvfrom
7602 case TARGET_NR_recvfrom:
7603 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7604 break;
7605 #endif
7606 #ifdef TARGET_NR_recvmsg
7607 case TARGET_NR_recvmsg:
7608 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7609 break;
7610 #endif
7611 #ifdef TARGET_NR_send
7612 case TARGET_NR_send:
7613 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7614 break;
7615 #endif
7616 #ifdef TARGET_NR_sendmsg
7617 case TARGET_NR_sendmsg:
7618 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7619 break;
7620 #endif
7621 #ifdef TARGET_NR_sendmmsg
7622 case TARGET_NR_sendmmsg:
7623 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7624 break;
7625 case TARGET_NR_recvmmsg:
7626 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7627 break;
7628 #endif
7629 #ifdef TARGET_NR_sendto
7630 case TARGET_NR_sendto:
7631 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7632 break;
7633 #endif
7634 #ifdef TARGET_NR_shutdown
7635 case TARGET_NR_shutdown:
7636 ret = get_errno(shutdown(arg1, arg2));
7637 break;
7638 #endif
7639 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7640 case TARGET_NR_getrandom:
7641 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
7642 if (!p) {
7643 goto efault;
7645 ret = get_errno(getrandom(p, arg2, arg3));
7646 unlock_user(p, arg1, ret);
7647 break;
7648 #endif
7649 #ifdef TARGET_NR_socket
7650 case TARGET_NR_socket:
7651 ret = do_socket(arg1, arg2, arg3);
7652 fd_trans_unregister(ret);
7653 break;
7654 #endif
7655 #ifdef TARGET_NR_socketpair
7656 case TARGET_NR_socketpair:
7657 ret = do_socketpair(arg1, arg2, arg3, arg4);
7658 break;
7659 #endif
7660 #ifdef TARGET_NR_setsockopt
7661 case TARGET_NR_setsockopt:
7662 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7663 break;
7664 #endif
7666 case TARGET_NR_syslog:
7667 if (!(p = lock_user_string(arg2)))
7668 goto efault;
7669 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7670 unlock_user(p, arg2, 0);
7671 break;
7673 case TARGET_NR_setitimer:
7675 struct itimerval value, ovalue, *pvalue;
7677 if (arg2) {
7678 pvalue = &value;
7679 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7680 || copy_from_user_timeval(&pvalue->it_value,
7681 arg2 + sizeof(struct target_timeval)))
7682 goto efault;
7683 } else {
7684 pvalue = NULL;
7686 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7687 if (!is_error(ret) && arg3) {
7688 if (copy_to_user_timeval(arg3,
7689 &ovalue.it_interval)
7690 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7691 &ovalue.it_value))
7692 goto efault;
7695 break;
7696 case TARGET_NR_getitimer:
7698 struct itimerval value;
7700 ret = get_errno(getitimer(arg1, &value));
7701 if (!is_error(ret) && arg2) {
7702 if (copy_to_user_timeval(arg2,
7703 &value.it_interval)
7704 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7705 &value.it_value))
7706 goto efault;
7709 break;
7710 #ifdef TARGET_NR_stat
7711 case TARGET_NR_stat:
7712 if (!(p = lock_user_string(arg1)))
7713 goto efault;
7714 ret = get_errno(stat(path(p), &st));
7715 unlock_user(p, arg1, 0);
7716 goto do_stat;
7717 #endif
7718 #ifdef TARGET_NR_lstat
7719 case TARGET_NR_lstat:
7720 if (!(p = lock_user_string(arg1)))
7721 goto efault;
7722 ret = get_errno(lstat(path(p), &st));
7723 unlock_user(p, arg1, 0);
7724 goto do_stat;
7725 #endif
7726 case TARGET_NR_fstat:
7728 ret = get_errno(fstat(arg1, &st));
7729 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7730 do_stat:
7731 #endif
7732 if (!is_error(ret)) {
7733 struct target_stat *target_st;
7735 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7736 goto efault;
7737 memset(target_st, 0, sizeof(*target_st));
7738 __put_user(st.st_dev, &target_st->st_dev);
7739 __put_user(st.st_ino, &target_st->st_ino);
7740 __put_user(st.st_mode, &target_st->st_mode);
7741 __put_user(st.st_uid, &target_st->st_uid);
7742 __put_user(st.st_gid, &target_st->st_gid);
7743 __put_user(st.st_nlink, &target_st->st_nlink);
7744 __put_user(st.st_rdev, &target_st->st_rdev);
7745 __put_user(st.st_size, &target_st->st_size);
7746 __put_user(st.st_blksize, &target_st->st_blksize);
7747 __put_user(st.st_blocks, &target_st->st_blocks);
7748 __put_user(st.st_atime, &target_st->target_st_atime);
7749 __put_user(st.st_mtime, &target_st->target_st_mtime);
7750 __put_user(st.st_ctime, &target_st->target_st_ctime);
7751 unlock_user_struct(target_st, arg2, 1);
7754 break;
7755 #ifdef TARGET_NR_olduname
7756 case TARGET_NR_olduname:
7757 goto unimplemented;
7758 #endif
7759 #ifdef TARGET_NR_iopl
7760 case TARGET_NR_iopl:
7761 goto unimplemented;
7762 #endif
7763 case TARGET_NR_vhangup:
7764 ret = get_errno(vhangup());
7765 break;
7766 #ifdef TARGET_NR_idle
7767 case TARGET_NR_idle:
7768 goto unimplemented;
7769 #endif
7770 #ifdef TARGET_NR_syscall
7771 case TARGET_NR_syscall:
7772 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7773 arg6, arg7, arg8, 0);
7774 break;
7775 #endif
7776 case TARGET_NR_wait4:
7778 int status;
7779 abi_long status_ptr = arg2;
7780 struct rusage rusage, *rusage_ptr;
7781 abi_ulong target_rusage = arg4;
7782 abi_long rusage_err;
7783 if (target_rusage)
7784 rusage_ptr = &rusage;
7785 else
7786 rusage_ptr = NULL;
7787 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
7788 if (!is_error(ret)) {
7789 if (status_ptr && ret) {
7790 status = host_to_target_waitstatus(status);
7791 if (put_user_s32(status, status_ptr))
7792 goto efault;
7794 if (target_rusage) {
7795 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7796 if (rusage_err) {
7797 ret = rusage_err;
7802 break;
7803 #ifdef TARGET_NR_swapoff
7804 case TARGET_NR_swapoff:
7805 if (!(p = lock_user_string(arg1)))
7806 goto efault;
7807 ret = get_errno(swapoff(p));
7808 unlock_user(p, arg1, 0);
7809 break;
7810 #endif
7811 case TARGET_NR_sysinfo:
7813 struct target_sysinfo *target_value;
7814 struct sysinfo value;
7815 ret = get_errno(sysinfo(&value));
7816 if (!is_error(ret) && arg1)
7818 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7819 goto efault;
7820 __put_user(value.uptime, &target_value->uptime);
7821 __put_user(value.loads[0], &target_value->loads[0]);
7822 __put_user(value.loads[1], &target_value->loads[1]);
7823 __put_user(value.loads[2], &target_value->loads[2]);
7824 __put_user(value.totalram, &target_value->totalram);
7825 __put_user(value.freeram, &target_value->freeram);
7826 __put_user(value.sharedram, &target_value->sharedram);
7827 __put_user(value.bufferram, &target_value->bufferram);
7828 __put_user(value.totalswap, &target_value->totalswap);
7829 __put_user(value.freeswap, &target_value->freeswap);
7830 __put_user(value.procs, &target_value->procs);
7831 __put_user(value.totalhigh, &target_value->totalhigh);
7832 __put_user(value.freehigh, &target_value->freehigh);
7833 __put_user(value.mem_unit, &target_value->mem_unit);
7834 unlock_user_struct(target_value, arg1, 1);
7837 break;
7838 #ifdef TARGET_NR_ipc
7839 case TARGET_NR_ipc:
7840 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7841 break;
7842 #endif
     /* SysV IPC syscalls. Calls with no guest pointers (semget, msgget,
      * shmget) go straight to the host; the rest use do_* helpers that
      * translate the guest-side structures. */
7843 #ifdef TARGET_NR_semget
7844 case TARGET_NR_semget:
7845 ret = get_errno(semget(arg1, arg2, arg3));
7846 break;
7847 #endif
7848 #ifdef TARGET_NR_semop
7849 case TARGET_NR_semop:
     /* Helper returns a target errno itself — no get_errno() wrapper. */
7850 ret = do_semop(arg1, arg2, arg3);
7851 break;
7852 #endif
7853 #ifdef TARGET_NR_semctl
7854 case TARGET_NR_semctl:
7855 ret = do_semctl(arg1, arg2, arg3, arg4);
7856 break;
7857 #endif
7858 #ifdef TARGET_NR_msgctl
7859 case TARGET_NR_msgctl:
7860 ret = do_msgctl(arg1, arg2, arg3);
7861 break;
7862 #endif
7863 #ifdef TARGET_NR_msgget
7864 case TARGET_NR_msgget:
7865 ret = get_errno(msgget(arg1, arg2));
7866 break;
7867 #endif
7868 #ifdef TARGET_NR_msgrcv
7869 case TARGET_NR_msgrcv:
     /* do_msgrcv copies the received message back into the guest's
      * msgbuf and is responsible for the error-case handling — see the
      * helper's definition for the details. */
7870 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7871 break;
7872 #endif
7873 #ifdef TARGET_NR_msgsnd
7874 case TARGET_NR_msgsnd:
7875 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7876 break;
7877 #endif
7878 #ifdef TARGET_NR_shmget
7879 case TARGET_NR_shmget:
7880 ret = get_errno(shmget(arg1, arg2, arg3));
7881 break;
7882 #endif
7883 #ifdef TARGET_NR_shmctl
7884 case TARGET_NR_shmctl:
7885 ret = do_shmctl(arg1, arg2, arg3);
7886 break;
7887 #endif
7888 #ifdef TARGET_NR_shmat
7889 case TARGET_NR_shmat:
     /* Attach must be reflected in the guest address space layout;
      * handled by the do_shmat helper. */
7890 ret = do_shmat(arg1, arg2, arg3);
7891 break;
7892 #endif
7893 #ifdef TARGET_NR_shmdt
7894 case TARGET_NR_shmdt:
7895 ret = do_shmdt(arg1);
7896 break;
7897 #endif
7898 case TARGET_NR_fsync:
7899 ret = get_errno(fsync(arg1));
7900 break;
7901 case TARGET_NR_clone:
7902 /* Linux manages to have three different orderings for its
7903 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7904 * match the kernel's CONFIG_CLONE_* settings.
7905 * Microblaze is further special in that it uses a sixth
7906 * implicit argument to clone for the TLS pointer.
7908 #if defined(TARGET_MICROBLAZE)
7909 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7910 #elif defined(TARGET_CLONE_BACKWARDS)
7911 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7912 #elif defined(TARGET_CLONE_BACKWARDS2)
7913 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7914 #else
7915 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7916 #endif
7917 break;
7918 #ifdef __NR_exit_group
7919 /* new thread calls */
7920 case TARGET_NR_exit_group:
7921 #ifdef TARGET_GPROF
7922 _mcleanup();
7923 #endif
7924 gdb_exit(cpu_env, arg1);
7925 ret = get_errno(exit_group(arg1));
7926 break;
7927 #endif
7928 case TARGET_NR_setdomainname:
7929 if (!(p = lock_user_string(arg1)))
7930 goto efault;
7931 ret = get_errno(setdomainname(p, arg2));
7932 unlock_user(p, arg1, 0);
7933 break;
7934 case TARGET_NR_uname:
7935 /* no need to transcode because we use the linux syscall */
7937 struct new_utsname * buf;
7939 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7940 goto efault;
7941 ret = get_errno(sys_uname(buf));
7942 if (!is_error(ret)) {
7943 /* Overwrite the native machine name with whatever is being
7944 emulated. */
7945 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7946 /* Allow the user to override the reported release. */
7947 if (qemu_uname_release && *qemu_uname_release)
7948 strcpy (buf->release, qemu_uname_release);
7950 unlock_user_struct(buf, arg1, 1);
7952 break;
7953 #ifdef TARGET_I386
7954 case TARGET_NR_modify_ldt:
7955 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7956 break;
7957 #if !defined(TARGET_X86_64)
7958 case TARGET_NR_vm86old:
7959 goto unimplemented;
7960 case TARGET_NR_vm86:
7961 ret = do_vm86(cpu_env, arg1, arg2);
7962 break;
7963 #endif
7964 #endif
7965 case TARGET_NR_adjtimex:
7966 goto unimplemented;
7967 #ifdef TARGET_NR_create_module
7968 case TARGET_NR_create_module:
7969 #endif
7970 case TARGET_NR_init_module:
7971 case TARGET_NR_delete_module:
7972 #ifdef TARGET_NR_get_kernel_syms
7973 case TARGET_NR_get_kernel_syms:
7974 #endif
7975 goto unimplemented;
7976 case TARGET_NR_quotactl:
7977 goto unimplemented;
7978 case TARGET_NR_getpgid:
7979 ret = get_errno(getpgid(arg1));
7980 break;
7981 case TARGET_NR_fchdir:
7982 ret = get_errno(fchdir(arg1));
7983 break;
7984 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7985 case TARGET_NR_bdflush:
7986 goto unimplemented;
7987 #endif
7988 #ifdef TARGET_NR_sysfs
7989 case TARGET_NR_sysfs:
7990 goto unimplemented;
7991 #endif
7992 case TARGET_NR_personality:
7993 ret = get_errno(personality(arg1));
7994 break;
7995 #ifdef TARGET_NR_afs_syscall
7996 case TARGET_NR_afs_syscall:
7997 goto unimplemented;
7998 #endif
7999 #ifdef TARGET_NR__llseek /* Not on alpha */
8000 case TARGET_NR__llseek:
8002 int64_t res;
8003 #if !defined(__NR_llseek)
8004 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
8005 if (res == -1) {
8006 ret = get_errno(res);
8007 } else {
8008 ret = 0;
8010 #else
8011 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
8012 #endif
8013 if ((ret == 0) && put_user_s64(res, arg4)) {
8014 goto efault;
8017 break;
8018 #endif
8019 #ifdef TARGET_NR_getdents
8020 case TARGET_NR_getdents:
8021 #ifdef __NR_getdents
8022 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8024 struct target_dirent *target_dirp;
8025 struct linux_dirent *dirp;
8026 abi_long count = arg3;
8028 dirp = g_try_malloc(count);
8029 if (!dirp) {
8030 ret = -TARGET_ENOMEM;
8031 goto fail;
8034 ret = get_errno(sys_getdents(arg1, dirp, count));
8035 if (!is_error(ret)) {
8036 struct linux_dirent *de;
8037 struct target_dirent *tde;
8038 int len = ret;
8039 int reclen, treclen;
8040 int count1, tnamelen;
8042 count1 = 0;
8043 de = dirp;
8044 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8045 goto efault;
8046 tde = target_dirp;
8047 while (len > 0) {
8048 reclen = de->d_reclen;
8049 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8050 assert(tnamelen >= 0);
8051 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8052 assert(count1 + treclen <= count);
8053 tde->d_reclen = tswap16(treclen);
8054 tde->d_ino = tswapal(de->d_ino);
8055 tde->d_off = tswapal(de->d_off);
8056 memcpy(tde->d_name, de->d_name, tnamelen);
8057 de = (struct linux_dirent *)((char *)de + reclen);
8058 len -= reclen;
8059 tde = (struct target_dirent *)((char *)tde + treclen);
8060 count1 += treclen;
8062 ret = count1;
8063 unlock_user(target_dirp, arg2, ret);
8065 g_free(dirp);
8067 #else
8069 struct linux_dirent *dirp;
8070 abi_long count = arg3;
8072 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8073 goto efault;
8074 ret = get_errno(sys_getdents(arg1, dirp, count));
8075 if (!is_error(ret)) {
8076 struct linux_dirent *de;
8077 int len = ret;
8078 int reclen;
8079 de = dirp;
8080 while (len > 0) {
8081 reclen = de->d_reclen;
8082 if (reclen > len)
8083 break;
8084 de->d_reclen = tswap16(reclen);
8085 tswapls(&de->d_ino);
8086 tswapls(&de->d_off);
8087 de = (struct linux_dirent *)((char *)de + reclen);
8088 len -= reclen;
8091 unlock_user(dirp, arg2, ret);
8093 #endif
8094 #else
8095 /* Implement getdents in terms of getdents64 */
8097 struct linux_dirent64 *dirp;
8098 abi_long count = arg3;
8100 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8101 if (!dirp) {
8102 goto efault;
8104 ret = get_errno(sys_getdents64(arg1, dirp, count));
8105 if (!is_error(ret)) {
8106 /* Convert the dirent64 structs to target dirent. We do this
8107 * in-place, since we can guarantee that a target_dirent is no
8108 * larger than a dirent64; however this means we have to be
8109 * careful to read everything before writing in the new format.
8111 struct linux_dirent64 *de;
8112 struct target_dirent *tde;
8113 int len = ret;
8114 int tlen = 0;
8116 de = dirp;
8117 tde = (struct target_dirent *)dirp;
8118 while (len > 0) {
8119 int namelen, treclen;
8120 int reclen = de->d_reclen;
8121 uint64_t ino = de->d_ino;
8122 int64_t off = de->d_off;
8123 uint8_t type = de->d_type;
8125 namelen = strlen(de->d_name);
8126 treclen = offsetof(struct target_dirent, d_name)
8127 + namelen + 2;
8128 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8130 memmove(tde->d_name, de->d_name, namelen + 1);
8131 tde->d_ino = tswapal(ino);
8132 tde->d_off = tswapal(off);
8133 tde->d_reclen = tswap16(treclen);
8134 /* The target_dirent type is in what was formerly a padding
8135 * byte at the end of the structure:
8137 *(((char *)tde) + treclen - 1) = type;
8139 de = (struct linux_dirent64 *)((char *)de + reclen);
8140 tde = (struct target_dirent *)((char *)tde + treclen);
8141 len -= reclen;
8142 tlen += treclen;
8144 ret = tlen;
8146 unlock_user(dirp, arg2, ret);
8148 #endif
8149 break;
8150 #endif /* TARGET_NR_getdents */
8151 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8152 case TARGET_NR_getdents64:
8154 struct linux_dirent64 *dirp;
8155 abi_long count = arg3;
8156 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8157 goto efault;
8158 ret = get_errno(sys_getdents64(arg1, dirp, count));
8159 if (!is_error(ret)) {
8160 struct linux_dirent64 *de;
8161 int len = ret;
8162 int reclen;
8163 de = dirp;
8164 while (len > 0) {
8165 reclen = de->d_reclen;
8166 if (reclen > len)
8167 break;
8168 de->d_reclen = tswap16(reclen);
8169 tswap64s((uint64_t *)&de->d_ino);
8170 tswap64s((uint64_t *)&de->d_off);
8171 de = (struct linux_dirent64 *)((char *)de + reclen);
8172 len -= reclen;
8175 unlock_user(dirp, arg2, ret);
8177 break;
8178 #endif /* TARGET_NR_getdents64 */
8179 #if defined(TARGET_NR__newselect)
8180 case TARGET_NR__newselect:
8181 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8182 break;
8183 #endif
8184 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8185 # ifdef TARGET_NR_poll
8186 case TARGET_NR_poll:
8187 # endif
8188 # ifdef TARGET_NR_ppoll
8189 case TARGET_NR_ppoll:
8190 # endif
8192 struct target_pollfd *target_pfd;
8193 unsigned int nfds = arg2;
8194 int timeout = arg3;
8195 struct pollfd *pfd;
8196 unsigned int i;
8198 pfd = NULL;
8199 target_pfd = NULL;
8200 if (nfds) {
8201 target_pfd = lock_user(VERIFY_WRITE, arg1,
8202 sizeof(struct target_pollfd) * nfds, 1);
8203 if (!target_pfd) {
8204 goto efault;
8207 pfd = alloca(sizeof(struct pollfd) * nfds);
8208 for (i = 0; i < nfds; i++) {
8209 pfd[i].fd = tswap32(target_pfd[i].fd);
8210 pfd[i].events = tswap16(target_pfd[i].events);
8214 # ifdef TARGET_NR_ppoll
8215 if (num == TARGET_NR_ppoll) {
8216 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8217 target_sigset_t *target_set;
8218 sigset_t _set, *set = &_set;
8220 if (arg3) {
8221 if (target_to_host_timespec(timeout_ts, arg3)) {
8222 unlock_user(target_pfd, arg1, 0);
8223 goto efault;
8225 } else {
8226 timeout_ts = NULL;
8229 if (arg4) {
8230 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8231 if (!target_set) {
8232 unlock_user(target_pfd, arg1, 0);
8233 goto efault;
8235 target_to_host_sigset(set, target_set);
8236 } else {
8237 set = NULL;
8240 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
8242 if (!is_error(ret) && arg3) {
8243 host_to_target_timespec(arg3, timeout_ts);
8245 if (arg4) {
8246 unlock_user(target_set, arg4, 0);
8248 } else
8249 # endif
8250 ret = get_errno(poll(pfd, nfds, timeout));
8252 if (!is_error(ret)) {
8253 for(i = 0; i < nfds; i++) {
8254 target_pfd[i].revents = tswap16(pfd[i].revents);
8257 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8259 break;
8260 #endif
8261 case TARGET_NR_flock:
8262 /* NOTE: the flock constant seems to be the same for every
8263 Linux platform */
8264 ret = get_errno(flock(arg1, arg2));
8265 break;
8266 case TARGET_NR_readv:
8268 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8269 if (vec != NULL) {
8270 ret = get_errno(readv(arg1, vec, arg3));
8271 unlock_iovec(vec, arg2, arg3, 1);
8272 } else {
8273 ret = -host_to_target_errno(errno);
8276 break;
8277 case TARGET_NR_writev:
8279 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8280 if (vec != NULL) {
8281 ret = get_errno(writev(arg1, vec, arg3));
8282 unlock_iovec(vec, arg2, arg3, 0);
8283 } else {
8284 ret = -host_to_target_errno(errno);
8287 break;
8288 case TARGET_NR_getsid:
8289 ret = get_errno(getsid(arg1));
8290 break;
8291 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8292 case TARGET_NR_fdatasync:
8293 ret = get_errno(fdatasync(arg1));
8294 break;
8295 #endif
8296 #ifdef TARGET_NR__sysctl
8297 case TARGET_NR__sysctl:
8298 /* We don't implement this, but ENOTDIR is always a safe
8299 return value. */
8300 ret = -TARGET_ENOTDIR;
8301 break;
8302 #endif
8303 case TARGET_NR_sched_getaffinity:
8305 unsigned int mask_size;
8306 unsigned long *mask;
8309 * sched_getaffinity needs multiples of ulong, so need to take
8310 * care of mismatches between target ulong and host ulong sizes.
8312 if (arg2 & (sizeof(abi_ulong) - 1)) {
8313 ret = -TARGET_EINVAL;
8314 break;
8316 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8318 mask = alloca(mask_size);
8319 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8321 if (!is_error(ret)) {
8322 if (ret > arg2) {
8323 /* More data returned than the caller's buffer will fit.
8324 * This only happens if sizeof(abi_long) < sizeof(long)
8325 * and the caller passed us a buffer holding an odd number
8326 * of abi_longs. If the host kernel is actually using the
8327 * extra 4 bytes then fail EINVAL; otherwise we can just
8328 * ignore them and only copy the interesting part.
8330 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8331 if (numcpus > arg2 * 8) {
8332 ret = -TARGET_EINVAL;
8333 break;
8335 ret = arg2;
8338 if (copy_to_user(arg3, mask, ret)) {
8339 goto efault;
8343 break;
8344 case TARGET_NR_sched_setaffinity:
8346 unsigned int mask_size;
8347 unsigned long *mask;
8350 * sched_setaffinity needs multiples of ulong, so need to take
8351 * care of mismatches between target ulong and host ulong sizes.
8353 if (arg2 & (sizeof(abi_ulong) - 1)) {
8354 ret = -TARGET_EINVAL;
8355 break;
8357 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8359 mask = alloca(mask_size);
8360 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8361 goto efault;
8363 memcpy(mask, p, arg2);
8364 unlock_user_struct(p, arg2, 0);
8366 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8368 break;
8369 case TARGET_NR_sched_setparam:
8371 struct sched_param *target_schp;
8372 struct sched_param schp;
8374 if (arg2 == 0) {
8375 return -TARGET_EINVAL;
8377 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8378 goto efault;
8379 schp.sched_priority = tswap32(target_schp->sched_priority);
8380 unlock_user_struct(target_schp, arg2, 0);
8381 ret = get_errno(sched_setparam(arg1, &schp));
8383 break;
8384 case TARGET_NR_sched_getparam:
8386 struct sched_param *target_schp;
8387 struct sched_param schp;
8389 if (arg2 == 0) {
8390 return -TARGET_EINVAL;
8392 ret = get_errno(sched_getparam(arg1, &schp));
8393 if (!is_error(ret)) {
8394 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8395 goto efault;
8396 target_schp->sched_priority = tswap32(schp.sched_priority);
8397 unlock_user_struct(target_schp, arg2, 1);
8400 break;
8401 case TARGET_NR_sched_setscheduler:
8403 struct sched_param *target_schp;
8404 struct sched_param schp;
8405 if (arg3 == 0) {
8406 return -TARGET_EINVAL;
8408 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8409 goto efault;
8410 schp.sched_priority = tswap32(target_schp->sched_priority);
8411 unlock_user_struct(target_schp, arg3, 0);
8412 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8414 break;
8415 case TARGET_NR_sched_getscheduler:
8416 ret = get_errno(sched_getscheduler(arg1));
8417 break;
8418 case TARGET_NR_sched_yield:
8419 ret = get_errno(sched_yield());
8420 break;
8421 case TARGET_NR_sched_get_priority_max:
8422 ret = get_errno(sched_get_priority_max(arg1));
8423 break;
8424 case TARGET_NR_sched_get_priority_min:
8425 ret = get_errno(sched_get_priority_min(arg1));
8426 break;
8427 case TARGET_NR_sched_rr_get_interval:
8429 struct timespec ts;
8430 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8431 if (!is_error(ret)) {
8432 ret = host_to_target_timespec(arg2, &ts);
8435 break;
8436 case TARGET_NR_nanosleep:
8438 struct timespec req, rem;
8439 target_to_host_timespec(&req, arg1);
8440 ret = get_errno(nanosleep(&req, &rem));
8441 if (is_error(ret) && arg2) {
8442 host_to_target_timespec(arg2, &rem);
8445 break;
8446 #ifdef TARGET_NR_query_module
8447 case TARGET_NR_query_module:
8448 goto unimplemented;
8449 #endif
8450 #ifdef TARGET_NR_nfsservctl
8451 case TARGET_NR_nfsservctl:
8452 goto unimplemented;
8453 #endif
8454 case TARGET_NR_prctl:
8455 switch (arg1) {
8456 case PR_GET_PDEATHSIG:
8458 int deathsig;
8459 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8460 if (!is_error(ret) && arg2
8461 && put_user_ual(deathsig, arg2)) {
8462 goto efault;
8464 break;
8466 #ifdef PR_GET_NAME
8467 case PR_GET_NAME:
8469 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8470 if (!name) {
8471 goto efault;
8473 ret = get_errno(prctl(arg1, (unsigned long)name,
8474 arg3, arg4, arg5));
8475 unlock_user(name, arg2, 16);
8476 break;
8478 case PR_SET_NAME:
8480 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8481 if (!name) {
8482 goto efault;
8484 ret = get_errno(prctl(arg1, (unsigned long)name,
8485 arg3, arg4, arg5));
8486 unlock_user(name, arg2, 0);
8487 break;
8489 #endif
8490 default:
8491 /* Most prctl options have no pointer arguments */
8492 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8493 break;
8495 break;
8496 #ifdef TARGET_NR_arch_prctl
8497 case TARGET_NR_arch_prctl:
8498 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8499 ret = do_arch_prctl(cpu_env, arg1, arg2);
8500 break;
8501 #else
8502 goto unimplemented;
8503 #endif
8504 #endif
8505 #ifdef TARGET_NR_pread64
8506 case TARGET_NR_pread64:
8507 if (regpairs_aligned(cpu_env)) {
8508 arg4 = arg5;
8509 arg5 = arg6;
8511 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8512 goto efault;
8513 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8514 unlock_user(p, arg2, ret);
8515 break;
8516 case TARGET_NR_pwrite64:
8517 if (regpairs_aligned(cpu_env)) {
8518 arg4 = arg5;
8519 arg5 = arg6;
8521 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8522 goto efault;
8523 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8524 unlock_user(p, arg2, 0);
8525 break;
8526 #endif
8527 case TARGET_NR_getcwd:
8528 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8529 goto efault;
8530 ret = get_errno(sys_getcwd1(p, arg2));
8531 unlock_user(p, arg1, ret);
8532 break;
8533 case TARGET_NR_capget:
8534 case TARGET_NR_capset:
8536 struct target_user_cap_header *target_header;
8537 struct target_user_cap_data *target_data = NULL;
8538 struct __user_cap_header_struct header;
8539 struct __user_cap_data_struct data[2];
8540 struct __user_cap_data_struct *dataptr = NULL;
8541 int i, target_datalen;
8542 int data_items = 1;
8544 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8545 goto efault;
8547 header.version = tswap32(target_header->version);
8548 header.pid = tswap32(target_header->pid);
8550 if (header.version != _LINUX_CAPABILITY_VERSION) {
8551 /* Version 2 and up takes pointer to two user_data structs */
8552 data_items = 2;
8555 target_datalen = sizeof(*target_data) * data_items;
8557 if (arg2) {
8558 if (num == TARGET_NR_capget) {
8559 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8560 } else {
8561 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8563 if (!target_data) {
8564 unlock_user_struct(target_header, arg1, 0);
8565 goto efault;
8568 if (num == TARGET_NR_capset) {
8569 for (i = 0; i < data_items; i++) {
8570 data[i].effective = tswap32(target_data[i].effective);
8571 data[i].permitted = tswap32(target_data[i].permitted);
8572 data[i].inheritable = tswap32(target_data[i].inheritable);
8576 dataptr = data;
8579 if (num == TARGET_NR_capget) {
8580 ret = get_errno(capget(&header, dataptr));
8581 } else {
8582 ret = get_errno(capset(&header, dataptr));
8585 /* The kernel always updates version for both capget and capset */
8586 target_header->version = tswap32(header.version);
8587 unlock_user_struct(target_header, arg1, 1);
8589 if (arg2) {
8590 if (num == TARGET_NR_capget) {
8591 for (i = 0; i < data_items; i++) {
8592 target_data[i].effective = tswap32(data[i].effective);
8593 target_data[i].permitted = tswap32(data[i].permitted);
8594 target_data[i].inheritable = tswap32(data[i].inheritable);
8596 unlock_user(target_data, arg2, target_datalen);
8597 } else {
8598 unlock_user(target_data, arg2, 0);
8601 break;
8603 case TARGET_NR_sigaltstack:
8604 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8605 break;
8607 #ifdef CONFIG_SENDFILE
8608 case TARGET_NR_sendfile:
8610 off_t *offp = NULL;
8611 off_t off;
8612 if (arg3) {
8613 ret = get_user_sal(off, arg3);
8614 if (is_error(ret)) {
8615 break;
8617 offp = &off;
8619 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8620 if (!is_error(ret) && arg3) {
8621 abi_long ret2 = put_user_sal(off, arg3);
8622 if (is_error(ret2)) {
8623 ret = ret2;
8626 break;
8628 #ifdef TARGET_NR_sendfile64
8629 case TARGET_NR_sendfile64:
8631 off_t *offp = NULL;
8632 off_t off;
8633 if (arg3) {
8634 ret = get_user_s64(off, arg3);
8635 if (is_error(ret)) {
8636 break;
8638 offp = &off;
8640 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8641 if (!is_error(ret) && arg3) {
8642 abi_long ret2 = put_user_s64(off, arg3);
8643 if (is_error(ret2)) {
8644 ret = ret2;
8647 break;
8649 #endif
8650 #else
8651 case TARGET_NR_sendfile:
8652 #ifdef TARGET_NR_sendfile64
8653 case TARGET_NR_sendfile64:
8654 #endif
8655 goto unimplemented;
8656 #endif
8658 #ifdef TARGET_NR_getpmsg
8659 case TARGET_NR_getpmsg:
8660 goto unimplemented;
8661 #endif
8662 #ifdef TARGET_NR_putpmsg
8663 case TARGET_NR_putpmsg:
8664 goto unimplemented;
8665 #endif
8666 #ifdef TARGET_NR_vfork
8667 case TARGET_NR_vfork:
8668 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8669 0, 0, 0, 0));
8670 break;
8671 #endif
8672 #ifdef TARGET_NR_ugetrlimit
8673 case TARGET_NR_ugetrlimit:
8675 struct rlimit rlim;
8676 int resource = target_to_host_resource(arg1);
8677 ret = get_errno(getrlimit(resource, &rlim));
8678 if (!is_error(ret)) {
8679 struct target_rlimit *target_rlim;
8680 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8681 goto efault;
8682 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8683 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8684 unlock_user_struct(target_rlim, arg2, 1);
8686 break;
8688 #endif
8689 #ifdef TARGET_NR_truncate64
8690 case TARGET_NR_truncate64:
8691 if (!(p = lock_user_string(arg1)))
8692 goto efault;
8693 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8694 unlock_user(p, arg1, 0);
8695 break;
8696 #endif
8697 #ifdef TARGET_NR_ftruncate64
8698 case TARGET_NR_ftruncate64:
8699 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8700 break;
8701 #endif
8702 #ifdef TARGET_NR_stat64
8703 case TARGET_NR_stat64:
8704 if (!(p = lock_user_string(arg1)))
8705 goto efault;
8706 ret = get_errno(stat(path(p), &st));
8707 unlock_user(p, arg1, 0);
8708 if (!is_error(ret))
8709 ret = host_to_target_stat64(cpu_env, arg2, &st);
8710 break;
8711 #endif
8712 #ifdef TARGET_NR_lstat64
8713 case TARGET_NR_lstat64:
8714 if (!(p = lock_user_string(arg1)))
8715 goto efault;
8716 ret = get_errno(lstat(path(p), &st));
8717 unlock_user(p, arg1, 0);
8718 if (!is_error(ret))
8719 ret = host_to_target_stat64(cpu_env, arg2, &st);
8720 break;
8721 #endif
8722 #ifdef TARGET_NR_fstat64
8723 case TARGET_NR_fstat64:
8724 ret = get_errno(fstat(arg1, &st));
8725 if (!is_error(ret))
8726 ret = host_to_target_stat64(cpu_env, arg2, &st);
8727 break;
8728 #endif
8729 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8730 #ifdef TARGET_NR_fstatat64
8731 case TARGET_NR_fstatat64:
8732 #endif
8733 #ifdef TARGET_NR_newfstatat
8734 case TARGET_NR_newfstatat:
8735 #endif
8736 if (!(p = lock_user_string(arg2)))
8737 goto efault;
8738 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8739 if (!is_error(ret))
8740 ret = host_to_target_stat64(cpu_env, arg3, &st);
8741 break;
8742 #endif
8743 #ifdef TARGET_NR_lchown
8744 case TARGET_NR_lchown:
8745 if (!(p = lock_user_string(arg1)))
8746 goto efault;
8747 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8748 unlock_user(p, arg1, 0);
8749 break;
8750 #endif
8751 #ifdef TARGET_NR_getuid
8752 case TARGET_NR_getuid:
8753 ret = get_errno(high2lowuid(getuid()));
8754 break;
8755 #endif
8756 #ifdef TARGET_NR_getgid
8757 case TARGET_NR_getgid:
8758 ret = get_errno(high2lowgid(getgid()));
8759 break;
8760 #endif
8761 #ifdef TARGET_NR_geteuid
8762 case TARGET_NR_geteuid:
8763 ret = get_errno(high2lowuid(geteuid()));
8764 break;
8765 #endif
8766 #ifdef TARGET_NR_getegid
8767 case TARGET_NR_getegid:
8768 ret = get_errno(high2lowgid(getegid()));
8769 break;
8770 #endif
8771 case TARGET_NR_setreuid:
8772 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8773 break;
8774 case TARGET_NR_setregid:
8775 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8776 break;
8777 case TARGET_NR_getgroups:
8779 int gidsetsize = arg1;
8780 target_id *target_grouplist;
8781 gid_t *grouplist;
8782 int i;
8784 grouplist = alloca(gidsetsize * sizeof(gid_t));
8785 ret = get_errno(getgroups(gidsetsize, grouplist));
8786 if (gidsetsize == 0)
8787 break;
8788 if (!is_error(ret)) {
8789 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8790 if (!target_grouplist)
8791 goto efault;
8792 for(i = 0;i < ret; i++)
8793 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8794 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8797 break;
8798 case TARGET_NR_setgroups:
8800 int gidsetsize = arg1;
8801 target_id *target_grouplist;
8802 gid_t *grouplist = NULL;
8803 int i;
8804 if (gidsetsize) {
8805 grouplist = alloca(gidsetsize * sizeof(gid_t));
8806 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8807 if (!target_grouplist) {
8808 ret = -TARGET_EFAULT;
8809 goto fail;
8811 for (i = 0; i < gidsetsize; i++) {
8812 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8814 unlock_user(target_grouplist, arg2, 0);
8816 ret = get_errno(setgroups(gidsetsize, grouplist));
8818 break;
8819 case TARGET_NR_fchown:
8820 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8821 break;
8822 #if defined(TARGET_NR_fchownat)
8823 case TARGET_NR_fchownat:
8824 if (!(p = lock_user_string(arg2)))
8825 goto efault;
8826 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8827 low2highgid(arg4), arg5));
8828 unlock_user(p, arg2, 0);
8829 break;
8830 #endif
8831 #ifdef TARGET_NR_setresuid
8832 case TARGET_NR_setresuid:
8833 ret = get_errno(setresuid(low2highuid(arg1),
8834 low2highuid(arg2),
8835 low2highuid(arg3)));
8836 break;
8837 #endif
8838 #ifdef TARGET_NR_getresuid
8839 case TARGET_NR_getresuid:
8841 uid_t ruid, euid, suid;
8842 ret = get_errno(getresuid(&ruid, &euid, &suid));
8843 if (!is_error(ret)) {
8844 if (put_user_id(high2lowuid(ruid), arg1)
8845 || put_user_id(high2lowuid(euid), arg2)
8846 || put_user_id(high2lowuid(suid), arg3))
8847 goto efault;
8850 break;
8851 #endif
8852 #ifdef TARGET_NR_getresgid
8853 case TARGET_NR_setresgid:
8854 ret = get_errno(setresgid(low2highgid(arg1),
8855 low2highgid(arg2),
8856 low2highgid(arg3)));
8857 break;
8858 #endif
8859 #ifdef TARGET_NR_getresgid
8860 case TARGET_NR_getresgid:
8862 gid_t rgid, egid, sgid;
8863 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8864 if (!is_error(ret)) {
8865 if (put_user_id(high2lowgid(rgid), arg1)
8866 || put_user_id(high2lowgid(egid), arg2)
8867 || put_user_id(high2lowgid(sgid), arg3))
8868 goto efault;
8871 break;
8872 #endif
8873 #ifdef TARGET_NR_chown
8874 case TARGET_NR_chown:
8875 if (!(p = lock_user_string(arg1)))
8876 goto efault;
8877 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8878 unlock_user(p, arg1, 0);
8879 break;
8880 #endif
8881 case TARGET_NR_setuid:
8882 ret = get_errno(setuid(low2highuid(arg1)));
8883 break;
8884 case TARGET_NR_setgid:
8885 ret = get_errno(setgid(low2highgid(arg1)));
8886 break;
8887 case TARGET_NR_setfsuid:
8888 ret = get_errno(setfsuid(arg1));
8889 break;
8890 case TARGET_NR_setfsgid:
8891 ret = get_errno(setfsgid(arg1));
8892 break;
8894 #ifdef TARGET_NR_lchown32
8895 case TARGET_NR_lchown32:
8896 if (!(p = lock_user_string(arg1)))
8897 goto efault;
8898 ret = get_errno(lchown(p, arg2, arg3));
8899 unlock_user(p, arg1, 0);
8900 break;
8901 #endif
8902 #ifdef TARGET_NR_getuid32
8903 case TARGET_NR_getuid32:
8904 ret = get_errno(getuid());
8905 break;
8906 #endif
8908 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8909 /* Alpha specific */
8910 case TARGET_NR_getxuid:
8912 uid_t euid;
8913 euid=geteuid();
8914 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8916 ret = get_errno(getuid());
8917 break;
8918 #endif
8919 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8920 /* Alpha specific */
8921 case TARGET_NR_getxgid:
8923 uid_t egid;
8924 egid=getegid();
8925 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8927 ret = get_errno(getgid());
8928 break;
8929 #endif
8930 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8931 /* Alpha specific */
8932 case TARGET_NR_osf_getsysinfo:
8933 ret = -TARGET_EOPNOTSUPP;
8934 switch (arg1) {
8935 case TARGET_GSI_IEEE_FP_CONTROL:
8937 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8939 /* Copied from linux ieee_fpcr_to_swcr. */
8940 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8941 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8942 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8943 | SWCR_TRAP_ENABLE_DZE
8944 | SWCR_TRAP_ENABLE_OVF);
8945 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8946 | SWCR_TRAP_ENABLE_INE);
8947 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8948 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8950 if (put_user_u64 (swcr, arg2))
8951 goto efault;
8952 ret = 0;
8954 break;
8956 /* case GSI_IEEE_STATE_AT_SIGNAL:
8957 -- Not implemented in linux kernel.
8958 case GSI_UACPROC:
8959 -- Retrieves current unaligned access state; not much used.
8960 case GSI_PROC_TYPE:
8961 -- Retrieves implver information; surely not used.
8962 case GSI_GET_HWRPB:
8963 -- Grabs a copy of the HWRPB; surely not used.
8966 break;
8967 #endif
8968 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8969 /* Alpha specific */
8970 case TARGET_NR_osf_setsysinfo:
8971 ret = -TARGET_EOPNOTSUPP;
8972 switch (arg1) {
8973 case TARGET_SSI_IEEE_FP_CONTROL:
8975 uint64_t swcr, fpcr, orig_fpcr;
8977 if (get_user_u64 (swcr, arg2)) {
8978 goto efault;
8980 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8981 fpcr = orig_fpcr & FPCR_DYN_MASK;
8983 /* Copied from linux ieee_swcr_to_fpcr. */
8984 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8985 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8986 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8987 | SWCR_TRAP_ENABLE_DZE
8988 | SWCR_TRAP_ENABLE_OVF)) << 48;
8989 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8990 | SWCR_TRAP_ENABLE_INE)) << 57;
8991 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8992 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8994 cpu_alpha_store_fpcr(cpu_env, fpcr);
8995 ret = 0;
8997 break;
8999 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
9001 uint64_t exc, fpcr, orig_fpcr;
9002 int si_code;
9004 if (get_user_u64(exc, arg2)) {
9005 goto efault;
9008 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
9010 /* We only add to the exception status here. */
9011 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
9013 cpu_alpha_store_fpcr(cpu_env, fpcr);
9014 ret = 0;
9016 /* Old exceptions are not signaled. */
9017 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9019 /* If any exceptions set by this call,
9020 and are unmasked, send a signal. */
9021 si_code = 0;
9022 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9023 si_code = TARGET_FPE_FLTRES;
9025 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9026 si_code = TARGET_FPE_FLTUND;
9028 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9029 si_code = TARGET_FPE_FLTOVF;
9031 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9032 si_code = TARGET_FPE_FLTDIV;
9034 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9035 si_code = TARGET_FPE_FLTINV;
9037 if (si_code != 0) {
9038 target_siginfo_t info;
9039 info.si_signo = SIGFPE;
9040 info.si_errno = 0;
9041 info.si_code = si_code;
9042 info._sifields._sigfault._addr
9043 = ((CPUArchState *)cpu_env)->pc;
9044 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9047 break;
9049 /* case SSI_NVPAIRS:
9050 -- Used with SSIN_UACPROC to enable unaligned accesses.
9051 case SSI_IEEE_STATE_AT_SIGNAL:
9052 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9053 -- Not implemented in linux kernel
9056 break;
9057 #endif
9058 #ifdef TARGET_NR_osf_sigprocmask
9059 /* Alpha specific. */
9060 case TARGET_NR_osf_sigprocmask:
9062 abi_ulong mask;
9063 int how;
9064 sigset_t set, oldset;
9066 switch(arg1) {
9067 case TARGET_SIG_BLOCK:
9068 how = SIG_BLOCK;
9069 break;
9070 case TARGET_SIG_UNBLOCK:
9071 how = SIG_UNBLOCK;
9072 break;
9073 case TARGET_SIG_SETMASK:
9074 how = SIG_SETMASK;
9075 break;
9076 default:
9077 ret = -TARGET_EINVAL;
9078 goto fail;
9080 mask = arg2;
9081 target_to_host_old_sigset(&set, &mask);
9082 do_sigprocmask(how, &set, &oldset);
9083 host_to_target_old_sigset(&mask, &oldset);
9084 ret = mask;
9086 break;
9087 #endif
9089 #ifdef TARGET_NR_getgid32
9090 case TARGET_NR_getgid32:
9091 ret = get_errno(getgid());
9092 break;
9093 #endif
9094 #ifdef TARGET_NR_geteuid32
9095 case TARGET_NR_geteuid32:
9096 ret = get_errno(geteuid());
9097 break;
9098 #endif
9099 #ifdef TARGET_NR_getegid32
9100 case TARGET_NR_getegid32:
9101 ret = get_errno(getegid());
9102 break;
9103 #endif
9104 #ifdef TARGET_NR_setreuid32
9105 case TARGET_NR_setreuid32:
9106 ret = get_errno(setreuid(arg1, arg2));
9107 break;
9108 #endif
9109 #ifdef TARGET_NR_setregid32
9110 case TARGET_NR_setregid32:
9111 ret = get_errno(setregid(arg1, arg2));
9112 break;
9113 #endif
9114 #ifdef TARGET_NR_getgroups32
9115 case TARGET_NR_getgroups32:
9117 int gidsetsize = arg1;
9118 uint32_t *target_grouplist;
9119 gid_t *grouplist;
9120 int i;
9122 grouplist = alloca(gidsetsize * sizeof(gid_t));
9123 ret = get_errno(getgroups(gidsetsize, grouplist));
9124 if (gidsetsize == 0)
9125 break;
9126 if (!is_error(ret)) {
9127 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9128 if (!target_grouplist) {
9129 ret = -TARGET_EFAULT;
9130 goto fail;
9132 for(i = 0;i < ret; i++)
9133 target_grouplist[i] = tswap32(grouplist[i]);
9134 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9137 break;
9138 #endif
9139 #ifdef TARGET_NR_setgroups32
9140 case TARGET_NR_setgroups32:
9142 int gidsetsize = arg1;
9143 uint32_t *target_grouplist;
9144 gid_t *grouplist;
9145 int i;
9147 grouplist = alloca(gidsetsize * sizeof(gid_t));
9148 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9149 if (!target_grouplist) {
9150 ret = -TARGET_EFAULT;
9151 goto fail;
9153 for(i = 0;i < gidsetsize; i++)
9154 grouplist[i] = tswap32(target_grouplist[i]);
9155 unlock_user(target_grouplist, arg2, 0);
9156 ret = get_errno(setgroups(gidsetsize, grouplist));
9158 break;
9159 #endif
9160 #ifdef TARGET_NR_fchown32
9161 case TARGET_NR_fchown32:
9162 ret = get_errno(fchown(arg1, arg2, arg3));
9163 break;
9164 #endif
9165 #ifdef TARGET_NR_setresuid32
9166 case TARGET_NR_setresuid32:
9167 ret = get_errno(setresuid(arg1, arg2, arg3));
9168 break;
9169 #endif
9170 #ifdef TARGET_NR_getresuid32
9171 case TARGET_NR_getresuid32:
9173 uid_t ruid, euid, suid;
9174 ret = get_errno(getresuid(&ruid, &euid, &suid));
9175 if (!is_error(ret)) {
9176 if (put_user_u32(ruid, arg1)
9177 || put_user_u32(euid, arg2)
9178 || put_user_u32(suid, arg3))
9179 goto efault;
9182 break;
9183 #endif
9184 #ifdef TARGET_NR_setresgid32
9185 case TARGET_NR_setresgid32:
9186 ret = get_errno(setresgid(arg1, arg2, arg3));
9187 break;
9188 #endif
9189 #ifdef TARGET_NR_getresgid32
9190 case TARGET_NR_getresgid32:
9192 gid_t rgid, egid, sgid;
9193 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9194 if (!is_error(ret)) {
9195 if (put_user_u32(rgid, arg1)
9196 || put_user_u32(egid, arg2)
9197 || put_user_u32(sgid, arg3))
9198 goto efault;
9201 break;
9202 #endif
9203 #ifdef TARGET_NR_chown32
9204 case TARGET_NR_chown32:
9205 if (!(p = lock_user_string(arg1)))
9206 goto efault;
9207 ret = get_errno(chown(p, arg2, arg3));
9208 unlock_user(p, arg1, 0);
9209 break;
9210 #endif
9211 #ifdef TARGET_NR_setuid32
9212 case TARGET_NR_setuid32:
9213 ret = get_errno(setuid(arg1));
9214 break;
9215 #endif
9216 #ifdef TARGET_NR_setgid32
9217 case TARGET_NR_setgid32:
9218 ret = get_errno(setgid(arg1));
9219 break;
9220 #endif
9221 #ifdef TARGET_NR_setfsuid32
9222 case TARGET_NR_setfsuid32:
9223 ret = get_errno(setfsuid(arg1));
9224 break;
9225 #endif
9226 #ifdef TARGET_NR_setfsgid32
9227 case TARGET_NR_setfsgid32:
9228 ret = get_errno(setfsgid(arg1));
9229 break;
9230 #endif
9232 case TARGET_NR_pivot_root:
9233 goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* args: void *addr (arg1), size_t length (arg2),
             *       unsigned char *vec (arg3).
             * The kernel fills one status byte per page of the region into
             * vec, so vec is an *output* buffer: it must be locked writable
             * and for its real length, not treated as a NUL-terminated
             * string as the original code did via lock_user_string(). */
            void *a, *v;
            /* NOTE(review): vector length computed with TARGET_PAGE_SIZE;
             * assumes host and target page sizes agree — confirm. */
            abi_ulong vec_len = DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE);

            ret = -TARGET_EFAULT;
            a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                goto efault;
            }
            v = lock_user(VERIFY_WRITE, arg3, vec_len, 0);
            if (!v) {
                goto mincore_fail;
            }
            ret = get_errno(mincore(a, arg2, v));
            /* Copy the per-page status bytes back to the guest. */
            unlock_user(v, arg3, vec_len);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
9250 #ifdef TARGET_NR_arm_fadvise64_64
9251 case TARGET_NR_arm_fadvise64_64:
9254 * arm_fadvise64_64 looks like fadvise64_64 but
9255 * with different argument order
9257 abi_long temp;
9258 temp = arg3;
9259 arg3 = arg4;
9260 arg4 = temp;
9262 #endif
9263 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9264 #ifdef TARGET_NR_fadvise64_64
9265 case TARGET_NR_fadvise64_64:
9266 #endif
9267 #ifdef TARGET_NR_fadvise64
9268 case TARGET_NR_fadvise64:
9269 #endif
9270 #ifdef TARGET_S390X
9271 switch (arg4) {
9272 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9273 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9274 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9275 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9276 default: break;
9278 #endif
9279 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9280 break;
9281 #endif
9282 #ifdef TARGET_NR_madvise
9283 case TARGET_NR_madvise:
9284 /* A straight passthrough may not be safe because qemu sometimes
9285 turns private file-backed mappings into anonymous mappings.
9286 This will break MADV_DONTNEED.
9287 This is a hint, so ignoring and returning success is ok. */
9288 ret = get_errno(0);
9289 break;
9290 #endif
9291 #if TARGET_ABI_BITS == 32
9292 case TARGET_NR_fcntl64:
9294 int cmd;
9295 struct flock64 fl;
9296 struct target_flock64 *target_fl;
9297 #ifdef TARGET_ARM
9298 struct target_eabi_flock64 *target_efl;
9299 #endif
9301 cmd = target_to_host_fcntl_cmd(arg2);
9302 if (cmd == -TARGET_EINVAL) {
9303 ret = cmd;
9304 break;
9307 switch(arg2) {
9308 case TARGET_F_GETLK64:
9309 #ifdef TARGET_ARM
9310 if (((CPUARMState *)cpu_env)->eabi) {
9311 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9312 goto efault;
9313 fl.l_type = tswap16(target_efl->l_type);
9314 fl.l_whence = tswap16(target_efl->l_whence);
9315 fl.l_start = tswap64(target_efl->l_start);
9316 fl.l_len = tswap64(target_efl->l_len);
9317 fl.l_pid = tswap32(target_efl->l_pid);
9318 unlock_user_struct(target_efl, arg3, 0);
9319 } else
9320 #endif
9322 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9323 goto efault;
9324 fl.l_type = tswap16(target_fl->l_type);
9325 fl.l_whence = tswap16(target_fl->l_whence);
9326 fl.l_start = tswap64(target_fl->l_start);
9327 fl.l_len = tswap64(target_fl->l_len);
9328 fl.l_pid = tswap32(target_fl->l_pid);
9329 unlock_user_struct(target_fl, arg3, 0);
9331 ret = get_errno(fcntl(arg1, cmd, &fl));
9332 if (ret == 0) {
9333 #ifdef TARGET_ARM
9334 if (((CPUARMState *)cpu_env)->eabi) {
9335 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9336 goto efault;
9337 target_efl->l_type = tswap16(fl.l_type);
9338 target_efl->l_whence = tswap16(fl.l_whence);
9339 target_efl->l_start = tswap64(fl.l_start);
9340 target_efl->l_len = tswap64(fl.l_len);
9341 target_efl->l_pid = tswap32(fl.l_pid);
9342 unlock_user_struct(target_efl, arg3, 1);
9343 } else
9344 #endif
9346 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9347 goto efault;
9348 target_fl->l_type = tswap16(fl.l_type);
9349 target_fl->l_whence = tswap16(fl.l_whence);
9350 target_fl->l_start = tswap64(fl.l_start);
9351 target_fl->l_len = tswap64(fl.l_len);
9352 target_fl->l_pid = tswap32(fl.l_pid);
9353 unlock_user_struct(target_fl, arg3, 1);
9356 break;
9358 case TARGET_F_SETLK64:
9359 case TARGET_F_SETLKW64:
9360 #ifdef TARGET_ARM
9361 if (((CPUARMState *)cpu_env)->eabi) {
9362 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9363 goto efault;
9364 fl.l_type = tswap16(target_efl->l_type);
9365 fl.l_whence = tswap16(target_efl->l_whence);
9366 fl.l_start = tswap64(target_efl->l_start);
9367 fl.l_len = tswap64(target_efl->l_len);
9368 fl.l_pid = tswap32(target_efl->l_pid);
9369 unlock_user_struct(target_efl, arg3, 0);
9370 } else
9371 #endif
9373 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9374 goto efault;
9375 fl.l_type = tswap16(target_fl->l_type);
9376 fl.l_whence = tswap16(target_fl->l_whence);
9377 fl.l_start = tswap64(target_fl->l_start);
9378 fl.l_len = tswap64(target_fl->l_len);
9379 fl.l_pid = tswap32(target_fl->l_pid);
9380 unlock_user_struct(target_fl, arg3, 0);
9382 ret = get_errno(fcntl(arg1, cmd, &fl));
9383 break;
9384 default:
9385 ret = do_fcntl(arg1, arg2, arg3);
9386 break;
9388 break;
9390 #endif
9391 #ifdef TARGET_NR_cacheflush
9392 case TARGET_NR_cacheflush:
9393 /* self-modifying code is handled automatically, so nothing needed */
9394 ret = 0;
9395 break;
9396 #endif
9397 #ifdef TARGET_NR_security
9398 case TARGET_NR_security:
9399 goto unimplemented;
9400 #endif
9401 #ifdef TARGET_NR_getpagesize
9402 case TARGET_NR_getpagesize:
9403 ret = TARGET_PAGE_SIZE;
9404 break;
9405 #endif
9406 case TARGET_NR_gettid:
9407 ret = get_errno(gettid());
9408 break;
9409 #ifdef TARGET_NR_readahead
9410 case TARGET_NR_readahead:
9411 #if TARGET_ABI_BITS == 32
9412 if (regpairs_aligned(cpu_env)) {
9413 arg2 = arg3;
9414 arg3 = arg4;
9415 arg4 = arg5;
9417 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9418 #else
9419 ret = get_errno(readahead(arg1, arg2, arg3));
9420 #endif
9421 break;
9422 #endif
9423 #ifdef CONFIG_ATTR
9424 #ifdef TARGET_NR_setxattr
9425 case TARGET_NR_listxattr:
9426 case TARGET_NR_llistxattr:
9428 void *p, *b = 0;
9429 if (arg2) {
9430 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9431 if (!b) {
9432 ret = -TARGET_EFAULT;
9433 break;
9436 p = lock_user_string(arg1);
9437 if (p) {
9438 if (num == TARGET_NR_listxattr) {
9439 ret = get_errno(listxattr(p, b, arg3));
9440 } else {
9441 ret = get_errno(llistxattr(p, b, arg3));
9443 } else {
9444 ret = -TARGET_EFAULT;
9446 unlock_user(p, arg1, 0);
9447 unlock_user(b, arg2, arg3);
9448 break;
9450 case TARGET_NR_flistxattr:
9452 void *b = 0;
9453 if (arg2) {
9454 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9455 if (!b) {
9456 ret = -TARGET_EFAULT;
9457 break;
9460 ret = get_errno(flistxattr(arg1, b, arg3));
9461 unlock_user(b, arg2, arg3);
9462 break;
9464 case TARGET_NR_setxattr:
9465 case TARGET_NR_lsetxattr:
9467 void *p, *n, *v = 0;
9468 if (arg3) {
9469 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9470 if (!v) {
9471 ret = -TARGET_EFAULT;
9472 break;
9475 p = lock_user_string(arg1);
9476 n = lock_user_string(arg2);
9477 if (p && n) {
9478 if (num == TARGET_NR_setxattr) {
9479 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9480 } else {
9481 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9483 } else {
9484 ret = -TARGET_EFAULT;
9486 unlock_user(p, arg1, 0);
9487 unlock_user(n, arg2, 0);
9488 unlock_user(v, arg3, 0);
9490 break;
9491 case TARGET_NR_fsetxattr:
9493 void *n, *v = 0;
9494 if (arg3) {
9495 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9496 if (!v) {
9497 ret = -TARGET_EFAULT;
9498 break;
9501 n = lock_user_string(arg2);
9502 if (n) {
9503 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9504 } else {
9505 ret = -TARGET_EFAULT;
9507 unlock_user(n, arg2, 0);
9508 unlock_user(v, arg3, 0);
9510 break;
9511 case TARGET_NR_getxattr:
9512 case TARGET_NR_lgetxattr:
9514 void *p, *n, *v = 0;
9515 if (arg3) {
9516 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9517 if (!v) {
9518 ret = -TARGET_EFAULT;
9519 break;
9522 p = lock_user_string(arg1);
9523 n = lock_user_string(arg2);
9524 if (p && n) {
9525 if (num == TARGET_NR_getxattr) {
9526 ret = get_errno(getxattr(p, n, v, arg4));
9527 } else {
9528 ret = get_errno(lgetxattr(p, n, v, arg4));
9530 } else {
9531 ret = -TARGET_EFAULT;
9533 unlock_user(p, arg1, 0);
9534 unlock_user(n, arg2, 0);
9535 unlock_user(v, arg3, arg4);
9537 break;
9538 case TARGET_NR_fgetxattr:
9540 void *n, *v = 0;
9541 if (arg3) {
9542 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9543 if (!v) {
9544 ret = -TARGET_EFAULT;
9545 break;
9548 n = lock_user_string(arg2);
9549 if (n) {
9550 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9551 } else {
9552 ret = -TARGET_EFAULT;
9554 unlock_user(n, arg2, 0);
9555 unlock_user(v, arg3, arg4);
9557 break;
9558 case TARGET_NR_removexattr:
9559 case TARGET_NR_lremovexattr:
9561 void *p, *n;
9562 p = lock_user_string(arg1);
9563 n = lock_user_string(arg2);
9564 if (p && n) {
9565 if (num == TARGET_NR_removexattr) {
9566 ret = get_errno(removexattr(p, n));
9567 } else {
9568 ret = get_errno(lremovexattr(p, n));
9570 } else {
9571 ret = -TARGET_EFAULT;
9573 unlock_user(p, arg1, 0);
9574 unlock_user(n, arg2, 0);
9576 break;
9577 case TARGET_NR_fremovexattr:
9579 void *n;
9580 n = lock_user_string(arg2);
9581 if (n) {
9582 ret = get_errno(fremovexattr(arg1, n));
9583 } else {
9584 ret = -TARGET_EFAULT;
9586 unlock_user(n, arg2, 0);
9588 break;
9589 #endif
9590 #endif /* CONFIG_ATTR */
9591 #ifdef TARGET_NR_set_thread_area
9592 case TARGET_NR_set_thread_area:
9593 #if defined(TARGET_MIPS)
9594 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9595 ret = 0;
9596 break;
9597 #elif defined(TARGET_CRIS)
9598 if (arg1 & 0xff)
9599 ret = -TARGET_EINVAL;
9600 else {
9601 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9602 ret = 0;
9604 break;
9605 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9606 ret = do_set_thread_area(cpu_env, arg1);
9607 break;
9608 #elif defined(TARGET_M68K)
9610 TaskState *ts = cpu->opaque;
9611 ts->tp_value = arg1;
9612 ret = 0;
9613 break;
9615 #else
9616 goto unimplemented_nowarn;
9617 #endif
9618 #endif
9619 #ifdef TARGET_NR_get_thread_area
9620 case TARGET_NR_get_thread_area:
9621 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9622 ret = do_get_thread_area(cpu_env, arg1);
9623 break;
9624 #elif defined(TARGET_M68K)
9626 TaskState *ts = cpu->opaque;
9627 ret = ts->tp_value;
9628 break;
9630 #else
9631 goto unimplemented_nowarn;
9632 #endif
9633 #endif
9634 #ifdef TARGET_NR_getdomainname
9635 case TARGET_NR_getdomainname:
9636 goto unimplemented_nowarn;
9637 #endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        /* args: clockid_t clk_id (arg1), struct timespec *tp (arg2). */
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Propagate a copy-out fault: host_to_target_timespec()
             * returns -TARGET_EFAULT when arg2 is a bad guest pointer;
             * the original discarded that result and reported success. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
9650 #ifdef TARGET_NR_clock_getres
9651 case TARGET_NR_clock_getres:
9653 struct timespec ts;
9654 ret = get_errno(clock_getres(arg1, &ts));
9655 if (!is_error(ret)) {
9656 host_to_target_timespec(arg2, &ts);
9658 break;
9660 #endif
9661 #ifdef TARGET_NR_clock_nanosleep
9662 case TARGET_NR_clock_nanosleep:
9664 struct timespec ts;
9665 target_to_host_timespec(&ts, arg3);
9666 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9667 if (arg4)
9668 host_to_target_timespec(arg4, &ts);
9670 #if defined(TARGET_PPC)
9671 /* clock_nanosleep is odd in that it returns positive errno values.
9672 * On PPC, CR0 bit 3 should be set in such a situation. */
9673 if (ret) {
9674 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9676 #endif
9677 break;
9679 #endif
9681 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9682 case TARGET_NR_set_tid_address:
9683 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9684 break;
9685 #endif
9687 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9688 case TARGET_NR_tkill:
9689 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9690 break;
9691 #endif
9693 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9694 case TARGET_NR_tgkill:
9695 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9696 target_to_host_signal(arg3)));
9697 break;
9698 #endif
9700 #ifdef TARGET_NR_set_robust_list
9701 case TARGET_NR_set_robust_list:
9702 case TARGET_NR_get_robust_list:
9703 /* The ABI for supporting robust futexes has userspace pass
9704 * the kernel a pointer to a linked list which is updated by
9705 * userspace after the syscall; the list is walked by the kernel
9706 * when the thread exits. Since the linked list in QEMU guest
9707 * memory isn't a valid linked list for the host and we have
9708 * no way to reliably intercept the thread-death event, we can't
9709 * support these. Silently return ENOSYS so that guest userspace
9710 * falls back to a non-robust futex implementation (which should
9711 * be OK except in the corner case of the guest crashing while
9712 * holding a mutex that is shared with another process via
9713 * shared memory).
9715 goto unimplemented_nowarn;
9716 #endif
9718 #if defined(TARGET_NR_utimensat)
9719 case TARGET_NR_utimensat:
9721 struct timespec *tsp, ts[2];
9722 if (!arg3) {
9723 tsp = NULL;
9724 } else {
9725 target_to_host_timespec(ts, arg3);
9726 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9727 tsp = ts;
9729 if (!arg2)
9730 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9731 else {
9732 if (!(p = lock_user_string(arg2))) {
9733 ret = -TARGET_EFAULT;
9734 goto fail;
9736 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9737 unlock_user(p, arg2, 0);
9740 break;
9741 #endif
9742 case TARGET_NR_futex:
9743 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9744 break;
9745 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9746 case TARGET_NR_inotify_init:
9747 ret = get_errno(sys_inotify_init());
9748 break;
9749 #endif
9750 #ifdef CONFIG_INOTIFY1
9751 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9752 case TARGET_NR_inotify_init1:
9753 ret = get_errno(sys_inotify_init1(arg1));
9754 break;
9755 #endif
9756 #endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Watch the object named by guest path arg2 on inotify fd arg1
         * with event mask arg3.  lock_user_string() returns NULL for a
         * bad guest pointer; the original passed that NULL straight to
         * path(), so fault out explicitly instead. */
        p = lock_user_string(arg2);
        if (!p) {
            goto efault;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
9764 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9765 case TARGET_NR_inotify_rm_watch:
9766 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9767 break;
9768 #endif
9770 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9771 case TARGET_NR_mq_open:
9773 struct mq_attr posix_mq_attr, *attrp;
9775 p = lock_user_string(arg1 - 1);
9776 if (arg4 != 0) {
9777 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9778 attrp = &posix_mq_attr;
9779 } else {
9780 attrp = 0;
9782 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9783 unlock_user (p, arg1, 0);
9785 break;
9787 case TARGET_NR_mq_unlink:
9788 p = lock_user_string(arg1 - 1);
9789 ret = get_errno(mq_unlink(p));
9790 unlock_user (p, arg1, 0);
9791 break;
9793 case TARGET_NR_mq_timedsend:
9795 struct timespec ts;
9797 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9798 if (arg5 != 0) {
9799 target_to_host_timespec(&ts, arg5);
9800 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9801 host_to_target_timespec(arg5, &ts);
9803 else
9804 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9805 unlock_user (p, arg2, arg3);
9807 break;
9809 case TARGET_NR_mq_timedreceive:
9811 struct timespec ts;
9812 unsigned int prio;
9814 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9815 if (arg5 != 0) {
9816 target_to_host_timespec(&ts, arg5);
9817 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9818 host_to_target_timespec(arg5, &ts);
9820 else
9821 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9822 unlock_user (p, arg2, arg3);
9823 if (arg4 != 0)
9824 put_user_u32(prio, arg4);
9826 break;
9828 /* Not implemented for now... */
9829 /* case TARGET_NR_mq_notify: */
9830 /* break; */
9832 case TARGET_NR_mq_getsetattr:
9834 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9835 ret = 0;
9836 if (arg3 != 0) {
9837 ret = mq_getattr(arg1, &posix_mq_attr_out);
9838 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9840 if (arg2 != 0) {
9841 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9842 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9846 break;
9847 #endif
9849 #ifdef CONFIG_SPLICE
9850 #ifdef TARGET_NR_tee
9851 case TARGET_NR_tee:
9853 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9855 break;
9856 #endif
9857 #ifdef TARGET_NR_splice
9858 case TARGET_NR_splice:
9860 loff_t loff_in, loff_out;
9861 loff_t *ploff_in = NULL, *ploff_out = NULL;
9862 if (arg2) {
9863 if (get_user_u64(loff_in, arg2)) {
9864 goto efault;
9866 ploff_in = &loff_in;
9868 if (arg4) {
9869 if (get_user_u64(loff_out, arg4)) {
9870 goto efault;
9872 ploff_out = &loff_out;
9874 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9875 if (arg2) {
9876 if (put_user_u64(loff_in, arg2)) {
9877 goto efault;
9880 if (arg4) {
9881 if (put_user_u64(loff_out, arg4)) {
9882 goto efault;
9886 break;
9887 #endif
9888 #ifdef TARGET_NR_vmsplice
9889 case TARGET_NR_vmsplice:
9891 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9892 if (vec != NULL) {
9893 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9894 unlock_iovec(vec, arg2, arg3, 0);
9895 } else {
9896 ret = -host_to_target_errno(errno);
9899 break;
9900 #endif
9901 #endif /* CONFIG_SPLICE */
9902 #ifdef CONFIG_EVENTFD
9903 #if defined(TARGET_NR_eventfd)
9904 case TARGET_NR_eventfd:
9905 ret = get_errno(eventfd(arg1, 0));
9906 fd_trans_unregister(ret);
9907 break;
9908 #endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate the target's O_NONBLOCK/O_CLOEXEC bits into the
         * host's values; any other flag bits pass through untouched. */
        int host_flags = arg2 & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC);

        host_flags |= (arg2 & TARGET_O_NONBLOCK) ? O_NONBLOCK : 0;
        host_flags |= (arg2 & TARGET_O_CLOEXEC) ? O_CLOEXEC : 0;
        ret = get_errno(eventfd(arg1, host_flags));
        fd_trans_unregister(ret);
        break;
    }
#endif
9924 #endif /* CONFIG_EVENTFD */
9925 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9926 case TARGET_NR_fallocate:
9927 #if TARGET_ABI_BITS == 32
9928 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9929 target_offset64(arg5, arg6)));
9930 #else
9931 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9932 #endif
9933 break;
9934 #endif
9935 #if defined(CONFIG_SYNC_FILE_RANGE)
9936 #if defined(TARGET_NR_sync_file_range)
9937 case TARGET_NR_sync_file_range:
9938 #if TARGET_ABI_BITS == 32
9939 #if defined(TARGET_MIPS)
9940 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9941 target_offset64(arg5, arg6), arg7));
9942 #else
9943 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9944 target_offset64(arg4, arg5), arg6));
9945 #endif /* !TARGET_MIPS */
9946 #else
9947 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9948 #endif
9949 break;
9950 #endif
9951 #if defined(TARGET_NR_sync_file_range2)
9952 case TARGET_NR_sync_file_range2:
9953 /* This is like sync_file_range but the arguments are reordered */
9954 #if TARGET_ABI_BITS == 32
9955 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9956 target_offset64(arg5, arg6), arg2));
9957 #else
9958 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9959 #endif
9960 break;
9961 #endif
9962 #endif
9963 #if defined(TARGET_NR_signalfd4)
9964 case TARGET_NR_signalfd4:
9965 ret = do_signalfd4(arg1, arg2, arg4);
9966 break;
9967 #endif
9968 #if defined(TARGET_NR_signalfd)
9969 case TARGET_NR_signalfd:
9970 ret = do_signalfd4(arg1, arg2, 0);
9971 break;
9972 #endif
9973 #if defined(CONFIG_EPOLL)
9974 #if defined(TARGET_NR_epoll_create)
9975 case TARGET_NR_epoll_create:
9976 ret = get_errno(epoll_create(arg1));
9977 break;
9978 #endif
9979 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9980 case TARGET_NR_epoll_create1:
9981 ret = get_errno(epoll_create1(arg1));
9982 break;
9983 #endif
9984 #if defined(TARGET_NR_epoll_ctl)
9985 case TARGET_NR_epoll_ctl:
9987 struct epoll_event ep;
9988 struct epoll_event *epp = 0;
9989 if (arg4) {
9990 struct target_epoll_event *target_ep;
9991 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9992 goto efault;
9994 ep.events = tswap32(target_ep->events);
9995 /* The epoll_data_t union is just opaque data to the kernel,
9996 * so we transfer all 64 bits across and need not worry what
9997 * actual data type it is.
9999 ep.data.u64 = tswap64(target_ep->data.u64);
10000 unlock_user_struct(target_ep, arg4, 0);
10001 epp = &ep;
10003 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
10004 break;
10006 #endif
10008 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
10009 #define IMPLEMENT_EPOLL_PWAIT
10010 #endif
10011 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
10012 #if defined(TARGET_NR_epoll_wait)
10013 case TARGET_NR_epoll_wait:
10014 #endif
10015 #if defined(IMPLEMENT_EPOLL_PWAIT)
10016 case TARGET_NR_epoll_pwait:
10017 #endif
10019 struct target_epoll_event *target_ep;
10020 struct epoll_event *ep;
10021 int epfd = arg1;
10022 int maxevents = arg3;
10023 int timeout = arg4;
10025 target_ep = lock_user(VERIFY_WRITE, arg2,
10026 maxevents * sizeof(struct target_epoll_event), 1);
10027 if (!target_ep) {
10028 goto efault;
10031 ep = alloca(maxevents * sizeof(struct epoll_event));
10033 switch (num) {
10034 #if defined(IMPLEMENT_EPOLL_PWAIT)
10035 case TARGET_NR_epoll_pwait:
10037 target_sigset_t *target_set;
10038 sigset_t _set, *set = &_set;
10040 if (arg5) {
10041 target_set = lock_user(VERIFY_READ, arg5,
10042 sizeof(target_sigset_t), 1);
10043 if (!target_set) {
10044 unlock_user(target_ep, arg2, 0);
10045 goto efault;
10047 target_to_host_sigset(set, target_set);
10048 unlock_user(target_set, arg5, 0);
10049 } else {
10050 set = NULL;
10053 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
10054 break;
10056 #endif
10057 #if defined(TARGET_NR_epoll_wait)
10058 case TARGET_NR_epoll_wait:
10059 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
10060 break;
10061 #endif
10062 default:
10063 ret = -TARGET_ENOSYS;
10065 if (!is_error(ret)) {
10066 int i;
10067 for (i = 0; i < ret; i++) {
10068 target_ep[i].events = tswap32(ep[i].events);
10069 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10072 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10073 break;
10075 #endif
10076 #endif
10077 #ifdef TARGET_NR_prlimit64
10078 case TARGET_NR_prlimit64:
10080 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10081 struct target_rlimit64 *target_rnew, *target_rold;
10082 struct host_rlimit64 rnew, rold, *rnewp = 0;
10083 int resource = target_to_host_resource(arg2);
10084 if (arg3) {
10085 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10086 goto efault;
10088 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10089 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10090 unlock_user_struct(target_rnew, arg3, 0);
10091 rnewp = &rnew;
10094 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10095 if (!is_error(ret) && arg4) {
10096 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10097 goto efault;
10099 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10100 target_rold->rlim_max = tswap64(rold.rlim_max);
10101 unlock_user_struct(target_rold, arg4, 1);
10103 break;
10105 #endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Copy the host's hostname into the guest buffer at arg1,
         * capacity arg2.  Default to EFAULT; overwrite on a good lock. */
        char *buf;

        ret = -TARGET_EFAULT;
        buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (buf) {
            ret = get_errno(gethostname(buf, arg2));
            unlock_user(buf, arg1, arg2);
        }
        break;
    }
#endif
10119 #ifdef TARGET_NR_atomic_cmpxchg_32
10120 case TARGET_NR_atomic_cmpxchg_32:
10122 /* should use start_exclusive from main.c */
10123 abi_ulong mem_value;
10124 if (get_user_u32(mem_value, arg6)) {
10125 target_siginfo_t info;
10126 info.si_signo = SIGSEGV;
10127 info.si_errno = 0;
10128 info.si_code = TARGET_SEGV_MAPERR;
10129 info._sifields._sigfault._addr = arg6;
10130 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10131 ret = 0xdeadbeef;
10134 if (mem_value == arg2)
10135 put_user_u32(arg1, arg6);
10136 ret = mem_value;
10137 break;
10139 #endif
10140 #ifdef TARGET_NR_atomic_barrier
10141 case TARGET_NR_atomic_barrier:
10143 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10144 ret = 0;
10145 break;
10147 #endif
10149 #ifdef TARGET_NR_timer_create
10150 case TARGET_NR_timer_create:
10152 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10154 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10156 int clkid = arg1;
10157 int timer_index = next_free_host_timer();
10159 if (timer_index < 0) {
10160 ret = -TARGET_EAGAIN;
10161 } else {
10162 timer_t *phtimer = g_posix_timers + timer_index;
10164 if (arg2) {
10165 phost_sevp = &host_sevp;
10166 ret = target_to_host_sigevent(phost_sevp, arg2);
10167 if (ret != 0) {
10168 break;
10172 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10173 if (ret) {
10174 phtimer = NULL;
10175 } else {
10176 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10177 goto efault;
10181 break;
10183 #endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* The previous timer value goes back through arg4, and only
             * when the caller supplied it.  The original wrote it through
             * arg2 — the *flags* argument — unconditionally, corrupting
             * guest memory at the flags value. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
10209 #ifdef TARGET_NR_timer_gettime
10210 case TARGET_NR_timer_gettime:
10212 /* args: timer_t timerid, struct itimerspec *curr_value */
10213 target_timer_t timerid = get_timer_id(arg1);
10215 if (timerid < 0) {
10216 ret = timerid;
10217 } else if (!arg2) {
10218 ret = -TARGET_EFAULT;
10219 } else {
10220 timer_t htimer = g_posix_timers[timerid];
10221 struct itimerspec hspec;
10222 ret = get_errno(timer_gettime(htimer, &hspec));
10224 if (host_to_target_itimerspec(arg2, &hspec)) {
10225 ret = -TARGET_EFAULT;
10228 break;
10230 #endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* No fd_trans_unregister() here: ret is an overrun count, not a
         * file descriptor.  The original's fd_trans_unregister(ret) could
         * drop the translator of an unrelated fd whose number happened to
         * equal the overrun count. */
        break;
    }
#endif
10249 #ifdef TARGET_NR_timer_delete
10250 case TARGET_NR_timer_delete:
10252 /* args: timer_t timerid */
10253 target_timer_t timerid = get_timer_id(arg1);
10255 if (timerid < 0) {
10256 ret = timerid;
10257 } else {
10258 timer_t htimer = g_posix_timers[timerid];
10259 ret = get_errno(timer_delete(htimer));
10260 g_posix_timers[timerid] = 0;
10262 break;
10264 #endif
10266 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10267 case TARGET_NR_timerfd_create:
10268 ret = get_errno(timerfd_create(arg1,
10269 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
10270 break;
10271 #endif
10273 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10274 case TARGET_NR_timerfd_gettime:
10276 struct itimerspec its_curr;
10278 ret = get_errno(timerfd_gettime(arg1, &its_curr));
10280 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
10281 goto efault;
10284 break;
10285 #endif
10287 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10288 case TARGET_NR_timerfd_settime:
10290 struct itimerspec its_new, its_old, *p_new;
10292 if (arg3) {
10293 if (target_to_host_itimerspec(&its_new, arg3)) {
10294 goto efault;
10296 p_new = &its_new;
10297 } else {
10298 p_new = NULL;
10301 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
10303 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
10304 goto efault;
10307 break;
10308 #endif
10310 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10311 case TARGET_NR_ioprio_get:
10312 ret = get_errno(ioprio_get(arg1, arg2));
10313 break;
10314 #endif
10316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10317 case TARGET_NR_ioprio_set:
10318 ret = get_errno(ioprio_set(arg1, arg2, arg3));
10319 break;
10320 #endif
10322 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10323 case TARGET_NR_setns:
10324 ret = get_errno(setns(arg1, arg2));
10325 break;
10326 #endif
10327 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10328 case TARGET_NR_unshare:
10329 ret = get_errno(unshare(arg1));
10330 break;
10331 #endif
10333 default:
10334 unimplemented:
10335 gemu_log("qemu: Unsupported syscall: %d\n", num);
10336 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10337 unimplemented_nowarn:
10338 #endif
10339 ret = -TARGET_ENOSYS;
10340 break;
10342 fail:
10343 #ifdef DEBUG
10344 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10345 #endif
10346 if(do_strace)
10347 print_syscall_ret(num, ret);
10348 return ret;
10349 efault:
10350 ret = -TARGET_EFAULT;
10351 goto fail;