/*
 * linux-user: Use safe_syscall for wait system calls
 * Source: qemu/kevin.git, linux-user/syscall.c
 * blob d9f4695624b90ade034f605b903ec4fc2ed5a641
 */
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
/* Clone flags that the NPTL thread model requires us to pass through. */
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Redefine the _syscallN() helper macros: each expands to a small static
 * function invoking the raw host syscall() with the given arguments. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
/* Map our internal sys_* wrapper names onto the host syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
202 #ifdef __NR_gettid
203 _syscall0(int, gettid)
204 #else
205 /* This is a replacement for the host gettid() and must return a host
206 errno. */
207 static int gettid(void) {
208 return -ENOSYS;
210 #endif
211 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
212 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
213 #endif
214 #if !defined(__NR_getdents) || \
215 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
216 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
217 #endif
218 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
219 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
220 loff_t *, res, uint, wh);
221 #endif
222 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
223 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
224 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
225 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
226 #endif
227 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
228 _syscall2(int,sys_tkill,int,tid,int,sig)
229 #endif
230 #ifdef __NR_exit_group
231 _syscall1(int,exit_group,int,error_code)
232 #endif
233 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
234 _syscall1(int,set_tid_address,int *,tidptr)
235 #endif
236 #if defined(TARGET_NR_futex) && defined(__NR_futex)
237 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
238 const struct timespec *,timeout,int *,uaddr2,int,val3)
239 #endif
240 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
241 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
244 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
245 unsigned long *, user_mask_ptr);
246 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
247 void *, arg);
248 _syscall2(int, capget, struct __user_cap_header_struct *, header,
249 struct __user_cap_data_struct *, data);
250 _syscall2(int, capset, struct __user_cap_header_struct *, header,
251 struct __user_cap_data_struct *, data);
252 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
253 _syscall2(int, ioprio_get, int, which, int, who)
254 #endif
255 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
256 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
257 #endif
258 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
259 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
260 #endif
262 static bitmask_transtbl fcntl_flags_tbl[] = {
263 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
264 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
265 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
266 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
267 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
268 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
269 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
270 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
271 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
272 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
273 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
274 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
275 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
276 #if defined(O_DIRECT)
277 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
278 #endif
279 #if defined(O_NOATIME)
280 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
281 #endif
282 #if defined(O_CLOEXEC)
283 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
284 #endif
285 #if defined(O_PATH)
286 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
287 #endif
288 /* Don't terminate the list prematurely on 64-bit host+guest. */
289 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
290 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
291 #endif
292 { 0, 0, 0, 0 }
295 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
296 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
297 typedef struct TargetFdTrans {
298 TargetFdDataFunc host_to_target_data;
299 TargetFdDataFunc target_to_host_data;
300 TargetFdAddrFunc target_to_host_addr;
301 } TargetFdTrans;
303 static TargetFdTrans **target_fd_trans;
305 static unsigned int target_fd_max;
307 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
309 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
310 return target_fd_trans[fd]->host_to_target_data;
312 return NULL;
315 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
317 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
318 return target_fd_trans[fd]->target_to_host_addr;
320 return NULL;
323 static void fd_trans_register(int fd, TargetFdTrans *trans)
325 unsigned int oldmax;
327 if (fd >= target_fd_max) {
328 oldmax = target_fd_max;
329 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
330 target_fd_trans = g_renew(TargetFdTrans *,
331 target_fd_trans, target_fd_max);
332 memset((void *)(target_fd_trans + oldmax), 0,
333 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
335 target_fd_trans[fd] = trans;
338 static void fd_trans_unregister(int fd)
340 if (fd >= 0 && fd < target_fd_max) {
341 target_fd_trans[fd] = NULL;
345 static void fd_trans_dup(int oldfd, int newfd)
347 fd_trans_unregister(newfd);
348 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
349 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() variant matching the kernel syscall convention: on success
 * returns the length of the path including the trailing NUL; on failure
 * returns -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
/* Host has utimensat()/futimens(); a NULL pathname means "the fd itself". */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL) {
        return futimens(dirfd, times);
    } else {
        return utimensat(dirfd, pathname, times, flags);
    }
}
#elif defined(__NR_utimensat)
/* No libc wrapper; call the host syscall directly. */
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host has no utimensat at all: fail with ENOSYS like the kernel would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers around the host inotify API, compiled only when both the
 * target exposes the syscall and the host supports it. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
/* Direct syscall wrapper for ppoll (the libc wrapper's signature differs). */
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif
#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
/* Direct syscall wrapper for pselect6; 'sig' is the kernel's sigset tuple. */
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers and return its index,
 * or -1 when all slots are in use. The slot is marked busy by storing
 * a placeholder value of 1. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
492 #define ERRNO_TABLE_SIZE 1200
494 /* target_to_host_errno_table[] is initialized from
495 * host_to_target_errno_table[] in syscall_init(). */
496 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
500 * This list is the union of errno values overridden in asm-<arch>/errno.h
501 * minus the errnos that are not actually generic to all archs.
503 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
504 [EAGAIN] = TARGET_EAGAIN,
505 [EIDRM] = TARGET_EIDRM,
506 [ECHRNG] = TARGET_ECHRNG,
507 [EL2NSYNC] = TARGET_EL2NSYNC,
508 [EL3HLT] = TARGET_EL3HLT,
509 [EL3RST] = TARGET_EL3RST,
510 [ELNRNG] = TARGET_ELNRNG,
511 [EUNATCH] = TARGET_EUNATCH,
512 [ENOCSI] = TARGET_ENOCSI,
513 [EL2HLT] = TARGET_EL2HLT,
514 [EDEADLK] = TARGET_EDEADLK,
515 [ENOLCK] = TARGET_ENOLCK,
516 [EBADE] = TARGET_EBADE,
517 [EBADR] = TARGET_EBADR,
518 [EXFULL] = TARGET_EXFULL,
519 [ENOANO] = TARGET_ENOANO,
520 [EBADRQC] = TARGET_EBADRQC,
521 [EBADSLT] = TARGET_EBADSLT,
522 [EBFONT] = TARGET_EBFONT,
523 [ENOSTR] = TARGET_ENOSTR,
524 [ENODATA] = TARGET_ENODATA,
525 [ETIME] = TARGET_ETIME,
526 [ENOSR] = TARGET_ENOSR,
527 [ENONET] = TARGET_ENONET,
528 [ENOPKG] = TARGET_ENOPKG,
529 [EREMOTE] = TARGET_EREMOTE,
530 [ENOLINK] = TARGET_ENOLINK,
531 [EADV] = TARGET_EADV,
532 [ESRMNT] = TARGET_ESRMNT,
533 [ECOMM] = TARGET_ECOMM,
534 [EPROTO] = TARGET_EPROTO,
535 [EDOTDOT] = TARGET_EDOTDOT,
536 [EMULTIHOP] = TARGET_EMULTIHOP,
537 [EBADMSG] = TARGET_EBADMSG,
538 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
539 [EOVERFLOW] = TARGET_EOVERFLOW,
540 [ENOTUNIQ] = TARGET_ENOTUNIQ,
541 [EBADFD] = TARGET_EBADFD,
542 [EREMCHG] = TARGET_EREMCHG,
543 [ELIBACC] = TARGET_ELIBACC,
544 [ELIBBAD] = TARGET_ELIBBAD,
545 [ELIBSCN] = TARGET_ELIBSCN,
546 [ELIBMAX] = TARGET_ELIBMAX,
547 [ELIBEXEC] = TARGET_ELIBEXEC,
548 [EILSEQ] = TARGET_EILSEQ,
549 [ENOSYS] = TARGET_ENOSYS,
550 [ELOOP] = TARGET_ELOOP,
551 [ERESTART] = TARGET_ERESTART,
552 [ESTRPIPE] = TARGET_ESTRPIPE,
553 [ENOTEMPTY] = TARGET_ENOTEMPTY,
554 [EUSERS] = TARGET_EUSERS,
555 [ENOTSOCK] = TARGET_ENOTSOCK,
556 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
557 [EMSGSIZE] = TARGET_EMSGSIZE,
558 [EPROTOTYPE] = TARGET_EPROTOTYPE,
559 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
560 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
561 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
562 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
563 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
564 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
565 [EADDRINUSE] = TARGET_EADDRINUSE,
566 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
567 [ENETDOWN] = TARGET_ENETDOWN,
568 [ENETUNREACH] = TARGET_ENETUNREACH,
569 [ENETRESET] = TARGET_ENETRESET,
570 [ECONNABORTED] = TARGET_ECONNABORTED,
571 [ECONNRESET] = TARGET_ECONNRESET,
572 [ENOBUFS] = TARGET_ENOBUFS,
573 [EISCONN] = TARGET_EISCONN,
574 [ENOTCONN] = TARGET_ENOTCONN,
575 [EUCLEAN] = TARGET_EUCLEAN,
576 [ENOTNAM] = TARGET_ENOTNAM,
577 [ENAVAIL] = TARGET_ENAVAIL,
578 [EISNAM] = TARGET_EISNAM,
579 [EREMOTEIO] = TARGET_EREMOTEIO,
580 [ESHUTDOWN] = TARGET_ESHUTDOWN,
581 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
582 [ETIMEDOUT] = TARGET_ETIMEDOUT,
583 [ECONNREFUSED] = TARGET_ECONNREFUSED,
584 [EHOSTDOWN] = TARGET_EHOSTDOWN,
585 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
586 [EALREADY] = TARGET_EALREADY,
587 [EINPROGRESS] = TARGET_EINPROGRESS,
588 [ESTALE] = TARGET_ESTALE,
589 [ECANCELED] = TARGET_ECANCELED,
590 [ENOMEDIUM] = TARGET_ENOMEDIUM,
591 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
592 #ifdef ENOKEY
593 [ENOKEY] = TARGET_ENOKEY,
594 #endif
595 #ifdef EKEYEXPIRED
596 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
597 #endif
598 #ifdef EKEYREVOKED
599 [EKEYREVOKED] = TARGET_EKEYREVOKED,
600 #endif
601 #ifdef EKEYREJECTED
602 [EKEYREJECTED] = TARGET_EKEYREJECTED,
603 #endif
604 #ifdef EOWNERDEAD
605 [EOWNERDEAD] = TARGET_EOWNERDEAD,
606 #endif
607 #ifdef ENOTRECOVERABLE
608 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
609 #endif
612 static inline int host_to_target_errno(int err)
614 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
615 host_to_target_errno_table[err]) {
616 return host_to_target_errno_table[err];
618 return err;
621 static inline int target_to_host_errno(int err)
623 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
624 target_to_host_errno_table[err]) {
625 return target_to_host_errno_table[err];
627 return err;
630 static inline abi_long get_errno(abi_long ret)
632 if (ret == -1)
633 return -host_to_target_errno(errno);
634 else
635 return ret;
638 static inline int is_error(abi_long ret)
640 return (abi_ulong)ret >= (abi_ulong)(-4096);
643 char *target_strerror(int err)
645 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
646 return NULL;
648 return strerror(target_to_host_errno(err));
/* safe_syscallN() macros: like _syscallN() but routed through
 * safe_syscall(), the guest-signal-aware syscall entry point. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
698 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
699 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
700 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
701 int, flags, mode_t, mode)
702 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
703 struct rusage *, rusage)
704 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
705 int, options, struct rusage *, rusage)
707 static inline int host_to_target_sock_type(int host_type)
709 int target_type;
711 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
712 case SOCK_DGRAM:
713 target_type = TARGET_SOCK_DGRAM;
714 break;
715 case SOCK_STREAM:
716 target_type = TARGET_SOCK_STREAM;
717 break;
718 default:
719 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
720 break;
723 #if defined(SOCK_CLOEXEC)
724 if (host_type & SOCK_CLOEXEC) {
725 target_type |= TARGET_SOCK_CLOEXEC;
727 #endif
729 #if defined(SOCK_NONBLOCK)
730 if (host_type & SOCK_NONBLOCK) {
731 target_type |= TARGET_SOCK_NONBLOCK;
733 #endif
735 return target_type;
738 static abi_ulong target_brk;
739 static abi_ulong target_original_brk;
740 static abi_ulong brk_page;
742 void target_set_brk(abi_ulong new_brk)
744 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
745 brk_page = HOST_PAGE_ALIGN(target_brk);
748 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
749 #define DEBUGF_BRK(message, args...)
751 /* do_brk() must return target values and target errnos. */
752 abi_long do_brk(abi_ulong new_brk)
754 abi_long mapped_addr;
755 int new_alloc_size;
757 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
759 if (!new_brk) {
760 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
761 return target_brk;
763 if (new_brk < target_original_brk) {
764 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
765 target_brk);
766 return target_brk;
769 /* If the new brk is less than the highest page reserved to the
770 * target heap allocation, set it and we're almost done... */
771 if (new_brk <= brk_page) {
772 /* Heap contents are initialized to zero, as for anonymous
773 * mapped pages. */
774 if (new_brk > target_brk) {
775 memset(g2h(target_brk), 0, new_brk - target_brk);
777 target_brk = new_brk;
778 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
779 return target_brk;
782 /* We need to allocate more memory after the brk... Note that
783 * we don't use MAP_FIXED because that will map over the top of
784 * any existing mapping (like the one with the host libc or qemu
785 * itself); instead we treat "mapped but at wrong address" as
786 * a failure and unmap again.
788 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
789 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
790 PROT_READ|PROT_WRITE,
791 MAP_ANON|MAP_PRIVATE, 0, 0));
793 if (mapped_addr == brk_page) {
794 /* Heap contents are initialized to zero, as for anonymous
795 * mapped pages. Technically the new pages are already
796 * initialized to zero since they *are* anonymous mapped
797 * pages, however we have to take care with the contents that
798 * come from the remaining part of the previous page: it may
799 * contains garbage data due to a previous heap usage (grown
800 * then shrunken). */
801 memset(g2h(target_brk), 0, brk_page - target_brk);
803 target_brk = new_brk;
804 brk_page = HOST_PAGE_ALIGN(target_brk);
805 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
806 target_brk);
807 return target_brk;
808 } else if (mapped_addr != -1) {
809 /* Mapped but at wrong address, meaning there wasn't actually
810 * enough space for this brk.
812 target_munmap(mapped_addr, new_alloc_size);
813 mapped_addr = -1;
814 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
816 else {
817 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
820 #if defined(TARGET_ALPHA)
821 /* We (partially) emulate OSF/1 on Alpha, which requires we
822 return a proper errno, not an unchanged brk value. */
823 return -TARGET_ENOMEM;
824 #endif
825 /* For everything else, return the previous break. */
826 return target_brk;
829 static inline abi_long copy_from_user_fdset(fd_set *fds,
830 abi_ulong target_fds_addr,
831 int n)
833 int i, nw, j, k;
834 abi_ulong b, *target_fds;
836 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
837 if (!(target_fds = lock_user(VERIFY_READ,
838 target_fds_addr,
839 sizeof(abi_ulong) * nw,
840 1)))
841 return -TARGET_EFAULT;
843 FD_ZERO(fds);
844 k = 0;
845 for (i = 0; i < nw; i++) {
846 /* grab the abi_ulong */
847 __get_user(b, &target_fds[i]);
848 for (j = 0; j < TARGET_ABI_BITS; j++) {
849 /* check the bit inside the abi_ulong */
850 if ((b >> j) & 1)
851 FD_SET(k, fds);
852 k++;
856 unlock_user(target_fds, target_fds_addr, 0);
858 return 0;
861 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
862 abi_ulong target_fds_addr,
863 int n)
865 if (target_fds_addr) {
866 if (copy_from_user_fdset(fds, target_fds_addr, n))
867 return -TARGET_EFAULT;
868 *fds_ptr = fds;
869 } else {
870 *fds_ptr = NULL;
872 return 0;
875 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
876 const fd_set *fds,
877 int n)
879 int i, nw, j, k;
880 abi_long v;
881 abi_ulong *target_fds;
883 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
884 if (!(target_fds = lock_user(VERIFY_WRITE,
885 target_fds_addr,
886 sizeof(abi_ulong) * nw,
887 0)))
888 return -TARGET_EFAULT;
890 k = 0;
891 for (i = 0; i < nw; i++) {
892 v = 0;
893 for (j = 0; j < TARGET_ABI_BITS; j++) {
894 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
895 k++;
897 __put_user(v, &target_fds[i]);
900 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
902 return 0;
905 #if defined(__alpha__)
906 #define HOST_HZ 1024
907 #else
908 #define HOST_HZ 100
909 #endif
911 static inline abi_long host_to_target_clock_t(long ticks)
913 #if HOST_HZ == TARGET_HZ
914 return ticks;
915 #else
916 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
917 #endif
920 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
921 const struct rusage *rusage)
923 struct target_rusage *target_rusage;
925 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
926 return -TARGET_EFAULT;
927 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
928 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
929 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
930 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
931 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
932 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
933 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
934 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
935 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
936 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
937 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
938 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
939 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
940 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
941 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
942 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
943 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
944 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
945 unlock_user_struct(target_rusage, target_addr, 1);
947 return 0;
950 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
952 abi_ulong target_rlim_swap;
953 rlim_t result;
955 target_rlim_swap = tswapal(target_rlim);
956 if (target_rlim_swap == TARGET_RLIM_INFINITY)
957 return RLIM_INFINITY;
959 result = target_rlim_swap;
960 if (target_rlim_swap != (rlim_t)result)
961 return RLIM_INFINITY;
963 return result;
966 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
968 abi_ulong target_rlim_swap;
969 abi_ulong result;
971 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
972 target_rlim_swap = TARGET_RLIM_INFINITY;
973 else
974 target_rlim_swap = rlim;
975 result = tswapal(target_rlim_swap);
977 return result;
980 static inline int target_to_host_resource(int code)
982 switch (code) {
983 case TARGET_RLIMIT_AS:
984 return RLIMIT_AS;
985 case TARGET_RLIMIT_CORE:
986 return RLIMIT_CORE;
987 case TARGET_RLIMIT_CPU:
988 return RLIMIT_CPU;
989 case TARGET_RLIMIT_DATA:
990 return RLIMIT_DATA;
991 case TARGET_RLIMIT_FSIZE:
992 return RLIMIT_FSIZE;
993 case TARGET_RLIMIT_LOCKS:
994 return RLIMIT_LOCKS;
995 case TARGET_RLIMIT_MEMLOCK:
996 return RLIMIT_MEMLOCK;
997 case TARGET_RLIMIT_MSGQUEUE:
998 return RLIMIT_MSGQUEUE;
999 case TARGET_RLIMIT_NICE:
1000 return RLIMIT_NICE;
1001 case TARGET_RLIMIT_NOFILE:
1002 return RLIMIT_NOFILE;
1003 case TARGET_RLIMIT_NPROC:
1004 return RLIMIT_NPROC;
1005 case TARGET_RLIMIT_RSS:
1006 return RLIMIT_RSS;
1007 case TARGET_RLIMIT_RTPRIO:
1008 return RLIMIT_RTPRIO;
1009 case TARGET_RLIMIT_SIGPENDING:
1010 return RLIMIT_SIGPENDING;
1011 case TARGET_RLIMIT_STACK:
1012 return RLIMIT_STACK;
1013 default:
1014 return code;
1018 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1019 abi_ulong target_tv_addr)
1021 struct target_timeval *target_tv;
1023 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1024 return -TARGET_EFAULT;
1026 __get_user(tv->tv_sec, &target_tv->tv_sec);
1027 __get_user(tv->tv_usec, &target_tv->tv_usec);
1029 unlock_user_struct(target_tv, target_tv_addr, 0);
1031 return 0;
1034 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1035 const struct timeval *tv)
1037 struct target_timeval *target_tv;
1039 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1040 return -TARGET_EFAULT;
1042 __put_user(tv->tv_sec, &target_tv->tv_sec);
1043 __put_user(tv->tv_usec, &target_tv->tv_usec);
1045 unlock_user_struct(target_tv, target_tv_addr, 1);
1047 return 0;
1050 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1051 abi_ulong target_tz_addr)
1053 struct target_timezone *target_tz;
1055 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1056 return -TARGET_EFAULT;
1059 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1060 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1062 unlock_user_struct(target_tz, target_tz_addr, 0);
1064 return 0;
1067 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1068 #include <mqueue.h>
1070 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1071 abi_ulong target_mq_attr_addr)
1073 struct target_mq_attr *target_mq_attr;
1075 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1076 target_mq_attr_addr, 1))
1077 return -TARGET_EFAULT;
1079 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1080 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1081 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1082 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1084 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1086 return 0;
1089 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1090 const struct mq_attr *attr)
1092 struct target_mq_attr *target_mq_attr;
1094 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1095 target_mq_attr_addr, 0))
1096 return -TARGET_EFAULT;
1098 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1099 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1100 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1101 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1103 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1105 return 0;
1107 #endif
1109 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 * Emulates select(2): copies the three guest fd_sets and the optional
 * timeout in, runs the host select, and copies the (possibly modified)
 * sets and remaining timeout back out on success.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    /* A zero guest address yields a NULL host pointer (set ignored) */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux updates the timeout with the time not slept */
        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
1157 #endif
/* Host pipe2() wrapper: creates the pipe with the given flags, or
 * fails with -ENOSYS when the host libc has no pipe2() support
 * (in which case the flags cannot be honoured at all). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.  For pipe2 (flags != 0) the
 * host pipe2 wrapper is used; plain pipe() falls back to host pipe().
 * Returns 0 (or the read fd, on ABIs noted below) on success, or a
 * target errno.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
        /* These ABIs return the second fd in a register and the first
         * as the syscall return value instead of writing to pipedes. */
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1202 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1203 abi_ulong target_addr,
1204 socklen_t len)
1206 struct target_ip_mreqn *target_smreqn;
1208 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1209 if (!target_smreqn)
1210 return -TARGET_EFAULT;
1211 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1212 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1213 if (len == sizeof(struct target_ip_mreqn))
1214 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1215 unlock_user(target_smreqn, target_addr, 0);
1217 return 0;
/* Convert a guest sockaddr at target_addr/len into the host *addr.
 * If the fd has a registered per-fd address translator (e.g. the
 * obsolete SOCK_PACKET case), that translator is used instead.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte in range is non-NUL but the byte after is NUL:
             * extend len by one to include the terminator.  NOTE(review):
             * reading cp[len] relies on the caller having locked at
             * least len+1 bytes (do_bind/do_connect alloca addrlen+1). */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;        /* the only byte-swapped field */
    if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1271 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1272 struct sockaddr *addr,
1273 socklen_t len)
1275 struct target_sockaddr *target_saddr;
1277 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1278 if (!target_saddr)
1279 return -TARGET_EFAULT;
1280 memcpy(target_saddr, addr, len);
1281 target_saddr->sa_family = tswap16(addr->sa_family);
1282 unlock_user(target_saddr, target_addr, len);
1284 return 0;
/* Convert the guest ancillary data (control messages) of a sendmsg
 * into the pre-allocated host msg_control buffer of *msgh.
 * On return msgh->msg_controllen holds the host space actually used.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;               /* no complete header: nothing to do */
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length excluding the (aligned) header */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* fd-passing: swap each 32-bit fd individually */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* unknown payload: copy through unconverted, best effort */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host ancillary data returned by recvmsg into the guest's
 * control buffer described by *target_msgh.  Truncation (guest buffer
 * too small) is reported to the guest via MSG_CTRUNC, mirroring the
 * kernel's put_cmsg() behaviour.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;               /* guest buffer can't hold a header */
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fallthrough (both arms only break) */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* unknown payload: raw copy, zero-fill any extra space */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() Must return target values and target errnos.
 * Translates the guest's (level, optname, optval) triple to the host
 * and issues the host setsockopt().  Unknown combinations log and
 * fail with -TARGET_ENOPROTOOPT.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* Accept either a u32 or a single byte, as the kernel does
             * for these historically byte-sized options. */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* all-in_addr payload: no byte swapping required */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

            /* shared tail for both timeout options (see SNDTIMEO) */
set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                            &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            /* byte-swap every BPF instruction of the filter program */
            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            /* copy into a local buffer so we can NUL-terminate */
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
            break; /* NOTE(review): unreachable duplicate 'break' in original */
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos.
 * Translates the guest's (level, optname) pair, runs the host
 * getsockopt(), and writes the value and updated length back to the
 * guest.  Most options are plain ints and share the int_case tail.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);    /* NOTE(review): sizeof(lv) not sizeof(val); same size on Linux */
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* old BSD apps expect a single byte for these options */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Lock a guest iovec array at target_addr into host memory, returning
 * a freshly allocated host struct iovec array (free with unlock_iovec).
 * On failure returns NULL with errno set (0 for the count==0 case).
 * A bad buffer after the first results in a partial vector of zero
 * lengths rather than a fault, matching kernel writev semantics.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                int count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count < 0 || count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* undo the locks taken so far (non-zero lengths only) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2067 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2068 int count, int copy)
2070 struct target_iovec *target_vec;
2071 int i;
2073 target_vec = lock_user(VERIFY_READ, target_addr,
2074 count * sizeof(struct target_iovec), 1);
2075 if (target_vec) {
2076 for (i = 0; i < count; i++) {
2077 abi_ulong base = tswapal(target_vec[i].iov_base);
2078 abi_long len = tswapal(target_vec[i].iov_len);
2079 if (len < 0) {
2080 break;
2082 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2084 unlock_user(target_vec, target_addr, 0);
2087 g_free(vec);
/* Convert a guest socket type (base type plus SOCK_CLOEXEC/NONBLOCK
 * flag bits) into the host encoding in-place.  Returns 0 on success
 * or -TARGET_EINVAL when the host cannot express a requested flag.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* no SOCK_NONBLOCK and no fcntl fallback possible */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.
 * On hosts without SOCK_NONBLOCK, approximate TARGET_SOCK_NONBLOCK by
 * setting O_NONBLOCK with fcntl.  Returns fd on success; on failure
 * the fd is closed and -TARGET_EINVAL is returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Bug fix: check F_GETFL for failure instead of OR-ing
         * O_NONBLOCK into -1 and passing garbage flags to F_SETFL. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2139 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2140 abi_ulong target_addr,
2141 socklen_t len)
2143 struct sockaddr *addr = host_addr;
2144 struct target_sockaddr *target_saddr;
2146 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2147 if (!target_saddr) {
2148 return -TARGET_EFAULT;
2151 memcpy(addr, target_saddr, len);
2152 addr->sa_family = tswap16(target_saddr->sa_family);
2153 /* spkt_protocol is big-endian */
2155 unlock_user(target_saddr, target_addr, 0);
2156 return 0;
/* Per-fd translator registered for SOCK_PACKET sockets in do_socket(),
 * so bind/sendto addresses get the packet-specific conversion above. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
/* do_socket() Must return target values and target errnos.
 * Converts the guest type flags to host form, rejects PF_NETLINK,
 * creates the host socket, and applies the NONBLOCK fixup plus the
 * SOCK_PACKET address translator where needed.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;     /* keep original flags for fixup below */
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK)
        return -TARGET_EAFNOSUPPORT; /* no netlink XXX support */

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* ethertype protocol numbers are big-endian on the wire */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        }
    }
    return ret;
}
2195 /* do_bind() Must return target values and target errnos. */
2196 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2197 socklen_t addrlen)
2199 void *addr;
2200 abi_long ret;
2202 if ((int)addrlen < 0) {
2203 return -TARGET_EINVAL;
2206 addr = alloca(addrlen+1);
2208 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2209 if (ret)
2210 return ret;
2212 return get_errno(bind(sockfd, addr, addrlen));
2215 /* do_connect() Must return target values and target errnos. */
2216 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2217 socklen_t addrlen)
2219 void *addr;
2220 abi_long ret;
2222 if ((int)addrlen < 0) {
2223 return -TARGET_EINVAL;
2226 addr = alloca(addrlen+1);
2228 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2229 if (ret)
2230 return ret;
2232 return get_errno(connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 * Shared implementation of sendmsg/recvmsg on an already-locked guest
 * msghdr: converts the name, control data and iovec, performs the host
 * call, and (for recv) converts the results back.  Returns the number
 * of bytes transferred or a target errno.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* 2x over-allocation: host cmsg encoding may be larger than the
     * guest's (see the "Host cmsg overflow" note in target_to_host_cmsg) */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;      /* preserve byte count across conversions */
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                if (msg.msg_name != NULL) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2303 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2304 int flags, int send)
2306 abi_long ret;
2307 struct target_msghdr *msgp;
2309 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2310 msgp,
2311 target_msg,
2312 send ? 1 : 0)) {
2313 return -TARGET_EFAULT;
2315 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2316 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2317 return ret;
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * guest's mmsghdr vector.  Must return target values and target errnos.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp, as the kernel does for these calls. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Per-message transferred byte count, byte-swapped for the guest. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only the first i entries were touched; copy just those back. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* If we don't have a system accept4() then just call accept.
 * The callsites to do_accept4() will ensure that they don't
 * pass a non-zero flags argument in this config.
 */
#ifndef CONFIG_ACCEPT4
static inline int accept4(int sockfd, struct sockaddr *addr,
                          socklen_t *addrlen, int flags)
{
    /* Callers guarantee flags == 0 here; anything else is a QEMU bug. */
    assert(flags == 0);
    return accept(sockfd, addr, addrlen);
}
#endif
2380 /* do_accept4() Must return target values and target errnos. */
2381 static abi_long do_accept4(int fd, abi_ulong target_addr,
2382 abi_ulong target_addrlen_addr, int flags)
2384 socklen_t addrlen;
2385 void *addr;
2386 abi_long ret;
2387 int host_flags;
2389 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2391 if (target_addr == 0) {
2392 return get_errno(accept4(fd, NULL, NULL, host_flags));
2395 /* linux returns EINVAL if addrlen pointer is invalid */
2396 if (get_user_u32(addrlen, target_addrlen_addr))
2397 return -TARGET_EINVAL;
2399 if ((int)addrlen < 0) {
2400 return -TARGET_EINVAL;
2403 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2404 return -TARGET_EINVAL;
2406 addr = alloca(addrlen);
2408 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2409 if (!is_error(ret)) {
2410 host_to_target_sockaddr(target_addr, addr, addrlen);
2411 if (put_user_u32(addrlen, target_addrlen_addr))
2412 ret = -TARGET_EFAULT;
2414 return ret;
2417 /* do_getpeername() Must return target values and target errnos. */
2418 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2419 abi_ulong target_addrlen_addr)
2421 socklen_t addrlen;
2422 void *addr;
2423 abi_long ret;
2425 if (get_user_u32(addrlen, target_addrlen_addr))
2426 return -TARGET_EFAULT;
2428 if ((int)addrlen < 0) {
2429 return -TARGET_EINVAL;
2432 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2433 return -TARGET_EFAULT;
2435 addr = alloca(addrlen);
2437 ret = get_errno(getpeername(fd, addr, &addrlen));
2438 if (!is_error(ret)) {
2439 host_to_target_sockaddr(target_addr, addr, addrlen);
2440 if (put_user_u32(addrlen, target_addrlen_addr))
2441 ret = -TARGET_EFAULT;
2443 return ret;
2446 /* do_getsockname() Must return target values and target errnos. */
2447 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2448 abi_ulong target_addrlen_addr)
2450 socklen_t addrlen;
2451 void *addr;
2452 abi_long ret;
2454 if (get_user_u32(addrlen, target_addrlen_addr))
2455 return -TARGET_EFAULT;
2457 if ((int)addrlen < 0) {
2458 return -TARGET_EINVAL;
2461 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2462 return -TARGET_EFAULT;
2464 addr = alloca(addrlen);
2466 ret = get_errno(getsockname(fd, addr, &addrlen));
2467 if (!is_error(ret)) {
2468 host_to_target_sockaddr(target_addr, addr, addrlen);
2469 if (put_user_u32(addrlen, target_addrlen_addr))
2470 ret = -TARGET_EFAULT;
2472 return ret;
2475 /* do_socketpair() Must return target values and target errnos. */
2476 static abi_long do_socketpair(int domain, int type, int protocol,
2477 abi_ulong target_tab_addr)
2479 int tab[2];
2480 abi_long ret;
2482 target_to_host_sock_type(&type);
2484 ret = get_errno(socketpair(domain, type, protocol, tab));
2485 if (!is_error(ret)) {
2486 if (put_user_s32(tab[0], target_tab_addr)
2487 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2488 ret = -TARGET_EFAULT;
2490 return ret;
2493 /* do_sendto() Must return target values and target errnos. */
2494 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2495 abi_ulong target_addr, socklen_t addrlen)
2497 void *addr;
2498 void *host_msg;
2499 abi_long ret;
2501 if ((int)addrlen < 0) {
2502 return -TARGET_EINVAL;
2505 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2506 if (!host_msg)
2507 return -TARGET_EFAULT;
2508 if (target_addr) {
2509 addr = alloca(addrlen+1);
2510 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2511 if (ret) {
2512 unlock_user(host_msg, msg, 0);
2513 return ret;
2515 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2516 } else {
2517 ret = get_errno(send(fd, host_msg, len, flags));
2519 unlock_user(host_msg, msg, 0);
2520 return ret;
/* do_recvfrom() Must return target values and target errnos.
 * target_addr == 0 means the caller doesn't want the source address.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* Lock the guest receive buffer; data is copied back only on success. */
    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        /* Caller wants the source address: read the guest's addrlen. */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            /* Copy the source address and its updated length back. */
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: flush received bytes out to the guest buffer. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error: release the buffer without writing anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * The multiplexed socketcall(2) entry point: num selects the operation,
 * vptr points at a guest array holding its arguments.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }
    /* An out-of-range num leaves a[] unread and falls to default below. */

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
/* Fixed-size table of guest shmat() mappings, used by do_shmat/do_shmdt
 * to remember each segment's guest address and size. */
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;   /* guest virtual address of the mapping */
    abi_ulong size;    /* segment size in bytes */
    bool in_use;       /* slot currently tracks a live mapping */
} shm_regions[N_SHM_REGIONS];

/* Guest-layout semid_ds; padding words differ per target ABI. */
struct target_semid_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;     /* last semop time */
#if !defined(TARGET_PPC64)
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;     /* last change time */
#if !defined(TARGET_PPC64)
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;     /* number of semaphores in set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
/* Copy the ipc_perm embedded in a guest semid_ds at target_addr into
 * *host_ip, byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 * Field widths for mode/__seq vary by target ABI, hence the ifdefs.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);   /* 32-bit mode on these */
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq); /* 32-bit seq on PPC */
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write *host_ip into the ipc_perm
 * embedded in the guest semid_ds at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);   /* 32-bit mode on these */
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq); /* 32-bit seq on PPC */
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
2735 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2736 abi_ulong target_addr)
2738 struct target_semid_ds *target_sd;
2740 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2741 return -TARGET_EFAULT;
2742 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2743 return -TARGET_EFAULT;
2744 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2745 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2746 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2747 unlock_user_struct(target_sd, target_addr, 0);
2748 return 0;
2751 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2752 struct semid_ds *host_sd)
2754 struct target_semid_ds *target_sd;
2756 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2757 return -TARGET_EFAULT;
2758 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2759 return -TARGET_EFAULT;
2760 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2761 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2762 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2763 unlock_user_struct(target_sd, target_addr, 1);
2764 return 0;
/* Guest-layout seminfo, as returned by semctl(IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host seminfo out to the guest, swapping each field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl() argument union (glibc requires callers define it). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-side view of the same union: pointers are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
2814 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2815 abi_ulong target_addr)
2817 int nsems;
2818 unsigned short *array;
2819 union semun semun;
2820 struct semid_ds semid_ds;
2821 int i, ret;
2823 semun.buf = &semid_ds;
2825 ret = semctl(semid, 0, IPC_STAT, semun);
2826 if (ret == -1)
2827 return get_errno(ret);
2829 nsems = semid_ds.sem_nsems;
2831 *host_array = g_try_new(unsigned short, nsems);
2832 if (!*host_array) {
2833 return -TARGET_ENOMEM;
2835 array = lock_user(VERIFY_READ, target_addr,
2836 nsems*sizeof(unsigned short), 1);
2837 if (!array) {
2838 g_free(*host_array);
2839 return -TARGET_EFAULT;
2842 for(i=0; i<nsems; i++) {
2843 __get_user((*host_array)[i], &array[i]);
2845 unlock_user(array, target_addr, 0);
2847 return 0;
2850 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2851 unsigned short **host_array)
2853 int nsems;
2854 unsigned short *array;
2855 union semun semun;
2856 struct semid_ds semid_ds;
2857 int i, ret;
2859 semun.buf = &semid_ds;
2861 ret = semctl(semid, 0, IPC_STAT, semun);
2862 if (ret == -1)
2863 return get_errno(ret);
2865 nsems = semid_ds.sem_nsems;
2867 array = lock_user(VERIFY_WRITE, target_addr,
2868 nsems*sizeof(unsigned short), 0);
2869 if (!array)
2870 return -TARGET_EFAULT;
2872 for(i=0; i<nsems; i++) {
2873 __put_user((*host_array)[i], &array[i]);
2875 g_free(*host_array);
2876 unlock_user(array, target_addr, 1);
2878 return 0;
/* Emulate semctl(2).  target_arg is the guest's semun passed by value.
 * Must return target values and target errnos.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;  /* strip IPC_64 and version bits */

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
	 * a swap of the 4 byte val field. In other cases, the data is
	 * already in proper host byte order. */
	if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
	    target_su.buf = tswapal(target_su.buf);
	    arg.val = tswap32(target_su.val);
	} else {
	    arg.val = target_su.val;
	}
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* Marshal the guest value array in, run the call, marshal back. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        /* semid_ds round-trip: copy in even for STAT (harmless), out after. */
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* No argument payload for these commands. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest-layout sembuf for semop(2). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in set */
    short sem_op;            /* operation */
    short sem_flg;           /* IPC_NOWAIT, SEM_UNDO */
};

/* Copy nsops guest sembufs at target_addr into host_sembuf.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

/* Emulate semop(2).  Must return target values and target errnos.
 * NOTE(review): nsops-sized VLA is guest-controlled — the kernel caps
 * nsops at SEMOPM before this matters, but verify the caller's range
 * checking; a huge nsops would overflow the host stack here.
 */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(semop(semid, sops, nsops));
}
/* Guest-layout msqid_ds; 32-bit ABIs carry padding words after the
 * time fields. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* current bytes on queue */
    abi_ulong msg_qnum;         /* messages on queue */
    abi_ulong msg_qbytes;       /* max bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd */
    abi_ulong msg_lrpid;        /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
3014 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3015 abi_ulong target_addr)
3017 struct target_msqid_ds *target_md;
3019 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3020 return -TARGET_EFAULT;
3021 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3022 return -TARGET_EFAULT;
3023 host_md->msg_stime = tswapal(target_md->msg_stime);
3024 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3025 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3026 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3027 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3028 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3029 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3030 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3031 unlock_user_struct(target_md, target_addr, 0);
3032 return 0;
3035 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3036 struct msqid_ds *host_md)
3038 struct target_msqid_ds *target_md;
3040 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3041 return -TARGET_EFAULT;
3042 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3043 return -TARGET_EFAULT;
3044 target_md->msg_stime = tswapal(host_md->msg_stime);
3045 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3046 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3047 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3048 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3049 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3050 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3051 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3052 unlock_user_struct(target_md, target_addr, 1);
3053 return 0;
/* Guest-layout msginfo, as returned by msgctl(IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host msginfo out to the guest, swapping each field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2).  ptr is the guest msqid_ds/msginfo buffer.
 * Must return target values and target errnos.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;  /* strip IPC_64 and version bits */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip: copy in even for STAT (harmless), copy out after. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel ABI delivers msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-layout msgbuf header; mtext is really msgsz bytes long. */
struct target_msgbuf {
    abi_long mtype;   /* message type, must be > 0 */
    char	mtext[1]; /* flexible payload (declared [1], used [msgsz]) */
};

/* Emulate msgsnd(2).  msgp is the guest msgbuf, msgsz the payload size.
 * Must return target values and target errnos.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* msgsz is guest-controlled: use the failable allocator. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
3149 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3150 unsigned int msgsz, abi_long msgtyp,
3151 int msgflg)
3153 struct target_msgbuf *target_mb;
3154 char *target_mtext;
3155 struct msgbuf *host_mb;
3156 abi_long ret = 0;
3158 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3159 return -TARGET_EFAULT;
3161 host_mb = g_malloc(msgsz+sizeof(long));
3162 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3164 if (ret > 0) {
3165 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3166 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3167 if (!target_mtext) {
3168 ret = -TARGET_EFAULT;
3169 goto end;
3171 memcpy(target_mb->mtext, host_mb->mtext, ret);
3172 unlock_user(target_mtext, target_mtext_addr, ret);
3175 target_mb->mtype = tswapal(host_mb->mtype);
3177 end:
3178 if (target_mb)
3179 unlock_user_struct(target_mb, msgp, 1);
3180 g_free(host_mb);
3181 return ret;
3184 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3185 abi_ulong target_addr)
3187 struct target_shmid_ds *target_sd;
3189 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3190 return -TARGET_EFAULT;
3191 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3192 return -TARGET_EFAULT;
3193 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3194 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3195 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3196 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3197 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3198 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3199 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3200 unlock_user_struct(target_sd, target_addr, 0);
3201 return 0;
3204 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3205 struct shmid_ds *host_sd)
3207 struct target_shmid_ds *target_sd;
3209 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3210 return -TARGET_EFAULT;
3211 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3212 return -TARGET_EFAULT;
3213 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3214 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3215 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3216 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3217 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3218 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3219 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3220 unlock_user_struct(target_sd, target_addr, 1);
3221 return 0;
/* Guest-layout shminfo, as returned by shmctl(IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};

/* Copy a host shminfo out to the guest, swapping each field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-layout shm_info, as returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;         /* total allocated shm (pages) */
    abi_ulong shm_rss;         /* resident shm (pages) */
    abi_ulong shm_swp;         /* swapped shm (pages) */
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Copy a host shm_info out to the guest, swapping each field.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2).  buf is the guest shmid_ds/shminfo/shm_info buffer.
 * Must return target values and target errnos.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;  /* strip IPC_64 and version bits */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip: copy in even for STAT (harmless), copy out after. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel ABI delivers shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No argument payload for these commands. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
/* Emulate shmat(2): attach the segment at the guest's requested address,
 * or pick a free guest VMA when shmaddr == 0.  Records the mapping in
 * shm_regions[] so do_shmdt() can undo the page flags later.
 * Must return a guest address or a target errno.
 */
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    /* Hold the mmap lock across VMA search + attach so another thread
     * cannot grab the chosen range in between. */
    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the reserved range may already be mapped. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid/readable (+writable unless RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the mapping in the first free slot; if the table is full
     * the attach still succeeds but do_shmdt won't clear page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/*
 * Emulate shmdt(2): detach the shared-memory segment attached at guest
 * address shmaddr.  If the region was recorded by do_shmat(), free the
 * tracking slot and clear its guest page flags before calling the host
 * shmdt() on the translated address.
 */
3365 static inline abi_long do_shmdt(abi_ulong shmaddr)
3367     int i;
3369     for (i = 0; i < N_SHM_REGIONS; ++i) {
3370         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3371             shm_regions[i].in_use = false;
3372             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3373             break;
3377     return get_errno(shmdt(g2h(shmaddr)));
3380 #ifdef TARGET_NR_ipc
3381 /* ??? This only works with linear mappings. */
3382 /* do_ipc() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy ipc(2) syscall: the call number selects
 * one of the sem*/msg*/shm* operations, which are forwarded to the
 * corresponding do_* helpers or host syscalls.  The high 16 bits of
 * 'call' carry an ABI version used by msgrcv/shmat.
 */
3383 static abi_long do_ipc(unsigned int call, abi_long first,
3384                        abi_long second, abi_long third,
3385                        abi_long ptr, abi_long fifth)
3387     int version;
3388     abi_long ret = 0;
3390     version = call >> 16;
3391     call &= 0xffff;
3393     switch (call) {
3394     case IPCOP_semop:
3395         ret = do_semop(first, ptr, second);
3396         break;
3398     case IPCOP_semget:
3399         ret = get_errno(semget(first, second, third));
3400         break;
3402     case IPCOP_semctl: {
3403         /* The semun argument to semctl is passed by value, so dereference the
3404          * ptr argument. */
3405         abi_ulong atptr;
/* NOTE(review): get_user_ual()'s failure is not checked here, so a bad
 * ptr silently passes an uninitialized/zero value on. */
3406         get_user_ual(atptr, ptr);
3407         ret = do_semctl(first, second, third, atptr);
3408         break;
3411     case IPCOP_msgget:
3412         ret = get_errno(msgget(first, second));
3413         break;
3415     case IPCOP_msgsnd:
3416         ret = do_msgsnd(first, ptr, second, third);
3417         break;
3419     case IPCOP_msgctl:
3420         ret = do_msgctl(first, second, ptr);
3421         break;
3423     case IPCOP_msgrcv:
3424         switch (version) {
3425         case 0:
/* Old-style msgrcv packs msgp and msgtyp into a kludge struct that
 * ptr points at; newer versions pass them directly. */
3427             struct target_ipc_kludge {
3428                 abi_long msgp;
3429                 abi_long msgtyp;
3430             } *tmp;
3432             if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3433                 ret = -TARGET_EFAULT;
3434                 break;
3437             ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3439             unlock_user_struct(tmp, ptr, 0);
3440             break;
3442         default:
3443             ret = do_msgrcv(first, ptr, second, fifth, third);
3445         break;
3447     case IPCOP_shmat:
3448         switch (version) {
3449         default:
3451             abi_ulong raddr;
3452             raddr = do_shmat(first, ptr, second);
3453             if (is_error(raddr))
3454                 return get_errno(raddr);
/* The attach address is returned indirectly through *third. */
3455             if (put_user_ual(raddr, third))
3456                 return -TARGET_EFAULT;
3457             break;
3459         case 1:
/* Version 1 shmat (iBCS2 emulation) is not supported. */
3460             ret = -TARGET_EINVAL;
3461             break;
3463         break;
3464     case IPCOP_shmdt:
3465         ret = do_shmdt(ptr);
3466         break;
3468     case IPCOP_shmget:
3469         /* IPC_* flag values are the same on all linux platforms */
3470         ret = get_errno(shmget(first, second, third));
3471         break;
3473     /* IPC_* and SHM_* command values are the same on all linux platforms */
3474     case IPCOP_shmctl:
3475         ret = do_shmctl(first, second, ptr);
3476         break;
3477     default:
3478         gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3479         ret = -TARGET_ENOSYS;
3480         break;
3482     return ret;
3484 #endif
3486 /* kernel structure types definitions */
/* First expansion of syscall_types.h: build an enum of STRUCT_* ids,
 * one per structure description (STRUCT_MAX terminates the list). */
3488 #define STRUCT(name, ...) STRUCT_ ## name,
3489 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3490 enum {
3491 #include "syscall_types.h"
3492     STRUCT_MAX
3494 #undef STRUCT
3495 #undef STRUCT_SPECIAL
/* Second expansion: emit a thunk argtype description array for each
 * regular structure; "special" structs get hand-written converters. */
3497 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
3498 #define STRUCT_SPECIAL(name)
3499 #include "syscall_types.h"
3500 #undef STRUCT
3501 #undef STRUCT_SPECIAL
3503 typedef struct IOCTLEntry IOCTLEntry;
/* Signature of a custom ioctl handler used by IOCTL_SPECIAL entries. */
3505 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3506                              int fd, int cmd, abi_long arg);
/* One row of the ioctl translation table (see ioctls.h). */
3508 struct IOCTLEntry {
3509     int target_cmd;
3510     unsigned int host_cmd;
3511     const char *name;
3512     int access;
3513     do_ioctl_fn *do_ioctl;
3514     const argtype arg_type[5];
/* Direction of the ioctl's argument relative to the guest. */
3517 #define IOC_R 0x0001
3518 #define IOC_W 0x0002
3519 #define IOC_RW (IOC_R | IOC_W)
/* Size of the on-stack scratch buffer used for argument conversion. */
3521 #define MAX_STRUCT_SIZE 4096
3523 #ifdef CONFIG_FIEMAP
3524 /* So fiemap access checks don't overflow on 32 bit systems.
3525  * This is very slightly smaller than the limit imposed by
3526  * the underlying kernel.
3528 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3529                             / sizeof(struct fiemap_extent))
/*
 * Special handler for FS_IOC_FIEMAP: the argument is a struct fiemap
 * followed by a variable-length array of struct fiemap_extent sized by
 * fm_extent_count.  Converts header in, runs the host ioctl, then
 * converts header plus the fm_mapped_extents filled-in extents back to
 * the guest.  Falls back to a heap buffer when the result exceeds
 * MAX_STRUCT_SIZE.
 */
3531 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3532                                        int fd, int cmd, abi_long arg)
3534     /* The parameter for this ioctl is a struct fiemap followed
3535      * by an array of struct fiemap_extent whose size is set
3536      * in fiemap->fm_extent_count. The array is filled in by the
3537      * ioctl.
3539     int target_size_in, target_size_out;
3540     struct fiemap *fm;
3541     const argtype *arg_type = ie->arg_type;
3542     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3543     void *argptr, *p;
3544     abi_long ret;
3545     int i, extent_size = thunk_type_size(extent_arg_type, 0);
3546     uint32_t outbufsz;
3547     int free_fm = 0;
3549     assert(arg_type[0] == TYPE_PTR);
3550     assert(ie->access == IOC_RW);
3551     arg_type++;
/* Convert just the fixed-size fiemap header from the guest. */
3552     target_size_in = thunk_type_size(arg_type, 0);
3553     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3554     if (!argptr) {
3555         return -TARGET_EFAULT;
3557     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3558     unlock_user(argptr, arg, 0);
3559     fm = (struct fiemap *)buf_temp;
/* Reject counts that would overflow the outbufsz computation below. */
3560     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3561         return -TARGET_EINVAL;
3564     outbufsz = sizeof (*fm) +
3565         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3567     if (outbufsz > MAX_STRUCT_SIZE) {
3568         /* We can't fit all the extents into the fixed size buffer.
3569          * Allocate one that is large enough and use it instead.
3571         fm = g_try_malloc(outbufsz);
3572         if (!fm) {
3573             return -TARGET_ENOMEM;
3575         memcpy(fm, buf_temp, sizeof(struct fiemap));
3576         free_fm = 1;
3578     ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3579     if (!is_error(ret)) {
3580         target_size_out = target_size_in;
3581         /* An extent_count of 0 means we were only counting the extents
3582          * so there are no structs to copy
3584         if (fm->fm_extent_count != 0) {
3585             target_size_out += fm->fm_mapped_extents * extent_size;
3587         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3588         if (!argptr) {
3589             ret = -TARGET_EFAULT;
3590         } else {
3591             /* Convert the struct fiemap */
3592             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3593             if (fm->fm_extent_count != 0) {
3594                 p = argptr + target_size_in;
3595                 /* ...and then all the struct fiemap_extents */
3596                 for (i = 0; i < fm->fm_mapped_extents; i++) {
3597                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3598                                   THUNK_TARGET);
3599                     p += extent_size;
3602             unlock_user(argptr, arg, target_size_out);
3605     if (free_fm) {
3606         g_free(fm);
3608     return ret;
3610 #endif
/*
 * Special handler for SIOCGIFCONF: struct ifconf embeds a pointer to a
 * caller-supplied ifreq array, so the generic thunk machinery cannot
 * handle it.  The guest's ifc_len/ifc_buf are rescaled between the
 * target and host ifreq sizes, the host ioctl is run into a host-side
 * buffer, and both the ifconf header and each ifreq entry are copied
 * back to the guest.
 */
3612 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3613                                 int fd, int cmd, abi_long arg)
3615     const argtype *arg_type = ie->arg_type;
3616     int target_size;
3617     void *argptr;
3618     int ret;
3619     struct ifconf *host_ifconf;
3620     uint32_t outbufsz;
3621     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3622     int target_ifreq_size;
3623     int nb_ifreq;
3624     int free_buf = 0;
3625     int i;
3626     int target_ifc_len;
3627     abi_long target_ifc_buf;
3628     int host_ifc_len;
3629     char *host_ifc_buf;
3631     assert(arg_type[0] == TYPE_PTR);
3632     assert(ie->access == IOC_RW);
3634     arg_type++;
3635     target_size = thunk_type_size(arg_type, 0);
3637     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3638     if (!argptr)
3639         return -TARGET_EFAULT;
3640     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3641     unlock_user(argptr, arg, 0);
/* Save the guest's buffer pointer/length before they are overwritten
 * with host-side values for the real ioctl call. */
3643     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3644     target_ifc_len = host_ifconf->ifc_len;
3645     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
/* Rescale: guest length counts target-sized ifreqs; the host buffer
 * must hold the same number of host-sized ifreqs. */
3647     target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3648     nb_ifreq = target_ifc_len / target_ifreq_size;
3649     host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3651     outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3652     if (outbufsz > MAX_STRUCT_SIZE) {
3653         /* We can't fit all the extents into the fixed size buffer.
3654          * Allocate one that is large enough and use it instead.
3656         host_ifconf = malloc(outbufsz);
3657         if (!host_ifconf) {
3658             return -TARGET_ENOMEM;
3660         memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3661         free_buf = 1;
/* The ifreq array lives immediately after the ifconf header. */
3663     host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3665     host_ifconf->ifc_len = host_ifc_len;
3666     host_ifconf->ifc_buf = host_ifc_buf;
3668     ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3669     if (!is_error(ret)) {
3670         /* convert host ifc_len to target ifc_len */
3672         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3673         target_ifc_len = nb_ifreq * target_ifreq_size;
3674         host_ifconf->ifc_len = target_ifc_len;
3676         /* restore target ifc_buf */
3678         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3680         /* copy struct ifconf to target user */
3682         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3683         if (!argptr)
3684             return -TARGET_EFAULT;
3685         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3686         unlock_user(argptr, arg, target_size);
3688         /* copy ifreq[] to target user */
/* NOTE(review): this lock_user's result is not NULL-checked before
 * use, unlike the one above -- worth confirming upstream. */
3690         argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3691         for (i = 0; i < nb_ifreq ; i++) {
3692             thunk_convert(argptr + i * target_ifreq_size,
3693                           host_ifc_buf + i * sizeof(struct ifreq),
3694                           ifreq_arg_type, THUNK_TARGET);
3696         unlock_user(argptr, target_ifc_buf, target_ifc_len);
3699     if (free_buf) {
3700         free(host_ifconf);
3703     return ret;
/*
 * Special handler for device-mapper ioctls (DM_*): the argument is a
 * struct dm_ioctl header followed by a variable-size payload whose
 * layout depends on the specific command.  The header is converted
 * with the thunk machinery; the payload is converted by hand per
 * command, both on the way in and (for commands that return data) on
 * the way out.  A temporary buffer of data_size*2 is used because
 * buf_temp is too small for the payload.
 */
3706 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3707                             int cmd, abi_long arg)
3709     void *argptr;
3710     struct dm_ioctl *host_dm;
3711     abi_long guest_data;
3712     uint32_t guest_data_size;
3713     int target_size;
3714     const argtype *arg_type = ie->arg_type;
3715     abi_long ret;
3716     void *big_buf = NULL;
3717     char *host_data;
3719     arg_type++;
3720     target_size = thunk_type_size(arg_type, 0);
3721     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3722     if (!argptr) {
3723         ret = -TARGET_EFAULT;
3724         goto out;
3726     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3727     unlock_user(argptr, arg, 0);
3729     /* buf_temp is too small, so fetch things into a bigger buffer */
3730     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3731     memcpy(big_buf, buf_temp, target_size);
3732     buf_temp = big_buf;
3733     host_dm = big_buf;
/* Payload follows the header at data_start, both guest- and host-side. */
3735     guest_data = arg + host_dm->data_start;
3736     if ((guest_data - arg) < 0) {
3737         ret = -EINVAL;
3738         goto out;
3740     guest_data_size = host_dm->data_size - host_dm->data_start;
3741     host_data = (char*)host_dm + host_dm->data_start;
/* NOTE(review): this lock_user result is not NULL-checked before the
 * memcpy cases below -- confirm against upstream. */
3743     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3744     switch (ie->host_cmd) {
3745     case DM_REMOVE_ALL:
3746     case DM_LIST_DEVICES:
3747     case DM_DEV_CREATE:
3748     case DM_DEV_REMOVE:
3749     case DM_DEV_SUSPEND:
3750     case DM_DEV_STATUS:
3751     case DM_DEV_WAIT:
3752     case DM_TABLE_STATUS:
3753     case DM_TABLE_CLEAR:
3754     case DM_TABLE_DEPS:
3755     case DM_LIST_VERSIONS:
3756         /* no input data */
3757         break;
3758     case DM_DEV_RENAME:
3759     case DM_DEV_SET_GEOMETRY:
3760         /* data contains only strings */
3761         memcpy(host_data, argptr, guest_data_size);
3762         break;
3763     case DM_TARGET_MSG:
3764         memcpy(host_data, argptr, guest_data_size);
/* First 8 bytes are a sector number needing a byte swap. */
3765         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3766         break;
3767     case DM_TABLE_LOAD:
/* Payload is a chain of dm_target_spec structs, each followed by a
 * NUL-terminated parameter string; convert each and repack with
 * host-side 'next' offsets. */
3769         void *gspec = argptr;
3770         void *cur_data = host_data;
3771         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3772         int spec_size = thunk_type_size(arg_type, 0);
3773         int i;
3775         for (i = 0; i < host_dm->target_count; i++) {
3776             struct dm_target_spec *spec = cur_data;
3777             uint32_t next;
3778             int slen;
3780             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3781             slen = strlen((char*)gspec + spec_size) + 1;
3782             next = spec->next;
3783             spec->next = sizeof(*spec) + slen;
3784             strcpy((char*)&spec[1], gspec + spec_size);
3785             gspec += next;
3786             cur_data += spec->next;
3788         break;
3790     default:
3791         ret = -TARGET_EINVAL;
3792         unlock_user(argptr, guest_data, 0);
3793         goto out;
3795     unlock_user(argptr, guest_data, 0);
3797     ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3798     if (!is_error(ret)) {
/* Re-read data_start/data_size: the kernel may have updated them. */
3799         guest_data = arg + host_dm->data_start;
3800         guest_data_size = host_dm->data_size - host_dm->data_start;
3801         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3802         switch (ie->host_cmd) {
3803         case DM_REMOVE_ALL:
3804         case DM_DEV_CREATE:
3805         case DM_DEV_REMOVE:
3806         case DM_DEV_RENAME:
3807         case DM_DEV_SUSPEND:
3808         case DM_DEV_STATUS:
3809         case DM_TABLE_LOAD:
3810         case DM_TABLE_CLEAR:
3811         case DM_TARGET_MSG:
3812         case DM_DEV_SET_GEOMETRY:
3813             /* no return data */
3814             break;
3815         case DM_LIST_DEVICES:
/* Walk the variable-length dm_name_list chain, converting each entry
 * and recomputing 'next' for the target layout; set BUFFER_FULL if the
 * guest buffer cannot hold an entry. */
3817             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3818             uint32_t remaining_data = guest_data_size;
3819             void *cur_data = argptr;
3820             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3821             int nl_size = 12; /* can't use thunk_size due to alignment */
3823             while (1) {
3824                 uint32_t next = nl->next;
3825                 if (next) {
3826                     nl->next = nl_size + (strlen(nl->name) + 1);
3828                 if (remaining_data < nl->next) {
3829                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
3830                     break;
3832                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3833                 strcpy(cur_data + nl_size, nl->name);
3834                 cur_data += nl->next;
3835                 remaining_data -= nl->next;
3836                 if (!next) {
3837                     break;
3839                 nl = (void*)nl + next;
3841             break;
3843         case DM_DEV_WAIT:
3844         case DM_TABLE_STATUS:
/* Same repacking as DM_TABLE_LOAD, but host -> target. */
3846             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3847             void *cur_data = argptr;
3848             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3849             int spec_size = thunk_type_size(arg_type, 0);
3850             int i;
3852             for (i = 0; i < host_dm->target_count; i++) {
3853                 uint32_t next = spec->next;
3854                 int slen = strlen((char*)&spec[1]) + 1;
3855                 spec->next = (cur_data - argptr) + spec_size + slen;
3856                 if (guest_data_size < spec->next) {
3857                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
3858                     break;
3860                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3861                 strcpy(cur_data + spec_size, (char*)&spec[1]);
3862                 cur_data = argptr + spec->next;
3863                 spec = (void*)host_dm + host_dm->data_start + next;
3865             break;
3867         case DM_TABLE_DEPS:
/* Payload: a 32-bit count at offset 0, then (at offset 8) an array of
 * 64-bit device numbers to byte-swap individually. */
3869             void *hdata = (void*)host_dm + host_dm->data_start;
3870             int count = *(uint32_t*)hdata;
3871             uint64_t *hdev = hdata + 8;
3872             uint64_t *gdev = argptr + 8;
3873             int i;
3875             *(uint32_t*)argptr = tswap32(count);
3876             for (i = 0; i < count; i++) {
3877                 *gdev = tswap64(*hdev);
3878                 gdev++;
3879                 hdev++;
3881             break;
3883         case DM_LIST_VERSIONS:
/* Walk the dm_target_versions chain, same pattern as DM_LIST_DEVICES. */
3885             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3886             uint32_t remaining_data = guest_data_size;
3887             void *cur_data = argptr;
3888             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3889             int vers_size = thunk_type_size(arg_type, 0);
3891             while (1) {
3892                 uint32_t next = vers->next;
3893                 if (next) {
3894                     vers->next = vers_size + (strlen(vers->name) + 1);
3896                 if (remaining_data < vers->next) {
3897                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
3898                     break;
3900                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3901                 strcpy(cur_data + vers_size, vers->name);
3902                 cur_data += vers->next;
3903                 remaining_data -= vers->next;
3904                 if (!next) {
3905                     break;
3907                 vers = (void*)vers + next;
3909             break;
3911         default:
3912             unlock_user(argptr, guest_data, 0);
3913             ret = -TARGET_EINVAL;
3914             goto out;
3916         unlock_user(argptr, guest_data, guest_data_size);
/* Finally write the (possibly kernel-updated) header back. */
3918         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3919         if (!argptr) {
3920             ret = -TARGET_EFAULT;
3921             goto out;
3923         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3924         unlock_user(argptr, arg, target_size);
3926 out:
3927     g_free(big_buf);
3928     return ret;
/*
 * Special handler for BLKPG: struct blkpg_ioctl_arg embeds a pointer
 * to a struct blkpg_partition payload, so both levels must be
 * converted.  The payload pointer is swizzled to a local host copy
 * before calling the host ioctl.  Only ADD/DEL_PARTITION are
 * supported; other opcodes return -TARGET_EINVAL.
 */
3931 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3932                                int cmd, abi_long arg)
3934     void *argptr;
3935     int target_size;
3936     const argtype *arg_type = ie->arg_type;
3937     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3938     abi_long ret;
3940     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3941     struct blkpg_partition host_part;
3943     /* Read and convert blkpg */
3944     arg_type++;
3945     target_size = thunk_type_size(arg_type, 0);
3946     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3947     if (!argptr) {
3948         ret = -TARGET_EFAULT;
3949         goto out;
3951     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3952     unlock_user(argptr, arg, 0);
3954     switch (host_blkpg->op) {
3955     case BLKPG_ADD_PARTITION:
3956     case BLKPG_DEL_PARTITION:
3957         /* payload is struct blkpg_partition */
3958         break;
3959     default:
3960         /* Unknown opcode */
3961         ret = -TARGET_EINVAL;
3962         goto out;
3965     /* Read and convert blkpg->data */
3966     arg = (abi_long)(uintptr_t)host_blkpg->data;
3967     target_size = thunk_type_size(part_arg_type, 0);
3968     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3969     if (!argptr) {
3970         ret = -TARGET_EFAULT;
3971         goto out;
3973     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3974     unlock_user(argptr, arg, 0);
3976     /* Swizzle the data pointer to our local copy and call! */
3977     host_blkpg->data = &host_part;
3978     ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3980 out:
3981     return ret;
/*
 * Special handler for routing-table ioctls taking struct rtentry
 * (e.g. SIOCADDRT/SIOCDELRT): rtentry's rt_dev member is a pointer to
 * a device-name string in guest memory, which the generic thunk cannot
 * follow.  Fields are converted one at a time; when the rt_dev field
 * is reached, the guest string is locked into host memory and its host
 * pointer substituted.  The string is unlocked after the host ioctl.
 */
3984 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3985                             int fd, int cmd, abi_long arg)
3987     const argtype *arg_type = ie->arg_type;
3988     const StructEntry *se;
3989     const argtype *field_types;
3990     const int *dst_offsets, *src_offsets;
3991     int target_size;
3992     void *argptr;
3993     abi_ulong *target_rt_dev_ptr;
3994     unsigned long *host_rt_dev_ptr;
3995     abi_long ret;
3996     int i;
3998     assert(ie->access == IOC_W);
3999     assert(*arg_type == TYPE_PTR);
4000     arg_type++;
4001     assert(*arg_type == TYPE_STRUCT);
4002     target_size = thunk_type_size(arg_type, 0);
4003     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4004     if (!argptr) {
4005         return -TARGET_EFAULT;
4007     arg_type++;
4008     assert(*arg_type == (int)STRUCT_rtentry);
4009     se = struct_entries + *arg_type++;
4010     assert(se->convert[0] == NULL);
4011     /* convert struct here to be able to catch rt_dev string */
4012     field_types = se->field_types;
4013     dst_offsets = se->field_offsets[THUNK_HOST];
4014     src_offsets = se->field_offsets[THUNK_TARGET];
4015     for (i = 0; i < se->nb_fields; i++) {
4016         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4017             assert(*field_types == TYPE_PTRVOID);
4018             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4019             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4020             if (*target_rt_dev_ptr != 0) {
/* Lock the guest device-name string; its host address replaces the
 * guest pointer in the converted struct. */
4021                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4022                                                   tswapal(*target_rt_dev_ptr));
4023                 if (!*host_rt_dev_ptr) {
4024                     unlock_user(argptr, arg, 0);
4025                     return -TARGET_EFAULT;
4027             } else {
4028                 *host_rt_dev_ptr = 0;
4030             field_types++;
4031             continue;
/* All other fields go through the ordinary thunk conversion. */
4033         field_types = thunk_convert(buf_temp + dst_offsets[i],
4034                                     argptr + src_offsets[i],
4035                                     field_types, THUNK_HOST);
4037     unlock_user(argptr, arg, 0);
4039     ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4040     if (*host_rt_dev_ptr != 0) {
4041         unlock_user((void *)*host_rt_dev_ptr,
4042                     *target_rt_dev_ptr, 0);
4044     return ret;
/*
 * Special handler for KDSIGACCEPT: the ioctl argument is a signal
 * number, which must be translated from the guest's numbering to the
 * host's before the host ioctl is issued.
 */
4047 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4048                                      int fd, int cmd, abi_long arg)
4050     int sig = target_to_host_signal(arg);
4051     return get_errno(ioctl(fd, ie->host_cmd, sig));
/*
 * Translation table for all supported ioctls, populated from ioctls.h.
 * IOCTL entries use the generic thunk conversion driven by arg_type;
 * IOCTL_SPECIAL entries name a custom do_ioctl_fn handler.  A
 * zero-filled sentinel row terminates the table.
 */
4054 static IOCTLEntry ioctl_entries[] = {
4055 #define IOCTL(cmd, access, ...) \
4056     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
4057 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4058     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
4059 #include "ioctls.h"
4060     { 0, 0, },
4063 /* ??? Implement proper locking for ioctls. */
4064 /* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher: look up the guest command in
 * ioctl_entries[], delegate to a custom handler if one is registered,
 * otherwise perform generic argument conversion according to the
 * entry's arg_type/access and invoke the host ioctl.  Unknown commands
 * yield -TARGET_ENOSYS.
 */
4065 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4067     const IOCTLEntry *ie;
4068     const argtype *arg_type;
4069     abi_long ret;
4070     uint8_t buf_temp[MAX_STRUCT_SIZE];
4071     int target_size;
4072     void *argptr;
/* Linear scan to the matching entry or the zero sentinel. */
4074     ie = ioctl_entries;
4075     for(;;) {
4076         if (ie->target_cmd == 0) {
4077             gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4078             return -TARGET_ENOSYS;
4080         if (ie->target_cmd == cmd)
4081             break;
4082         ie++;
4084     arg_type = ie->arg_type;
4085 #if defined(DEBUG)
4086     gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
4087 #endif
4088     if (ie->do_ioctl) {
4089         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4092     switch(arg_type[0]) {
4093     case TYPE_NULL:
4094         /* no argument */
4095         ret = get_errno(ioctl(fd, ie->host_cmd));
4096         break;
4097     case TYPE_PTRVOID:
4098     case TYPE_INT:
/* Argument passed through unchanged (integer or opaque pointer). */
4099         ret = get_errno(ioctl(fd, ie->host_cmd, arg));
4100         break;
4101     case TYPE_PTR:
4102         arg_type++;
4103         target_size = thunk_type_size(arg_type, 0);
4104         switch(ie->access) {
4105         case IOC_R:
/* Kernel writes: run ioctl into buf_temp, then convert to guest. */
4106             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4107             if (!is_error(ret)) {
4108                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4109                 if (!argptr)
4110                     return -TARGET_EFAULT;
4111                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4112                 unlock_user(argptr, arg, target_size);
4114             break;
4115         case IOC_W:
/* Kernel reads: convert guest argument to host, then run ioctl. */
4116             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4117             if (!argptr)
4118                 return -TARGET_EFAULT;
4119             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4120             unlock_user(argptr, arg, 0);
4121             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4122             break;
4123         default:
4124         case IOC_RW:
/* Both directions: convert in, run ioctl, convert back out. */
4125             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4126             if (!argptr)
4127                 return -TARGET_EFAULT;
4128             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4129             unlock_user(argptr, arg, 0);
4130             ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4131             if (!is_error(ret)) {
4132                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4133                 if (!argptr)
4134                     return -TARGET_EFAULT;
4135                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4136                 unlock_user(argptr, arg, target_size);
4138             break;
4140         break;
4141     default:
4142         gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4143                  (long)cmd, arg_type[0]);
4144         ret = -TARGET_ENOSYS;
4145         break;
4147     return ret;
/*
 * Termios flag translation tables.  Each bitmask_transtbl row maps a
 * (target mask, target bits) pair to the corresponding (host mask,
 * host bits); a zero row terminates each table.  Used by the termios
 * converters below via target_to_host_bitmask()/host_to_target_bitmask().
 */
/* c_iflag: input modes. */
4150 static const bitmask_transtbl iflag_tbl[] = {
4151         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4152         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4153         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4154         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4155         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4156         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4157         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4158         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4159         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4160         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4161         { TARGET_IXON, TARGET_IXON, IXON, IXON },
4162         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4163         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4164         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4165         { 0, 0, 0, 0 }
/* c_oflag: output modes, including the multi-bit delay fields
 * (NLDLY/CRDLY/TABDLY/...), which need one row per value. */
4168 static const bitmask_transtbl oflag_tbl[] = {
4169 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4170 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4171 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4172 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4173 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4174 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4175 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4176 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4177 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4178 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4179 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4180 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4181 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4182 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4183 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4184 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4185 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4186 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4187 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4188 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4189 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4190 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4191 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4192 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4193 	{ 0, 0, 0, 0 }
/* c_cflag: control modes; one row per baud rate under the CBAUD mask
 * and per character size under CSIZE. */
4196 static const bitmask_transtbl cflag_tbl[] = {
4197 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4198 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4199 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4200 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4201 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4202 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4203 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4204 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4205 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4206 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4207 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4208 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4209 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4210 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4211 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4212 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4213 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4214 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4215 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4216 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4217 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4218 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4219 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4220 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4221 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4222 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4223 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4224 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4225 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4226 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4227 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4228 	{ 0, 0, 0, 0 }
/* c_lflag: local modes. */
4231 static const bitmask_transtbl lflag_tbl[] = {
4232 	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4233 	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4234 	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4235 	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4236 	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4237 	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4238 	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4239 	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4240 	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4241 	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4242 	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4243 	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4244 	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4245 	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4246 	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4247 	{ 0, 0, 0, 0 }
/*
 * Convert a guest struct target_termios (src) into a host
 * struct host_termios (dst): flag words go through the bitmask
 * translation tables above (with byte swapping), and each control
 * character is copied to its host index.  Host c_cc slots with no
 * target counterpart are zeroed by the memset.
 */
4250 static void target_to_host_termios (void *dst, const void *src)
4252     struct host_termios *host = dst;
4253     const struct target_termios *target = src;
4255     host->c_iflag =
4256         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4257     host->c_oflag =
4258         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4259     host->c_cflag =
4260         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4261     host->c_lflag =
4262         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4263     host->c_line = target->c_line;
4265     memset(host->c_cc, 0, sizeof(host->c_cc));
4266     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4267     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4268     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4269     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4270     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4271     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4272     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4273     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4274     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4275     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4276     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4277     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4278     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4279     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4280     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4281     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4282     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/*
 * Inverse of target_to_host_termios(): convert a host
 * struct host_termios (src) into a guest struct target_termios (dst),
 * translating flag words through the same bitmask tables (swapped to
 * target byte order) and copying control characters to their target
 * indices.
 */
4285 static void host_to_target_termios (void *dst, const void *src)
4287     struct target_termios *target = dst;
4288     const struct host_termios *host = src;
4290     target->c_iflag =
4291         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4292     target->c_oflag =
4293         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4294     target->c_cflag =
4295         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4296     target->c_lflag =
4297         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4298     target->c_line = host->c_line;
4300     memset(target->c_cc, 0, sizeof(target->c_cc));
4301     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4302     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4303     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4304     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4305     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4306     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4307     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4308     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4309     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4310     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4311     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4312     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4313     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4314     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4315     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4316     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4317     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * Thunk StructEntry for termios: registers the two hand-written
 * converters above (index order: [to-target, to-host]) plus sizes and
 * alignments for both layouts.
 */
4320 static const StructEntry struct_termios_def = {
4321     .convert = { host_to_target_termios, target_to_host_termios },
4322     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4323     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
/* Guest<->host translation of mmap(2) flag bits. */
4326 static bitmask_transtbl mmap_flags_tbl[] = {
4327 	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4328 	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4329 	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4330 	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4331 	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4332 	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4333 	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4334 	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4335 	{ TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4336 	  MAP_NORESERVE },
4337 	{ 0, 0, 0, 0 }
4340 #if defined(TARGET_I386)
4342 /* NOTE: there is really one LDT for all the threads */
4343 static uint8_t *ldt_table;
/*
 * modify_ldt(0, ...) for x86 guests: copy up to bytecount bytes of the
 * emulated LDT into the guest buffer at ptr.  Returns the number of
 * bytes copied, 0 if no LDT has been allocated yet, or -TARGET_EFAULT
 * if the guest buffer is inaccessible.
 */
4345 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4347     int size;
4348     void *p;
4350     if (!ldt_table)
4351         return 0;
4352     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4353     if (size > bytecount)
4354         size = bytecount;
4355     p = lock_user(VERIFY_WRITE, ptr, size, 0);
4356     if (!p)
4357         return -TARGET_EFAULT;
4358     /* ??? Should this by byteswapped? */
4359     memcpy(p, ldt_table, size);
4360     unlock_user(p, ptr, size);
4361     return size;
/* XXX: add locking support */
/*
 * modify_ldt funcs 1/0x11: install one LDT entry described by a guest
 * struct user_desc (target_modify_ldt_ldt_s).  'oldmode' selects the
 * legacy encoding (no 'useable' bit, no TLS-style entries).  The LDT
 * backing store is allocated lazily on first write.  Mirrors the
 * packing done by the Linux kernel's write_ldt().
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the kernel's). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 (conforming code) is rejected as the kernel does. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor words exactly as the hardware expects. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
4456 /* specific and weird i386 syscalls */
4457 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4458 unsigned long bytecount)
4460 abi_long ret;
4462 switch (func) {
4463 case 0:
4464 ret = read_ldt(ptr, bytecount);
4465 break;
4466 case 1:
4467 ret = write_ldt(env, ptr, bytecount, 1);
4468 break;
4469 case 0x11:
4470 ret = write_ldt(env, ptr, bytecount, 0);
4471 break;
4472 default:
4473 ret = -TARGET_ENOSYS;
4474 break;
4476 return ret;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * set_thread_area(2) emulation: install a TLS descriptor into the
 * emulated GDT.  entry_number == -1 asks us to pick a free TLS slot
 * and report it back to the guest.  Descriptor packing mirrors the
 * kernel's set_thread_area()/fill_ldt().
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS entry and tell the guest which". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two descriptor words. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * get_thread_area(2) emulation: decode the GDT TLS descriptor at the
 * index the guest supplied back into base/limit/flags form and write
 * it to the guest's struct user_desc.  Inverse of do_set_thread_area.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor bits (inverse of the install path). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
/*
 * arch_prctl(2) emulation for 64-bit x86: get or set the FS/GS
 * segment base.  Returns 0 on success, -TARGET_EFAULT on a bad user
 * pointer for the GET variants, -TARGET_EINVAL for unknown codes.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Load a null selector, then set the base explicitly. */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000 /* host stack size for emulated threads */

/* Serializes clone/thread creation so child setup appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Handshake data passed from do_fork() to clone_func() when creating
 * a new thread (CLONE_VM): the parent fills it in and waits on 'cond'
 * until the child has published its tid back through 'tid'.
 */
typedef struct {
    CPUArchState *env;       /* CPU state for the new thread */
    pthread_mutex_t mutex;   /* protects the cond handshake below */
    pthread_cond_t cond;     /* signalled by the child when it is ready */
    pthread_t thread;
    uint32_t tid;            /* child's host tid, filled in by the child */
    abi_ulong child_tidptr;  /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr; /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;        /* signal mask the child should restore */
} new_thread_info;
/*
 * Host-thread entry point for guest threads created via do_fork() with
 * CLONE_VM: publish the tid where the guest asked for it, restore the
 * signal mask, signal the parent that setup is done, then enter the
 * CPU emulation loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* CLONE_CHILD_SETTID / CLONE_PARENT_SETTID stores requested by the
       guest; the pointers are 0 when not requested. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * clone(2)/fork(2)/vfork(2) emulation.  CLONE_VM becomes a host
 * pthread sharing this process's guest address space; anything else
 * becomes a host fork().  vfork is degraded to plain fork.
 * NOTE(review): the pthread_attr_* and pthread_create return values
 * are each assigned to 'ret' but only the last one is acted upon;
 * earlier failures are silently ignored.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    unsigned int nptl_flags;
    sigset_t sigmask;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        /* Remember the NPTL flags but strip them before they can reach
           any host syscall. */
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;

        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
            return -TARGET_EINVAL;
        }
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
4814 /* warning : doesn't handle linux specific flags... */
4815 static int target_to_host_fcntl_cmd(int cmd)
4817 switch(cmd) {
4818 case TARGET_F_DUPFD:
4819 case TARGET_F_GETFD:
4820 case TARGET_F_SETFD:
4821 case TARGET_F_GETFL:
4822 case TARGET_F_SETFL:
4823 return cmd;
4824 case TARGET_F_GETLK:
4825 return F_GETLK;
4826 case TARGET_F_SETLK:
4827 return F_SETLK;
4828 case TARGET_F_SETLKW:
4829 return F_SETLKW;
4830 case TARGET_F_GETOWN:
4831 return F_GETOWN;
4832 case TARGET_F_SETOWN:
4833 return F_SETOWN;
4834 case TARGET_F_GETSIG:
4835 return F_GETSIG;
4836 case TARGET_F_SETSIG:
4837 return F_SETSIG;
4838 #if TARGET_ABI_BITS == 32
4839 case TARGET_F_GETLK64:
4840 return F_GETLK64;
4841 case TARGET_F_SETLK64:
4842 return F_SETLK64;
4843 case TARGET_F_SETLKW64:
4844 return F_SETLKW64;
4845 #endif
4846 case TARGET_F_SETLEASE:
4847 return F_SETLEASE;
4848 case TARGET_F_GETLEASE:
4849 return F_GETLEASE;
4850 #ifdef F_DUPFD_CLOEXEC
4851 case TARGET_F_DUPFD_CLOEXEC:
4852 return F_DUPFD_CLOEXEC;
4853 #endif
4854 case TARGET_F_NOTIFY:
4855 return F_NOTIFY;
4856 #ifdef F_GETOWN_EX
4857 case TARGET_F_GETOWN_EX:
4858 return F_GETOWN_EX;
4859 #endif
4860 #ifdef F_SETOWN_EX
4861 case TARGET_F_SETOWN_EX:
4862 return F_SETOWN_EX;
4863 #endif
4864 default:
4865 return -TARGET_EINVAL;
4867 return -TARGET_EINVAL;
/* Build a bitmask_transtbl row that maps a whole target value to the
   host value of the same name (masks of -1 match all bits). */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }

/* Translation table for struct flock l_type values. */
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/*
 * fcntl(2) emulation: translate the command number and any pointed-to
 * structures (struct flock / flock64 / f_owner_ex) between target and
 * host layouts, call the host fcntl, then convert results back.
 * Returns the host fcntl result or a -TARGET_* errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            /* F_GETLK writes the conflicting lock back to the caller. */
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type =
                host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type =
            target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        /* NOTE(review): the >> 1 shift on l_type mirrors how the 64-bit
           flock encoding relates to flock_tbl here — verify against the
           target_flock64 definition before changing. */
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type =
                host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type =
            target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        /* Flag-valued result needs bit translation, not struct copy. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        /* Plain integer argument/result; pass straight through. */
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

/* Clamp a full-width uid to the legacy 16-bit ABI (overflow -> 65534). */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}

/* Clamp a full-width gid to the legacy 16-bit ABI (overflow -> 65534). */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}

/* Widen a 16-bit uid; the 16-bit -1 sentinel stays -1. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}

/* Widen a 16-bit gid; the 16-bit -1 sentinel stays -1. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}

/* Legacy ABI carries ids as 16-bit quantities on the wire. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* Modern ABI: ids are already full width, no translation needed. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/*
 * One-time setup for the syscall layer: register thunk struct
 * descriptors, build the reverse errno table, and patch each ioctl
 * table entry whose size field is the all-ones placeholder with the
 * real (target) size of its pointed-to argument.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit file offset from the two 32-bit registers a
 * 32-bit guest passes it in; which word is the high half follows the
 * guest's endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64: the 64-bit length arrives in a register pair.  Some
 * ABIs require that pair to start on an even register, in which case
 * the arguments are shifted up by one slot.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair handling as target_truncate64 —
 * skip a slot when the ABI aligns 64-bit pairs to even registers.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
5185 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5186 abi_ulong target_addr)
5188 struct target_timespec *target_ts;
5190 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5191 return -TARGET_EFAULT;
5192 host_ts->tv_sec = tswapal(target_ts->tv_sec);
5193 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
5194 unlock_user_struct(target_ts, target_addr, 0);
5195 return 0;
5198 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5199 struct timespec *host_ts)
5201 struct target_timespec *target_ts;
5203 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5204 return -TARGET_EFAULT;
5205 target_ts->tv_sec = tswapal(host_ts->tv_sec);
5206 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
5207 unlock_user_struct(target_ts, target_addr, 1);
5208 return 0;
5211 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5212 abi_ulong target_addr)
5214 struct target_itimerspec *target_itspec;
5216 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5217 return -TARGET_EFAULT;
5220 host_itspec->it_interval.tv_sec =
5221 tswapal(target_itspec->it_interval.tv_sec);
5222 host_itspec->it_interval.tv_nsec =
5223 tswapal(target_itspec->it_interval.tv_nsec);
5224 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5225 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5227 unlock_user_struct(target_itspec, target_addr, 1);
5228 return 0;
5231 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5232 struct itimerspec *host_its)
5234 struct target_itimerspec *target_itspec;
5236 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5237 return -TARGET_EFAULT;
5240 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5241 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5243 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5244 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5246 unlock_user_struct(target_itspec, target_addr, 0);
5247 return 0;
5250 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5251 abi_ulong target_addr)
5253 struct target_sigevent *target_sevp;
5255 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5256 return -TARGET_EFAULT;
5259 /* This union is awkward on 64 bit systems because it has a 32 bit
5260 * integer and a pointer in it; we follow the conversion approach
5261 * used for handling sigval types in signal.c so the guest should get
5262 * the correct value back even if we did a 64 bit byteswap and it's
5263 * using the 32 bit integer.
5265 host_sevp->sigev_value.sival_ptr =
5266 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5267 host_sevp->sigev_signo =
5268 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5269 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5270 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5272 unlock_user_struct(target_sevp, target_addr, 1);
5273 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the target's MCL_* bits in the mlockall() flags argument
   into the host's MCL_* bits; unknown bits are dropped. */
static inline int target_to_host_mlockall_arg(int arg)
{
    return ((arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0) |
           ((arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0);
}
#endif
/*
 * Fill the guest's 64-bit stat structure at target_addr from a host
 * struct stat.  On 32-bit ARM EABI a dedicated layout with different
 * padding is used; other targets use target_stat64 or plain
 * target_stat depending on TARGET_HAS_STRUCT_STAT64.
 * Returns 0 or -TARGET_EFAULT if the guest buffer is not writable.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some layouts carry the inode in a second, differently sized
           field as well. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
5354 /* ??? Using host futex calls even when target atomic operations
5355 are not really atomic probably breaks things. However implementing
5356 futexes locally would make futexes shared between multiple processes
5357 tricky. However they're probably useless because guest atomic
5358 operations won't work either. */
5359 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5360 target_ulong uaddr2, int val3)
5362 struct timespec ts, *pts;
5363 int base_op;
5365 /* ??? We assume FUTEX_* constants are the same on both host
5366 and target. */
5367 #ifdef FUTEX_CMD_MASK
5368 base_op = op & FUTEX_CMD_MASK;
5369 #else
5370 base_op = op;
5371 #endif
5372 switch (base_op) {
5373 case FUTEX_WAIT:
5374 case FUTEX_WAIT_BITSET:
5375 if (timeout) {
5376 pts = &ts;
5377 target_to_host_timespec(pts, timeout);
5378 } else {
5379 pts = NULL;
5381 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5382 pts, NULL, val3));
5383 case FUTEX_WAKE:
5384 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5385 case FUTEX_FD:
5386 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5387 case FUTEX_REQUEUE:
5388 case FUTEX_CMP_REQUEUE:
5389 case FUTEX_WAKE_OP:
5390 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5391 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5392 But the prototype takes a `struct timespec *'; insert casts
5393 to satisfy the compiler. We do not need to tswap TIMEOUT
5394 since it's not compared to guest memory. */
5395 pts = (struct timespec *)(uintptr_t) timeout;
5396 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5397 g2h(uaddr2),
5398 (base_op == FUTEX_CMP_REQUEUE
5399 ? tswap32(val3)
5400 : val3)));
5401 default:
5402 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * name_to_handle_at(2) emulation.  The guest supplies handle_bytes in
 * the file_handle header; we allocate a host-side handle of that size,
 * let the kernel fill it, and copy it back with the two header fields
 * byteswapped.  NOTE(review): the handle and mount id are copied back
 * even when the host call failed (e.g. EOVERFLOW updates handle_bytes,
 * which the guest needs) — confirm intended for other error cases.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): open the file described by the guest
 * struct file_handle at guest address HANDLE. Returns the new host fd
 * (as a target errno result) or -TARGET_EFAULT on bad guest pointers. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field; it sizes the opaque payload that
     * follows the fixed header. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the guest handle, then fix up the header fields to host
     * byte order (the payload itself is opaque and copied verbatim). */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
5493 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5495 /* signalfd siginfo conversion */
/* Byteswap a struct signalfd_siginfo from host to guest order and remap
 * the signal number. The in-file caller passes TINFO == INFO (in-place
 * conversion), so all host-order reads must happen before the stores.
 *
 * Fix: ssi_errno was previously read from TINFO (the destination); every
 * other field reads from INFO. With aliased pointers the behavior is
 * identical, but reading the destination is wrong (uninitialized) for any
 * non-aliased caller, so read INFO consistently. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    /* Remap the host signal number before any field is overwritten. */
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Checked while tinfo still holds host-order values (aliased input). */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
5535 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
5537 int i;
5539 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
5540 host_to_target_signalfd_siginfo(buf + i, buf + i);
5543 return len;
/* fd translator for signalfd descriptors: data read by the guest is a
 * stream of signalfd_siginfo records that must be byteswapped. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Emulate signalfd4(2): create/update a signalfd for the guest sigset at
 * guest address MASK. FLAGS are target O_NONBLOCK/O_CLOEXEC bits; any
 * other bit is rejected with -TARGET_EINVAL, matching the kernel. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Register the byteswap hook so guest read()s get converted data. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
5577 #endif
/* Map host to target signal numbers for the wait family of syscalls.
 * Only the embedded signal number is remapped; all other status bits are
 * assumed to have the same layout on host and target. */
int host_to_target_waitstatus(int status)
{
    int mapped;

    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits. */
        mapped = host_to_target_signal(WTERMSIG(status));
        return mapped | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal occupies bits 8-15. */
        mapped = host_to_target_signal(WSTOPSIG(status));
        return (mapped << 8) | (status & 0xff);
    }
    /* Exit and continue statuses carry no signal number. */
    return status;
}
/* Fake /proc/self/cmdline: copy the host's cmdline into FD, dropping the
 * first NUL-terminated word (the path to the qemu binary itself) so the
 * guest sees its own command line. Returns 0 on success, -1 with errno
 * set on read/write failure.
 *
 * Fix: memchr previously scanned sizeof(buf) bytes even when read()
 * returned fewer, examining uninitialized stack bytes past nb_read and
 * potentially matching a stale NUL there. Bound the scan by nb_read. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. Only the nb_read bytes just
               read are valid; if no NUL is found the first word continues
               into the next chunk and we keep skipping. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
5643 static int open_self_maps(void *cpu_env, int fd)
5645 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5646 TaskState *ts = cpu->opaque;
5647 FILE *fp;
5648 char *line = NULL;
5649 size_t len = 0;
5650 ssize_t read;
5652 fp = fopen("/proc/self/maps", "r");
5653 if (fp == NULL) {
5654 return -1;
5657 while ((read = getline(&line, &len, fp)) != -1) {
5658 int fields, dev_maj, dev_min, inode;
5659 uint64_t min, max, offset;
5660 char flag_r, flag_w, flag_x, flag_p;
5661 char path[512] = "";
5662 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5663 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5664 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5666 if ((fields < 10) || (fields > 11)) {
5667 continue;
5669 if (h2g_valid(min)) {
5670 int flags = page_get_flags(h2g(min));
5671 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5672 if (page_check_range(h2g(min), max - min, flags) == -1) {
5673 continue;
5675 if (h2g(min) == ts->info->stack_limit) {
5676 pstrcpy(path, sizeof(path), " [stack]");
5678 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5679 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5680 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5681 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5682 path[0] ? " " : "", path);
5686 free(line);
5687 fclose(fp);
5689 return 0;
5692 static int open_self_stat(void *cpu_env, int fd)
5694 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5695 TaskState *ts = cpu->opaque;
5696 abi_ulong start_stack = ts->info->start_stack;
5697 int i;
5699 for (i = 0; i < 44; i++) {
5700 char buf[128];
5701 int len;
5702 uint64_t val = 0;
5704 if (i == 0) {
5705 /* pid */
5706 val = getpid();
5707 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5708 } else if (i == 1) {
5709 /* app name */
5710 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5711 } else if (i == 27) {
5712 /* stack bottom */
5713 val = start_stack;
5714 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5715 } else {
5716 /* for the rest, there is MasterCard */
5717 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5720 len = strlen(buf);
5721 if (write(fd, buf, len) != len) {
5722 return -1;
5726 return 0;
/* Fake /proc/self/auxv: copy the guest's saved auxiliary vector (stored
 * on the target stack at exec time) into FD and rewind it. Always
 * returns 0, even if the guest pages cannot be locked or the copy is
 * short (best-effort, matching a partially readable proc file). */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Leave the fake file positioned at its start for the reader. */
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len have been advanced/decremented by the
         * write loop when passed to unlock_user — looks inconsistent with
         * the lock_user(auxv, len) call above; confirm unlock_user's
         * expectations before relying on the dirty-length here. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory — either "/proc/self/<entry>" or "/proc/<ourpid>/<entry>" —
 * and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric directory: it must be exactly our own pid. */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0 ? 1 : 0;
}
5783 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match predicate for fake /proc entries whose full path is the
 * table key (used when host and target endianness differ). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
5789 static int open_net_route(void *cpu_env, int fd)
5791 FILE *fp;
5792 char *line = NULL;
5793 size_t len = 0;
5794 ssize_t read;
5796 fp = fopen("/proc/net/route", "r");
5797 if (fp == NULL) {
5798 return -1;
5801 /* read header */
5803 read = getline(&line, &len, fp);
5804 dprintf(fd, "%s", line);
5806 /* read routes */
5808 while ((read = getline(&line, &len, fp)) != -1) {
5809 char iface[16];
5810 uint32_t dest, gw, mask;
5811 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5812 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5813 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5814 &mask, &mtu, &window, &irtt);
5815 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5816 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5817 metric, tswap32(mask), mtu, window, irtt);
5820 free(line);
5821 fclose(fp);
5823 return 0;
5825 #endif
/* openat(2) with /proc interposition: certain /proc entries about the
 * current process must reflect the *guest*, not the qemu host process,
 * so they are synthesized into an anonymous temp file instead of being
 * opened directly. Everything else falls through to safe_openat(). */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;              /* entry (or full path) to match */
        int (*fill)(void *cpu_env, int fd); /* writes fake contents into fd */
        int (*cmp)(const char *s1, const char *s2); /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must resolve to the guest binary, not qemu itself:
     * reuse the AT_EXECFD fd if the loader kept one, else open exec_path. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd stays usable, the name disappears. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the fake contents from the start. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
5887 #define TIMER_MAGIC 0x0caf0000
5888 #define TIMER_MAGIC_MASK 0xffff0000
5890 /* Convert QEMU provided timer ID back to internal 16bit index format */
5891 static target_timer_t get_timer_id(abi_long arg)
5893 target_timer_t timerid = arg;
5895 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5896 return -TARGET_EINVAL;
5899 timerid &= 0xffff;
5901 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5902 return -TARGET_EINVAL;
5905 return timerid;
5908 /* do_syscall() should always have a single exit point at the end so
5909 that actions, such as logging of syscall results, can be performed.
5910 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5911 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5912 abi_long arg2, abi_long arg3, abi_long arg4,
5913 abi_long arg5, abi_long arg6, abi_long arg7,
5914 abi_long arg8)
5916 CPUState *cpu = ENV_GET_CPU(cpu_env);
5917 abi_long ret;
5918 struct stat st;
5919 struct statfs stfs;
5920 void *p;
5922 #if defined(DEBUG_ERESTARTSYS)
5923 /* Debug-only code for exercising the syscall-restart code paths
5924 * in the per-architecture cpu main loops: restart every syscall
5925 * the guest makes once before letting it through.
5928 static int flag;
5930 flag = !flag;
5931 if (flag) {
5932 return -TARGET_ERESTARTSYS;
5935 #endif
5937 #ifdef DEBUG
5938 gemu_log("syscall %d", num);
5939 #endif
5940 if(do_strace)
5941 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5943 switch(num) {
5944 case TARGET_NR_exit:
5945 /* In old applications this may be used to implement _exit(2).
5946 However in threaded applictions it is used for thread termination,
5947 and _exit_group is used for application termination.
5948 Do thread termination if we have more then one thread. */
5949 /* FIXME: This probably breaks if a signal arrives. We should probably
5950 be disabling signals. */
5951 if (CPU_NEXT(first_cpu)) {
5952 TaskState *ts;
5954 cpu_list_lock();
5955 /* Remove the CPU from the list. */
5956 QTAILQ_REMOVE(&cpus, cpu, node);
5957 cpu_list_unlock();
5958 ts = cpu->opaque;
5959 if (ts->child_tidptr) {
5960 put_user_u32(0, ts->child_tidptr);
5961 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5962 NULL, NULL, 0);
5964 thread_cpu = NULL;
5965 object_unref(OBJECT(cpu));
5966 g_free(ts);
5967 rcu_unregister_thread();
5968 pthread_exit(NULL);
5970 #ifdef TARGET_GPROF
5971 _mcleanup();
5972 #endif
5973 gdb_exit(cpu_env, arg1);
5974 _exit(arg1);
5975 ret = 0; /* avoid warning */
5976 break;
5977 case TARGET_NR_read:
5978 if (arg3 == 0)
5979 ret = 0;
5980 else {
5981 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5982 goto efault;
5983 ret = get_errno(safe_read(arg1, p, arg3));
5984 if (ret >= 0 &&
5985 fd_trans_host_to_target_data(arg1)) {
5986 ret = fd_trans_host_to_target_data(arg1)(p, ret);
5988 unlock_user(p, arg2, ret);
5990 break;
5991 case TARGET_NR_write:
5992 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5993 goto efault;
5994 ret = get_errno(safe_write(arg1, p, arg3));
5995 unlock_user(p, arg2, 0);
5996 break;
5997 #ifdef TARGET_NR_open
5998 case TARGET_NR_open:
5999 if (!(p = lock_user_string(arg1)))
6000 goto efault;
6001 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
6002 target_to_host_bitmask(arg2, fcntl_flags_tbl),
6003 arg3));
6004 fd_trans_unregister(ret);
6005 unlock_user(p, arg1, 0);
6006 break;
6007 #endif
6008 case TARGET_NR_openat:
6009 if (!(p = lock_user_string(arg2)))
6010 goto efault;
6011 ret = get_errno(do_openat(cpu_env, arg1, p,
6012 target_to_host_bitmask(arg3, fcntl_flags_tbl),
6013 arg4));
6014 fd_trans_unregister(ret);
6015 unlock_user(p, arg2, 0);
6016 break;
6017 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6018 case TARGET_NR_name_to_handle_at:
6019 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
6020 break;
6021 #endif
6022 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6023 case TARGET_NR_open_by_handle_at:
6024 ret = do_open_by_handle_at(arg1, arg2, arg3);
6025 fd_trans_unregister(ret);
6026 break;
6027 #endif
6028 case TARGET_NR_close:
6029 fd_trans_unregister(arg1);
6030 ret = get_errno(close(arg1));
6031 break;
6032 case TARGET_NR_brk:
6033 ret = do_brk(arg1);
6034 break;
6035 #ifdef TARGET_NR_fork
6036 case TARGET_NR_fork:
6037 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
6038 break;
6039 #endif
6040 #ifdef TARGET_NR_waitpid
6041 case TARGET_NR_waitpid:
6043 int status;
6044 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
6045 if (!is_error(ret) && arg2 && ret
6046 && put_user_s32(host_to_target_waitstatus(status), arg2))
6047 goto efault;
6049 break;
6050 #endif
6051 #ifdef TARGET_NR_waitid
6052 case TARGET_NR_waitid:
6054 siginfo_t info;
6055 info.si_pid = 0;
6056 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
6057 if (!is_error(ret) && arg3 && info.si_pid != 0) {
6058 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
6059 goto efault;
6060 host_to_target_siginfo(p, &info);
6061 unlock_user(p, arg3, sizeof(target_siginfo_t));
6064 break;
6065 #endif
6066 #ifdef TARGET_NR_creat /* not on alpha */
6067 case TARGET_NR_creat:
6068 if (!(p = lock_user_string(arg1)))
6069 goto efault;
6070 ret = get_errno(creat(p, arg2));
6071 fd_trans_unregister(ret);
6072 unlock_user(p, arg1, 0);
6073 break;
6074 #endif
6075 #ifdef TARGET_NR_link
6076 case TARGET_NR_link:
6078 void * p2;
6079 p = lock_user_string(arg1);
6080 p2 = lock_user_string(arg2);
6081 if (!p || !p2)
6082 ret = -TARGET_EFAULT;
6083 else
6084 ret = get_errno(link(p, p2));
6085 unlock_user(p2, arg2, 0);
6086 unlock_user(p, arg1, 0);
6088 break;
6089 #endif
6090 #if defined(TARGET_NR_linkat)
6091 case TARGET_NR_linkat:
6093 void * p2 = NULL;
6094 if (!arg2 || !arg4)
6095 goto efault;
6096 p = lock_user_string(arg2);
6097 p2 = lock_user_string(arg4);
6098 if (!p || !p2)
6099 ret = -TARGET_EFAULT;
6100 else
6101 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6102 unlock_user(p, arg2, 0);
6103 unlock_user(p2, arg4, 0);
6105 break;
6106 #endif
6107 #ifdef TARGET_NR_unlink
6108 case TARGET_NR_unlink:
6109 if (!(p = lock_user_string(arg1)))
6110 goto efault;
6111 ret = get_errno(unlink(p));
6112 unlock_user(p, arg1, 0);
6113 break;
6114 #endif
6115 #if defined(TARGET_NR_unlinkat)
6116 case TARGET_NR_unlinkat:
6117 if (!(p = lock_user_string(arg2)))
6118 goto efault;
6119 ret = get_errno(unlinkat(arg1, p, arg3));
6120 unlock_user(p, arg2, 0);
6121 break;
6122 #endif
6123 case TARGET_NR_execve:
6125 char **argp, **envp;
6126 int argc, envc;
6127 abi_ulong gp;
6128 abi_ulong guest_argp;
6129 abi_ulong guest_envp;
6130 abi_ulong addr;
6131 char **q;
6132 int total_size = 0;
6134 argc = 0;
6135 guest_argp = arg2;
6136 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6137 if (get_user_ual(addr, gp))
6138 goto efault;
6139 if (!addr)
6140 break;
6141 argc++;
6143 envc = 0;
6144 guest_envp = arg3;
6145 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6146 if (get_user_ual(addr, gp))
6147 goto efault;
6148 if (!addr)
6149 break;
6150 envc++;
6153 argp = alloca((argc + 1) * sizeof(void *));
6154 envp = alloca((envc + 1) * sizeof(void *));
6156 for (gp = guest_argp, q = argp; gp;
6157 gp += sizeof(abi_ulong), q++) {
6158 if (get_user_ual(addr, gp))
6159 goto execve_efault;
6160 if (!addr)
6161 break;
6162 if (!(*q = lock_user_string(addr)))
6163 goto execve_efault;
6164 total_size += strlen(*q) + 1;
6166 *q = NULL;
6168 for (gp = guest_envp, q = envp; gp;
6169 gp += sizeof(abi_ulong), q++) {
6170 if (get_user_ual(addr, gp))
6171 goto execve_efault;
6172 if (!addr)
6173 break;
6174 if (!(*q = lock_user_string(addr)))
6175 goto execve_efault;
6176 total_size += strlen(*q) + 1;
6178 *q = NULL;
6180 if (!(p = lock_user_string(arg1)))
6181 goto execve_efault;
6182 ret = get_errno(execve(p, argp, envp));
6183 unlock_user(p, arg1, 0);
6185 goto execve_end;
6187 execve_efault:
6188 ret = -TARGET_EFAULT;
6190 execve_end:
6191 for (gp = guest_argp, q = argp; *q;
6192 gp += sizeof(abi_ulong), q++) {
6193 if (get_user_ual(addr, gp)
6194 || !addr)
6195 break;
6196 unlock_user(*q, addr, 0);
6198 for (gp = guest_envp, q = envp; *q;
6199 gp += sizeof(abi_ulong), q++) {
6200 if (get_user_ual(addr, gp)
6201 || !addr)
6202 break;
6203 unlock_user(*q, addr, 0);
6206 break;
6207 case TARGET_NR_chdir:
6208 if (!(p = lock_user_string(arg1)))
6209 goto efault;
6210 ret = get_errno(chdir(p));
6211 unlock_user(p, arg1, 0);
6212 break;
6213 #ifdef TARGET_NR_time
6214 case TARGET_NR_time:
6216 time_t host_time;
6217 ret = get_errno(time(&host_time));
6218 if (!is_error(ret)
6219 && arg1
6220 && put_user_sal(host_time, arg1))
6221 goto efault;
6223 break;
6224 #endif
6225 #ifdef TARGET_NR_mknod
6226 case TARGET_NR_mknod:
6227 if (!(p = lock_user_string(arg1)))
6228 goto efault;
6229 ret = get_errno(mknod(p, arg2, arg3));
6230 unlock_user(p, arg1, 0);
6231 break;
6232 #endif
6233 #if defined(TARGET_NR_mknodat)
6234 case TARGET_NR_mknodat:
6235 if (!(p = lock_user_string(arg2)))
6236 goto efault;
6237 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6238 unlock_user(p, arg2, 0);
6239 break;
6240 #endif
6241 #ifdef TARGET_NR_chmod
6242 case TARGET_NR_chmod:
6243 if (!(p = lock_user_string(arg1)))
6244 goto efault;
6245 ret = get_errno(chmod(p, arg2));
6246 unlock_user(p, arg1, 0);
6247 break;
6248 #endif
6249 #ifdef TARGET_NR_break
6250 case TARGET_NR_break:
6251 goto unimplemented;
6252 #endif
6253 #ifdef TARGET_NR_oldstat
6254 case TARGET_NR_oldstat:
6255 goto unimplemented;
6256 #endif
6257 case TARGET_NR_lseek:
6258 ret = get_errno(lseek(arg1, arg2, arg3));
6259 break;
6260 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6261 /* Alpha specific */
6262 case TARGET_NR_getxpid:
6263 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6264 ret = get_errno(getpid());
6265 break;
6266 #endif
6267 #ifdef TARGET_NR_getpid
6268 case TARGET_NR_getpid:
6269 ret = get_errno(getpid());
6270 break;
6271 #endif
6272 case TARGET_NR_mount:
6274 /* need to look at the data field */
6275 void *p2, *p3;
6277 if (arg1) {
6278 p = lock_user_string(arg1);
6279 if (!p) {
6280 goto efault;
6282 } else {
6283 p = NULL;
6286 p2 = lock_user_string(arg2);
6287 if (!p2) {
6288 if (arg1) {
6289 unlock_user(p, arg1, 0);
6291 goto efault;
6294 if (arg3) {
6295 p3 = lock_user_string(arg3);
6296 if (!p3) {
6297 if (arg1) {
6298 unlock_user(p, arg1, 0);
6300 unlock_user(p2, arg2, 0);
6301 goto efault;
6303 } else {
6304 p3 = NULL;
6307 /* FIXME - arg5 should be locked, but it isn't clear how to
6308 * do that since it's not guaranteed to be a NULL-terminated
6309 * string.
6311 if (!arg5) {
6312 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6313 } else {
6314 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6316 ret = get_errno(ret);
6318 if (arg1) {
6319 unlock_user(p, arg1, 0);
6321 unlock_user(p2, arg2, 0);
6322 if (arg3) {
6323 unlock_user(p3, arg3, 0);
6326 break;
6327 #ifdef TARGET_NR_umount
6328 case TARGET_NR_umount:
6329 if (!(p = lock_user_string(arg1)))
6330 goto efault;
6331 ret = get_errno(umount(p));
6332 unlock_user(p, arg1, 0);
6333 break;
6334 #endif
6335 #ifdef TARGET_NR_stime /* not on alpha */
6336 case TARGET_NR_stime:
6338 time_t host_time;
6339 if (get_user_sal(host_time, arg1))
6340 goto efault;
6341 ret = get_errno(stime(&host_time));
6343 break;
6344 #endif
6345 case TARGET_NR_ptrace:
6346 goto unimplemented;
6347 #ifdef TARGET_NR_alarm /* not on alpha */
6348 case TARGET_NR_alarm:
6349 ret = alarm(arg1);
6350 break;
6351 #endif
6352 #ifdef TARGET_NR_oldfstat
6353 case TARGET_NR_oldfstat:
6354 goto unimplemented;
6355 #endif
6356 #ifdef TARGET_NR_pause /* not on alpha */
6357 case TARGET_NR_pause:
6358 ret = get_errno(pause());
6359 break;
6360 #endif
6361 #ifdef TARGET_NR_utime
6362 case TARGET_NR_utime:
6364 struct utimbuf tbuf, *host_tbuf;
6365 struct target_utimbuf *target_tbuf;
6366 if (arg2) {
6367 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6368 goto efault;
6369 tbuf.actime = tswapal(target_tbuf->actime);
6370 tbuf.modtime = tswapal(target_tbuf->modtime);
6371 unlock_user_struct(target_tbuf, arg2, 0);
6372 host_tbuf = &tbuf;
6373 } else {
6374 host_tbuf = NULL;
6376 if (!(p = lock_user_string(arg1)))
6377 goto efault;
6378 ret = get_errno(utime(p, host_tbuf));
6379 unlock_user(p, arg1, 0);
6381 break;
6382 #endif
6383 #ifdef TARGET_NR_utimes
6384 case TARGET_NR_utimes:
6386 struct timeval *tvp, tv[2];
6387 if (arg2) {
6388 if (copy_from_user_timeval(&tv[0], arg2)
6389 || copy_from_user_timeval(&tv[1],
6390 arg2 + sizeof(struct target_timeval)))
6391 goto efault;
6392 tvp = tv;
6393 } else {
6394 tvp = NULL;
6396 if (!(p = lock_user_string(arg1)))
6397 goto efault;
6398 ret = get_errno(utimes(p, tvp));
6399 unlock_user(p, arg1, 0);
6401 break;
6402 #endif
6403 #if defined(TARGET_NR_futimesat)
6404 case TARGET_NR_futimesat:
6406 struct timeval *tvp, tv[2];
6407 if (arg3) {
6408 if (copy_from_user_timeval(&tv[0], arg3)
6409 || copy_from_user_timeval(&tv[1],
6410 arg3 + sizeof(struct target_timeval)))
6411 goto efault;
6412 tvp = tv;
6413 } else {
6414 tvp = NULL;
6416 if (!(p = lock_user_string(arg2)))
6417 goto efault;
6418 ret = get_errno(futimesat(arg1, path(p), tvp));
6419 unlock_user(p, arg2, 0);
6421 break;
6422 #endif
6423 #ifdef TARGET_NR_stty
6424 case TARGET_NR_stty:
6425 goto unimplemented;
6426 #endif
6427 #ifdef TARGET_NR_gtty
6428 case TARGET_NR_gtty:
6429 goto unimplemented;
6430 #endif
6431 #ifdef TARGET_NR_access
6432 case TARGET_NR_access:
6433 if (!(p = lock_user_string(arg1)))
6434 goto efault;
6435 ret = get_errno(access(path(p), arg2));
6436 unlock_user(p, arg1, 0);
6437 break;
6438 #endif
6439 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6440 case TARGET_NR_faccessat:
6441 if (!(p = lock_user_string(arg2)))
6442 goto efault;
6443 ret = get_errno(faccessat(arg1, p, arg3, 0));
6444 unlock_user(p, arg2, 0);
6445 break;
6446 #endif
6447 #ifdef TARGET_NR_nice /* not on alpha */
6448 case TARGET_NR_nice:
6449 ret = get_errno(nice(arg1));
6450 break;
6451 #endif
6452 #ifdef TARGET_NR_ftime
6453 case TARGET_NR_ftime:
6454 goto unimplemented;
6455 #endif
6456 case TARGET_NR_sync:
6457 sync();
6458 ret = 0;
6459 break;
6460 case TARGET_NR_kill:
6461 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6462 break;
6463 #ifdef TARGET_NR_rename
6464 case TARGET_NR_rename:
6466 void *p2;
6467 p = lock_user_string(arg1);
6468 p2 = lock_user_string(arg2);
6469 if (!p || !p2)
6470 ret = -TARGET_EFAULT;
6471 else
6472 ret = get_errno(rename(p, p2));
6473 unlock_user(p2, arg2, 0);
6474 unlock_user(p, arg1, 0);
6476 break;
6477 #endif
6478 #if defined(TARGET_NR_renameat)
6479 case TARGET_NR_renameat:
6481 void *p2;
6482 p = lock_user_string(arg2);
6483 p2 = lock_user_string(arg4);
6484 if (!p || !p2)
6485 ret = -TARGET_EFAULT;
6486 else
6487 ret = get_errno(renameat(arg1, p, arg3, p2));
6488 unlock_user(p2, arg4, 0);
6489 unlock_user(p, arg2, 0);
6491 break;
6492 #endif
6493 #ifdef TARGET_NR_mkdir
6494 case TARGET_NR_mkdir:
6495 if (!(p = lock_user_string(arg1)))
6496 goto efault;
6497 ret = get_errno(mkdir(p, arg2));
6498 unlock_user(p, arg1, 0);
6499 break;
6500 #endif
6501 #if defined(TARGET_NR_mkdirat)
6502 case TARGET_NR_mkdirat:
6503 if (!(p = lock_user_string(arg2)))
6504 goto efault;
6505 ret = get_errno(mkdirat(arg1, p, arg3));
6506 unlock_user(p, arg2, 0);
6507 break;
6508 #endif
6509 #ifdef TARGET_NR_rmdir
6510 case TARGET_NR_rmdir:
6511 if (!(p = lock_user_string(arg1)))
6512 goto efault;
6513 ret = get_errno(rmdir(p));
6514 unlock_user(p, arg1, 0);
6515 break;
6516 #endif
6517 case TARGET_NR_dup:
6518 ret = get_errno(dup(arg1));
6519 if (ret >= 0) {
6520 fd_trans_dup(arg1, ret);
6522 break;
6523 #ifdef TARGET_NR_pipe
6524 case TARGET_NR_pipe:
6525 ret = do_pipe(cpu_env, arg1, 0, 0);
6526 break;
6527 #endif
6528 #ifdef TARGET_NR_pipe2
6529 case TARGET_NR_pipe2:
6530 ret = do_pipe(cpu_env, arg1,
6531 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6532 break;
6533 #endif
6534 case TARGET_NR_times:
6536 struct target_tms *tmsp;
6537 struct tms tms;
6538 ret = get_errno(times(&tms));
6539 if (arg1) {
6540 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6541 if (!tmsp)
6542 goto efault;
6543 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6544 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6545 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6546 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6548 if (!is_error(ret))
6549 ret = host_to_target_clock_t(ret);
6551 break;
6552 #ifdef TARGET_NR_prof
6553 case TARGET_NR_prof:
6554 goto unimplemented;
6555 #endif
6556 #ifdef TARGET_NR_signal
6557 case TARGET_NR_signal:
6558 goto unimplemented;
6559 #endif
6560 case TARGET_NR_acct:
6561 if (arg1 == 0) {
6562 ret = get_errno(acct(NULL));
6563 } else {
6564 if (!(p = lock_user_string(arg1)))
6565 goto efault;
6566 ret = get_errno(acct(path(p)));
6567 unlock_user(p, arg1, 0);
6569 break;
6570 #ifdef TARGET_NR_umount2
6571 case TARGET_NR_umount2:
6572 if (!(p = lock_user_string(arg1)))
6573 goto efault;
6574 ret = get_errno(umount2(p, arg2));
6575 unlock_user(p, arg1, 0);
6576 break;
6577 #endif
6578 #ifdef TARGET_NR_lock
6579 case TARGET_NR_lock:
6580 goto unimplemented;
6581 #endif
6582 case TARGET_NR_ioctl:
6583 ret = do_ioctl(arg1, arg2, arg3);
6584 break;
6585 case TARGET_NR_fcntl:
6586 ret = do_fcntl(arg1, arg2, arg3);
6587 break;
6588 #ifdef TARGET_NR_mpx
6589 case TARGET_NR_mpx:
6590 goto unimplemented;
6591 #endif
6592 case TARGET_NR_setpgid:
6593 ret = get_errno(setpgid(arg1, arg2));
6594 break;
6595 #ifdef TARGET_NR_ulimit
6596 case TARGET_NR_ulimit:
6597 goto unimplemented;
6598 #endif
6599 #ifdef TARGET_NR_oldolduname
6600 case TARGET_NR_oldolduname:
6601 goto unimplemented;
6602 #endif
6603 case TARGET_NR_umask:
6604 ret = get_errno(umask(arg1));
6605 break;
6606 case TARGET_NR_chroot:
6607 if (!(p = lock_user_string(arg1)))
6608 goto efault;
6609 ret = get_errno(chroot(p));
6610 unlock_user(p, arg1, 0);
6611 break;
6612 #ifdef TARGET_NR_ustat
6613 case TARGET_NR_ustat:
6614 goto unimplemented;
6615 #endif
6616 #ifdef TARGET_NR_dup2
6617 case TARGET_NR_dup2:
6618 ret = get_errno(dup2(arg1, arg2));
6619 if (ret >= 0) {
6620 fd_trans_dup(arg1, arg2);
6622 break;
6623 #endif
6624 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6625 case TARGET_NR_dup3:
6626 ret = get_errno(dup3(arg1, arg2, arg3));
6627 if (ret >= 0) {
6628 fd_trans_dup(arg1, arg2);
6630 break;
6631 #endif
6632 #ifdef TARGET_NR_getppid /* not on alpha */
6633 case TARGET_NR_getppid:
6634 ret = get_errno(getppid());
6635 break;
6636 #endif
6637 #ifdef TARGET_NR_getpgrp
6638 case TARGET_NR_getpgrp:
6639 ret = get_errno(getpgrp());
6640 break;
6641 #endif
6642 case TARGET_NR_setsid:
6643 ret = get_errno(setsid());
6644 break;
6645 #ifdef TARGET_NR_sigaction
6646 case TARGET_NR_sigaction:
6648 #if defined(TARGET_ALPHA)
6649 struct target_sigaction act, oact, *pact = 0;
6650 struct target_old_sigaction *old_act;
6651 if (arg2) {
6652 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6653 goto efault;
6654 act._sa_handler = old_act->_sa_handler;
6655 target_siginitset(&act.sa_mask, old_act->sa_mask);
6656 act.sa_flags = old_act->sa_flags;
6657 act.sa_restorer = 0;
6658 unlock_user_struct(old_act, arg2, 0);
6659 pact = &act;
6661 ret = get_errno(do_sigaction(arg1, pact, &oact));
6662 if (!is_error(ret) && arg3) {
6663 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6664 goto efault;
6665 old_act->_sa_handler = oact._sa_handler;
6666 old_act->sa_mask = oact.sa_mask.sig[0];
6667 old_act->sa_flags = oact.sa_flags;
6668 unlock_user_struct(old_act, arg3, 1);
6670 #elif defined(TARGET_MIPS)
6671 struct target_sigaction act, oact, *pact, *old_act;
6673 if (arg2) {
6674 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6675 goto efault;
6676 act._sa_handler = old_act->_sa_handler;
6677 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6678 act.sa_flags = old_act->sa_flags;
6679 unlock_user_struct(old_act, arg2, 0);
6680 pact = &act;
6681 } else {
6682 pact = NULL;
6685 ret = get_errno(do_sigaction(arg1, pact, &oact));
6687 if (!is_error(ret) && arg3) {
6688 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6689 goto efault;
6690 old_act->_sa_handler = oact._sa_handler;
6691 old_act->sa_flags = oact.sa_flags;
6692 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6693 old_act->sa_mask.sig[1] = 0;
6694 old_act->sa_mask.sig[2] = 0;
6695 old_act->sa_mask.sig[3] = 0;
6696 unlock_user_struct(old_act, arg3, 1);
6698 #else
6699 struct target_old_sigaction *old_act;
6700 struct target_sigaction act, oact, *pact;
6701 if (arg2) {
6702 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6703 goto efault;
6704 act._sa_handler = old_act->_sa_handler;
6705 target_siginitset(&act.sa_mask, old_act->sa_mask);
6706 act.sa_flags = old_act->sa_flags;
6707 act.sa_restorer = old_act->sa_restorer;
6708 unlock_user_struct(old_act, arg2, 0);
6709 pact = &act;
6710 } else {
6711 pact = NULL;
6713 ret = get_errno(do_sigaction(arg1, pact, &oact));
6714 if (!is_error(ret) && arg3) {
6715 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6716 goto efault;
6717 old_act->_sa_handler = oact._sa_handler;
6718 old_act->sa_mask = oact.sa_mask.sig[0];
6719 old_act->sa_flags = oact.sa_flags;
6720 old_act->sa_restorer = oact.sa_restorer;
6721 unlock_user_struct(old_act, arg3, 1);
6723 #endif
6725 break;
6726 #endif
6727 case TARGET_NR_rt_sigaction:
6729 #if defined(TARGET_ALPHA)
6730 struct target_sigaction act, oact, *pact = 0;
6731 struct target_rt_sigaction *rt_act;
6732 /* ??? arg4 == sizeof(sigset_t). */
6733 if (arg2) {
6734 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6735 goto efault;
6736 act._sa_handler = rt_act->_sa_handler;
6737 act.sa_mask = rt_act->sa_mask;
6738 act.sa_flags = rt_act->sa_flags;
6739 act.sa_restorer = arg5;
6740 unlock_user_struct(rt_act, arg2, 0);
6741 pact = &act;
6743 ret = get_errno(do_sigaction(arg1, pact, &oact));
6744 if (!is_error(ret) && arg3) {
6745 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6746 goto efault;
6747 rt_act->_sa_handler = oact._sa_handler;
6748 rt_act->sa_mask = oact.sa_mask;
6749 rt_act->sa_flags = oact.sa_flags;
6750 unlock_user_struct(rt_act, arg3, 1);
6752 #else
6753 struct target_sigaction *act;
6754 struct target_sigaction *oact;
6756 if (arg2) {
6757 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6758 goto efault;
6759 } else
6760 act = NULL;
6761 if (arg3) {
6762 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6763 ret = -TARGET_EFAULT;
6764 goto rt_sigaction_fail;
6766 } else
6767 oact = NULL;
6768 ret = get_errno(do_sigaction(arg1, act, oact));
6769 rt_sigaction_fail:
6770 if (act)
6771 unlock_user_struct(act, arg2, 0);
6772 if (oact)
6773 unlock_user_struct(oact, arg3, 1);
6774 #endif
6776 break;
6777 #ifdef TARGET_NR_sgetmask /* not on alpha */
6778 case TARGET_NR_sgetmask:
6780 sigset_t cur_set;
6781 abi_ulong target_set;
6782 do_sigprocmask(0, NULL, &cur_set);
6783 host_to_target_old_sigset(&target_set, &cur_set);
6784 ret = target_set;
6786 break;
6787 #endif
6788 #ifdef TARGET_NR_ssetmask /* not on alpha */
6789 case TARGET_NR_ssetmask:
6791 sigset_t set, oset, cur_set;
6792 abi_ulong target_set = arg1;
6793 do_sigprocmask(0, NULL, &cur_set);
6794 target_to_host_old_sigset(&set, &target_set);
6795 sigorset(&set, &set, &cur_set);
6796 do_sigprocmask(SIG_SETMASK, &set, &oset);
6797 host_to_target_old_sigset(&target_set, &oset);
6798 ret = target_set;
6800 break;
6801 #endif
6802 #ifdef TARGET_NR_sigprocmask
6803 case TARGET_NR_sigprocmask:
6805 #if defined(TARGET_ALPHA)
6806 sigset_t set, oldset;
6807 abi_ulong mask;
6808 int how;
6810 switch (arg1) {
6811 case TARGET_SIG_BLOCK:
6812 how = SIG_BLOCK;
6813 break;
6814 case TARGET_SIG_UNBLOCK:
6815 how = SIG_UNBLOCK;
6816 break;
6817 case TARGET_SIG_SETMASK:
6818 how = SIG_SETMASK;
6819 break;
6820 default:
6821 ret = -TARGET_EINVAL;
6822 goto fail;
6824 mask = arg2;
6825 target_to_host_old_sigset(&set, &mask);
6827 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6828 if (!is_error(ret)) {
6829 host_to_target_old_sigset(&mask, &oldset);
6830 ret = mask;
6831 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6833 #else
6834 sigset_t set, oldset, *set_ptr;
6835 int how;
6837 if (arg2) {
6838 switch (arg1) {
6839 case TARGET_SIG_BLOCK:
6840 how = SIG_BLOCK;
6841 break;
6842 case TARGET_SIG_UNBLOCK:
6843 how = SIG_UNBLOCK;
6844 break;
6845 case TARGET_SIG_SETMASK:
6846 how = SIG_SETMASK;
6847 break;
6848 default:
6849 ret = -TARGET_EINVAL;
6850 goto fail;
6852 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6853 goto efault;
6854 target_to_host_old_sigset(&set, p);
6855 unlock_user(p, arg2, 0);
6856 set_ptr = &set;
6857 } else {
6858 how = 0;
6859 set_ptr = NULL;
6861 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6862 if (!is_error(ret) && arg3) {
6863 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6864 goto efault;
6865 host_to_target_old_sigset(p, &oldset);
6866 unlock_user(p, arg3, sizeof(target_sigset_t));
6868 #endif
6870 break;
6871 #endif
6872 case TARGET_NR_rt_sigprocmask:
6874 int how = arg1;
6875 sigset_t set, oldset, *set_ptr;
6877 if (arg2) {
6878 switch(how) {
6879 case TARGET_SIG_BLOCK:
6880 how = SIG_BLOCK;
6881 break;
6882 case TARGET_SIG_UNBLOCK:
6883 how = SIG_UNBLOCK;
6884 break;
6885 case TARGET_SIG_SETMASK:
6886 how = SIG_SETMASK;
6887 break;
6888 default:
6889 ret = -TARGET_EINVAL;
6890 goto fail;
6892 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6893 goto efault;
6894 target_to_host_sigset(&set, p);
6895 unlock_user(p, arg2, 0);
6896 set_ptr = &set;
6897 } else {
6898 how = 0;
6899 set_ptr = NULL;
6901 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6902 if (!is_error(ret) && arg3) {
6903 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6904 goto efault;
6905 host_to_target_sigset(p, &oldset);
6906 unlock_user(p, arg3, sizeof(target_sigset_t));
6909 break;
6910 #ifdef TARGET_NR_sigpending
6911 case TARGET_NR_sigpending:
6913 sigset_t set;
6914 ret = get_errno(sigpending(&set));
6915 if (!is_error(ret)) {
6916 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6917 goto efault;
6918 host_to_target_old_sigset(p, &set);
6919 unlock_user(p, arg1, sizeof(target_sigset_t));
6922 break;
6923 #endif
6924 case TARGET_NR_rt_sigpending:
6926 sigset_t set;
6927 ret = get_errno(sigpending(&set));
6928 if (!is_error(ret)) {
6929 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6930 goto efault;
6931 host_to_target_sigset(p, &set);
6932 unlock_user(p, arg1, sizeof(target_sigset_t));
6935 break;
6936 #ifdef TARGET_NR_sigsuspend
6937 case TARGET_NR_sigsuspend:
6939 sigset_t set;
6940 #if defined(TARGET_ALPHA)
6941 abi_ulong mask = arg1;
6942 target_to_host_old_sigset(&set, &mask);
6943 #else
6944 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6945 goto efault;
6946 target_to_host_old_sigset(&set, p);
6947 unlock_user(p, arg1, 0);
6948 #endif
6949 ret = get_errno(sigsuspend(&set));
6951 break;
6952 #endif
6953 case TARGET_NR_rt_sigsuspend:
6955 sigset_t set;
6956 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6957 goto efault;
6958 target_to_host_sigset(&set, p);
6959 unlock_user(p, arg1, 0);
6960 ret = get_errno(sigsuspend(&set));
6962 break;
6963 case TARGET_NR_rt_sigtimedwait:
6965 sigset_t set;
6966 struct timespec uts, *puts;
6967 siginfo_t uinfo;
6969 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6970 goto efault;
6971 target_to_host_sigset(&set, p);
6972 unlock_user(p, arg1, 0);
6973 if (arg3) {
6974 puts = &uts;
6975 target_to_host_timespec(puts, arg3);
6976 } else {
6977 puts = NULL;
6979 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6980 if (!is_error(ret)) {
6981 if (arg2) {
6982 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6984 if (!p) {
6985 goto efault;
6987 host_to_target_siginfo(p, &uinfo);
6988 unlock_user(p, arg2, sizeof(target_siginfo_t));
6990 ret = host_to_target_signal(ret);
6993 break;
6994 case TARGET_NR_rt_sigqueueinfo:
6996 siginfo_t uinfo;
6997 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6998 goto efault;
6999 target_to_host_siginfo(&uinfo, p);
7000 unlock_user(p, arg1, 0);
7001 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
7003 break;
7004 #ifdef TARGET_NR_sigreturn
7005 case TARGET_NR_sigreturn:
7006 ret = do_sigreturn(cpu_env);
7007 break;
7008 #endif
7009 case TARGET_NR_rt_sigreturn:
7010 ret = do_rt_sigreturn(cpu_env);
7011 break;
7012 case TARGET_NR_sethostname:
7013 if (!(p = lock_user_string(arg1)))
7014 goto efault;
7015 ret = get_errno(sethostname(p, arg2));
7016 unlock_user(p, arg1, 0);
7017 break;
7018 case TARGET_NR_setrlimit:
7020 int resource = target_to_host_resource(arg1);
7021 struct target_rlimit *target_rlim;
7022 struct rlimit rlim;
7023 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
7024 goto efault;
7025 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
7026 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
7027 unlock_user_struct(target_rlim, arg2, 0);
7028 ret = get_errno(setrlimit(resource, &rlim));
7030 break;
7031 case TARGET_NR_getrlimit:
7033 int resource = target_to_host_resource(arg1);
7034 struct target_rlimit *target_rlim;
7035 struct rlimit rlim;
7037 ret = get_errno(getrlimit(resource, &rlim));
7038 if (!is_error(ret)) {
7039 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7040 goto efault;
7041 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7042 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7043 unlock_user_struct(target_rlim, arg2, 1);
7046 break;
7047 case TARGET_NR_getrusage:
7049 struct rusage rusage;
7050 ret = get_errno(getrusage(arg1, &rusage));
7051 if (!is_error(ret)) {
7052 ret = host_to_target_rusage(arg2, &rusage);
7055 break;
7056 case TARGET_NR_gettimeofday:
7058 struct timeval tv;
7059 ret = get_errno(gettimeofday(&tv, NULL));
7060 if (!is_error(ret)) {
7061 if (copy_to_user_timeval(arg1, &tv))
7062 goto efault;
7065 break;
7066 case TARGET_NR_settimeofday:
7068 struct timeval tv, *ptv = NULL;
7069 struct timezone tz, *ptz = NULL;
7071 if (arg1) {
7072 if (copy_from_user_timeval(&tv, arg1)) {
7073 goto efault;
7075 ptv = &tv;
7078 if (arg2) {
7079 if (copy_from_user_timezone(&tz, arg2)) {
7080 goto efault;
7082 ptz = &tz;
7085 ret = get_errno(settimeofday(ptv, ptz));
7087 break;
7088 #if defined(TARGET_NR_select)
7089 case TARGET_NR_select:
7090 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7091 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7092 #else
7094 struct target_sel_arg_struct *sel;
7095 abi_ulong inp, outp, exp, tvp;
7096 long nsel;
7098 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7099 goto efault;
7100 nsel = tswapal(sel->n);
7101 inp = tswapal(sel->inp);
7102 outp = tswapal(sel->outp);
7103 exp = tswapal(sel->exp);
7104 tvp = tswapal(sel->tvp);
7105 unlock_user_struct(sel, arg1, 0);
7106 ret = do_select(nsel, inp, outp, exp, tvp);
7108 #endif
7109 break;
7110 #endif
7111 #ifdef TARGET_NR_pselect6
7112 case TARGET_NR_pselect6:
7114 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7115 fd_set rfds, wfds, efds;
7116 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7117 struct timespec ts, *ts_ptr;
7120 * The 6th arg is actually two args smashed together,
7121 * so we cannot use the C library.
7123 sigset_t set;
7124 struct {
7125 sigset_t *set;
7126 size_t size;
7127 } sig, *sig_ptr;
7129 abi_ulong arg_sigset, arg_sigsize, *arg7;
7130 target_sigset_t *target_sigset;
7132 n = arg1;
7133 rfd_addr = arg2;
7134 wfd_addr = arg3;
7135 efd_addr = arg4;
7136 ts_addr = arg5;
7138 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7139 if (ret) {
7140 goto fail;
7142 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7143 if (ret) {
7144 goto fail;
7146 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7147 if (ret) {
7148 goto fail;
7152 * This takes a timespec, and not a timeval, so we cannot
7153 * use the do_select() helper ...
7155 if (ts_addr) {
7156 if (target_to_host_timespec(&ts, ts_addr)) {
7157 goto efault;
7159 ts_ptr = &ts;
7160 } else {
7161 ts_ptr = NULL;
7164 /* Extract the two packed args for the sigset */
7165 if (arg6) {
7166 sig_ptr = &sig;
7167 sig.size = _NSIG / 8;
7169 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7170 if (!arg7) {
7171 goto efault;
7173 arg_sigset = tswapal(arg7[0]);
7174 arg_sigsize = tswapal(arg7[1]);
7175 unlock_user(arg7, arg6, 0);
7177 if (arg_sigset) {
7178 sig.set = &set;
7179 if (arg_sigsize != sizeof(*target_sigset)) {
7180 /* Like the kernel, we enforce correct size sigsets */
7181 ret = -TARGET_EINVAL;
7182 goto fail;
7184 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7185 sizeof(*target_sigset), 1);
7186 if (!target_sigset) {
7187 goto efault;
7189 target_to_host_sigset(&set, target_sigset);
7190 unlock_user(target_sigset, arg_sigset, 0);
7191 } else {
7192 sig.set = NULL;
7194 } else {
7195 sig_ptr = NULL;
7198 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7199 ts_ptr, sig_ptr));
7201 if (!is_error(ret)) {
7202 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7203 goto efault;
7204 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7205 goto efault;
7206 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7207 goto efault;
7209 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7210 goto efault;
7213 break;
7214 #endif
7215 #ifdef TARGET_NR_symlink
7216 case TARGET_NR_symlink:
7218 void *p2;
7219 p = lock_user_string(arg1);
7220 p2 = lock_user_string(arg2);
7221 if (!p || !p2)
7222 ret = -TARGET_EFAULT;
7223 else
7224 ret = get_errno(symlink(p, p2));
7225 unlock_user(p2, arg2, 0);
7226 unlock_user(p, arg1, 0);
7228 break;
7229 #endif
7230 #if defined(TARGET_NR_symlinkat)
7231 case TARGET_NR_symlinkat:
7233 void *p2;
7234 p = lock_user_string(arg1);
7235 p2 = lock_user_string(arg3);
7236 if (!p || !p2)
7237 ret = -TARGET_EFAULT;
7238 else
7239 ret = get_errno(symlinkat(p, arg2, p2));
7240 unlock_user(p2, arg3, 0);
7241 unlock_user(p, arg1, 0);
7243 break;
7244 #endif
7245 #ifdef TARGET_NR_oldlstat
7246 case TARGET_NR_oldlstat:
7247 goto unimplemented;
7248 #endif
7249 #ifdef TARGET_NR_readlink
7250 case TARGET_NR_readlink:
7252 void *p2;
7253 p = lock_user_string(arg1);
7254 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7255 if (!p || !p2) {
7256 ret = -TARGET_EFAULT;
7257 } else if (!arg3) {
7258 /* Short circuit this for the magic exe check. */
7259 ret = -TARGET_EINVAL;
7260 } else if (is_proc_myself((const char *)p, "exe")) {
7261 char real[PATH_MAX], *temp;
7262 temp = realpath(exec_path, real);
7263 /* Return value is # of bytes that we wrote to the buffer. */
7264 if (temp == NULL) {
7265 ret = get_errno(-1);
7266 } else {
7267 /* Don't worry about sign mismatch as earlier mapping
7268 * logic would have thrown a bad address error. */
7269 ret = MIN(strlen(real), arg3);
7270 /* We cannot NUL terminate the string. */
7271 memcpy(p2, real, ret);
7273 } else {
7274 ret = get_errno(readlink(path(p), p2, arg3));
7276 unlock_user(p2, arg2, ret);
7277 unlock_user(p, arg1, 0);
7279 break;
7280 #endif
7281 #if defined(TARGET_NR_readlinkat)
7282 case TARGET_NR_readlinkat:
7284 void *p2;
7285 p = lock_user_string(arg2);
7286 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7287 if (!p || !p2) {
7288 ret = -TARGET_EFAULT;
7289 } else if (is_proc_myself((const char *)p, "exe")) {
7290 char real[PATH_MAX], *temp;
7291 temp = realpath(exec_path, real);
7292 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
7293 snprintf((char *)p2, arg4, "%s", real);
7294 } else {
7295 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
7297 unlock_user(p2, arg3, ret);
7298 unlock_user(p, arg2, 0);
7300 break;
7301 #endif
7302 #ifdef TARGET_NR_uselib
7303 case TARGET_NR_uselib:
7304 goto unimplemented;
7305 #endif
7306 #ifdef TARGET_NR_swapon
7307 case TARGET_NR_swapon:
7308 if (!(p = lock_user_string(arg1)))
7309 goto efault;
7310 ret = get_errno(swapon(p, arg2));
7311 unlock_user(p, arg1, 0);
7312 break;
7313 #endif
7314 case TARGET_NR_reboot:
7315 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7316 /* arg4 must be ignored in all other cases */
7317 p = lock_user_string(arg4);
7318 if (!p) {
7319 goto efault;
7321 ret = get_errno(reboot(arg1, arg2, arg3, p));
7322 unlock_user(p, arg4, 0);
7323 } else {
7324 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7326 break;
7327 #ifdef TARGET_NR_readdir
7328 case TARGET_NR_readdir:
7329 goto unimplemented;
7330 #endif
7331 #ifdef TARGET_NR_mmap
7332 case TARGET_NR_mmap:
7333 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7334 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7335 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7336 || defined(TARGET_S390X)
7338 abi_ulong *v;
7339 abi_ulong v1, v2, v3, v4, v5, v6;
7340 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7341 goto efault;
7342 v1 = tswapal(v[0]);
7343 v2 = tswapal(v[1]);
7344 v3 = tswapal(v[2]);
7345 v4 = tswapal(v[3]);
7346 v5 = tswapal(v[4]);
7347 v6 = tswapal(v[5]);
7348 unlock_user(v, arg1, 0);
7349 ret = get_errno(target_mmap(v1, v2, v3,
7350 target_to_host_bitmask(v4, mmap_flags_tbl),
7351 v5, v6));
7353 #else
7354 ret = get_errno(target_mmap(arg1, arg2, arg3,
7355 target_to_host_bitmask(arg4, mmap_flags_tbl),
7356 arg5,
7357 arg6));
7358 #endif
7359 break;
7360 #endif
7361 #ifdef TARGET_NR_mmap2
7362 case TARGET_NR_mmap2:
7363 #ifndef MMAP_SHIFT
7364 #define MMAP_SHIFT 12
7365 #endif
7366 ret = get_errno(target_mmap(arg1, arg2, arg3,
7367 target_to_host_bitmask(arg4, mmap_flags_tbl),
7368 arg5,
7369 arg6 << MMAP_SHIFT));
7370 break;
7371 #endif
7372 case TARGET_NR_munmap:
7373 ret = get_errno(target_munmap(arg1, arg2));
7374 break;
7375 case TARGET_NR_mprotect:
7377 TaskState *ts = cpu->opaque;
7378 /* Special hack to detect libc making the stack executable. */
7379 if ((arg3 & PROT_GROWSDOWN)
7380 && arg1 >= ts->info->stack_limit
7381 && arg1 <= ts->info->start_stack) {
7382 arg3 &= ~PROT_GROWSDOWN;
7383 arg2 = arg2 + arg1 - ts->info->stack_limit;
7384 arg1 = ts->info->stack_limit;
7387 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7388 break;
7389 #ifdef TARGET_NR_mremap
7390 case TARGET_NR_mremap:
7391 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7392 break;
7393 #endif
7394 /* ??? msync/mlock/munlock are broken for softmmu. */
7395 #ifdef TARGET_NR_msync
7396 case TARGET_NR_msync:
7397 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7398 break;
7399 #endif
7400 #ifdef TARGET_NR_mlock
7401 case TARGET_NR_mlock:
7402 ret = get_errno(mlock(g2h(arg1), arg2));
7403 break;
7404 #endif
7405 #ifdef TARGET_NR_munlock
7406 case TARGET_NR_munlock:
7407 ret = get_errno(munlock(g2h(arg1), arg2));
7408 break;
7409 #endif
7410 #ifdef TARGET_NR_mlockall
7411 case TARGET_NR_mlockall:
7412 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7413 break;
7414 #endif
7415 #ifdef TARGET_NR_munlockall
7416 case TARGET_NR_munlockall:
7417 ret = get_errno(munlockall());
7418 break;
7419 #endif
7420 case TARGET_NR_truncate:
7421 if (!(p = lock_user_string(arg1)))
7422 goto efault;
7423 ret = get_errno(truncate(p, arg2));
7424 unlock_user(p, arg1, 0);
7425 break;
7426 case TARGET_NR_ftruncate:
7427 ret = get_errno(ftruncate(arg1, arg2));
7428 break;
7429 case TARGET_NR_fchmod:
7430 ret = get_errno(fchmod(arg1, arg2));
7431 break;
7432 #if defined(TARGET_NR_fchmodat)
7433 case TARGET_NR_fchmodat:
7434 if (!(p = lock_user_string(arg2)))
7435 goto efault;
7436 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7437 unlock_user(p, arg2, 0);
7438 break;
7439 #endif
7440 case TARGET_NR_getpriority:
7441 /* Note that negative values are valid for getpriority, so we must
7442 differentiate based on errno settings. */
7443 errno = 0;
7444 ret = getpriority(arg1, arg2);
7445 if (ret == -1 && errno != 0) {
7446 ret = -host_to_target_errno(errno);
7447 break;
7449 #ifdef TARGET_ALPHA
7450 /* Return value is the unbiased priority. Signal no error. */
7451 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7452 #else
7453 /* Return value is a biased priority to avoid negative numbers. */
7454 ret = 20 - ret;
7455 #endif
7456 break;
7457 case TARGET_NR_setpriority:
7458 ret = get_errno(setpriority(arg1, arg2, arg3));
7459 break;
7460 #ifdef TARGET_NR_profil
7461 case TARGET_NR_profil:
7462 goto unimplemented;
7463 #endif
7464 case TARGET_NR_statfs:
7465 if (!(p = lock_user_string(arg1)))
7466 goto efault;
7467 ret = get_errno(statfs(path(p), &stfs));
7468 unlock_user(p, arg1, 0);
7469 convert_statfs:
7470 if (!is_error(ret)) {
7471 struct target_statfs *target_stfs;
7473 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7474 goto efault;
7475 __put_user(stfs.f_type, &target_stfs->f_type);
7476 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7477 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7478 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7479 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7480 __put_user(stfs.f_files, &target_stfs->f_files);
7481 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7482 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7483 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7484 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7485 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7486 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7487 unlock_user_struct(target_stfs, arg2, 1);
7489 break;
7490 case TARGET_NR_fstatfs:
7491 ret = get_errno(fstatfs(arg1, &stfs));
7492 goto convert_statfs;
7493 #ifdef TARGET_NR_statfs64
7494 case TARGET_NR_statfs64:
7495 if (!(p = lock_user_string(arg1)))
7496 goto efault;
7497 ret = get_errno(statfs(path(p), &stfs));
7498 unlock_user(p, arg1, 0);
7499 convert_statfs64:
7500 if (!is_error(ret)) {
7501 struct target_statfs64 *target_stfs;
7503 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7504 goto efault;
7505 __put_user(stfs.f_type, &target_stfs->f_type);
7506 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7507 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7508 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7509 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7510 __put_user(stfs.f_files, &target_stfs->f_files);
7511 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7512 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7513 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7514 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7515 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7516 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7517 unlock_user_struct(target_stfs, arg3, 1);
7519 break;
7520 case TARGET_NR_fstatfs64:
7521 ret = get_errno(fstatfs(arg1, &stfs));
7522 goto convert_statfs64;
7523 #endif
7524 #ifdef TARGET_NR_ioperm
7525 case TARGET_NR_ioperm:
7526 goto unimplemented;
7527 #endif
7528 #ifdef TARGET_NR_socketcall
7529 case TARGET_NR_socketcall:
7530 ret = do_socketcall(arg1, arg2);
7531 break;
7532 #endif
7533 #ifdef TARGET_NR_accept
7534 case TARGET_NR_accept:
7535 ret = do_accept4(arg1, arg2, arg3, 0);
7536 break;
7537 #endif
7538 #ifdef TARGET_NR_accept4
7539 case TARGET_NR_accept4:
7540 #ifdef CONFIG_ACCEPT4
7541 ret = do_accept4(arg1, arg2, arg3, arg4);
7542 #else
7543 goto unimplemented;
7544 #endif
7545 break;
7546 #endif
7547 #ifdef TARGET_NR_bind
7548 case TARGET_NR_bind:
7549 ret = do_bind(arg1, arg2, arg3);
7550 break;
7551 #endif
7552 #ifdef TARGET_NR_connect
7553 case TARGET_NR_connect:
7554 ret = do_connect(arg1, arg2, arg3);
7555 break;
7556 #endif
7557 #ifdef TARGET_NR_getpeername
7558 case TARGET_NR_getpeername:
7559 ret = do_getpeername(arg1, arg2, arg3);
7560 break;
7561 #endif
7562 #ifdef TARGET_NR_getsockname
7563 case TARGET_NR_getsockname:
7564 ret = do_getsockname(arg1, arg2, arg3);
7565 break;
7566 #endif
7567 #ifdef TARGET_NR_getsockopt
7568 case TARGET_NR_getsockopt:
7569 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7570 break;
7571 #endif
7572 #ifdef TARGET_NR_listen
7573 case TARGET_NR_listen:
7574 ret = get_errno(listen(arg1, arg2));
7575 break;
7576 #endif
7577 #ifdef TARGET_NR_recv
7578 case TARGET_NR_recv:
7579 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7580 break;
7581 #endif
7582 #ifdef TARGET_NR_recvfrom
7583 case TARGET_NR_recvfrom:
7584 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7585 break;
7586 #endif
7587 #ifdef TARGET_NR_recvmsg
7588 case TARGET_NR_recvmsg:
7589 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7590 break;
7591 #endif
7592 #ifdef TARGET_NR_send
7593 case TARGET_NR_send:
7594 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7595 break;
7596 #endif
7597 #ifdef TARGET_NR_sendmsg
7598 case TARGET_NR_sendmsg:
7599 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7600 break;
7601 #endif
7602 #ifdef TARGET_NR_sendmmsg
7603 case TARGET_NR_sendmmsg:
7604 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7605 break;
7606 case TARGET_NR_recvmmsg:
7607 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7608 break;
7609 #endif
7610 #ifdef TARGET_NR_sendto
7611 case TARGET_NR_sendto:
7612 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7613 break;
7614 #endif
7615 #ifdef TARGET_NR_shutdown
7616 case TARGET_NR_shutdown:
7617 ret = get_errno(shutdown(arg1, arg2));
7618 break;
7619 #endif
7620 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
7621 case TARGET_NR_getrandom:
7622 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
7623 if (!p) {
7624 goto efault;
7626 ret = get_errno(getrandom(p, arg2, arg3));
7627 unlock_user(p, arg1, ret);
7628 break;
7629 #endif
7630 #ifdef TARGET_NR_socket
7631 case TARGET_NR_socket:
7632 ret = do_socket(arg1, arg2, arg3);
7633 fd_trans_unregister(ret);
7634 break;
7635 #endif
7636 #ifdef TARGET_NR_socketpair
7637 case TARGET_NR_socketpair:
7638 ret = do_socketpair(arg1, arg2, arg3, arg4);
7639 break;
7640 #endif
7641 #ifdef TARGET_NR_setsockopt
7642 case TARGET_NR_setsockopt:
7643 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7644 break;
7645 #endif
7647 case TARGET_NR_syslog:
7648 if (!(p = lock_user_string(arg2)))
7649 goto efault;
7650 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7651 unlock_user(p, arg2, 0);
7652 break;
7654 case TARGET_NR_setitimer:
7656 struct itimerval value, ovalue, *pvalue;
7658 if (arg2) {
7659 pvalue = &value;
7660 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7661 || copy_from_user_timeval(&pvalue->it_value,
7662 arg2 + sizeof(struct target_timeval)))
7663 goto efault;
7664 } else {
7665 pvalue = NULL;
7667 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7668 if (!is_error(ret) && arg3) {
7669 if (copy_to_user_timeval(arg3,
7670 &ovalue.it_interval)
7671 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7672 &ovalue.it_value))
7673 goto efault;
7676 break;
7677 case TARGET_NR_getitimer:
7679 struct itimerval value;
7681 ret = get_errno(getitimer(arg1, &value));
7682 if (!is_error(ret) && arg2) {
7683 if (copy_to_user_timeval(arg2,
7684 &value.it_interval)
7685 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7686 &value.it_value))
7687 goto efault;
7690 break;
7691 #ifdef TARGET_NR_stat
7692 case TARGET_NR_stat:
7693 if (!(p = lock_user_string(arg1)))
7694 goto efault;
7695 ret = get_errno(stat(path(p), &st));
7696 unlock_user(p, arg1, 0);
7697 goto do_stat;
7698 #endif
7699 #ifdef TARGET_NR_lstat
7700 case TARGET_NR_lstat:
7701 if (!(p = lock_user_string(arg1)))
7702 goto efault;
7703 ret = get_errno(lstat(path(p), &st));
7704 unlock_user(p, arg1, 0);
7705 goto do_stat;
7706 #endif
7707 case TARGET_NR_fstat:
7709 ret = get_errno(fstat(arg1, &st));
7710 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7711 do_stat:
7712 #endif
7713 if (!is_error(ret)) {
7714 struct target_stat *target_st;
7716 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7717 goto efault;
7718 memset(target_st, 0, sizeof(*target_st));
7719 __put_user(st.st_dev, &target_st->st_dev);
7720 __put_user(st.st_ino, &target_st->st_ino);
7721 __put_user(st.st_mode, &target_st->st_mode);
7722 __put_user(st.st_uid, &target_st->st_uid);
7723 __put_user(st.st_gid, &target_st->st_gid);
7724 __put_user(st.st_nlink, &target_st->st_nlink);
7725 __put_user(st.st_rdev, &target_st->st_rdev);
7726 __put_user(st.st_size, &target_st->st_size);
7727 __put_user(st.st_blksize, &target_st->st_blksize);
7728 __put_user(st.st_blocks, &target_st->st_blocks);
7729 __put_user(st.st_atime, &target_st->target_st_atime);
7730 __put_user(st.st_mtime, &target_st->target_st_mtime);
7731 __put_user(st.st_ctime, &target_st->target_st_ctime);
7732 unlock_user_struct(target_st, arg2, 1);
7735 break;
7736 #ifdef TARGET_NR_olduname
7737 case TARGET_NR_olduname:
7738 goto unimplemented;
7739 #endif
7740 #ifdef TARGET_NR_iopl
7741 case TARGET_NR_iopl:
7742 goto unimplemented;
7743 #endif
7744 case TARGET_NR_vhangup:
7745 ret = get_errno(vhangup());
7746 break;
7747 #ifdef TARGET_NR_idle
7748 case TARGET_NR_idle:
7749 goto unimplemented;
7750 #endif
7751 #ifdef TARGET_NR_syscall
7752 case TARGET_NR_syscall:
7753 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7754 arg6, arg7, arg8, 0);
7755 break;
7756 #endif
7757 case TARGET_NR_wait4:
7759 int status;
7760 abi_long status_ptr = arg2;
7761 struct rusage rusage, *rusage_ptr;
7762 abi_ulong target_rusage = arg4;
7763 abi_long rusage_err;
7764 if (target_rusage)
7765 rusage_ptr = &rusage;
7766 else
7767 rusage_ptr = NULL;
7768 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
7769 if (!is_error(ret)) {
7770 if (status_ptr && ret) {
7771 status = host_to_target_waitstatus(status);
7772 if (put_user_s32(status, status_ptr))
7773 goto efault;
7775 if (target_rusage) {
7776 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7777 if (rusage_err) {
7778 ret = rusage_err;
7783 break;
7784 #ifdef TARGET_NR_swapoff
7785 case TARGET_NR_swapoff:
7786 if (!(p = lock_user_string(arg1)))
7787 goto efault;
7788 ret = get_errno(swapoff(p));
7789 unlock_user(p, arg1, 0);
7790 break;
7791 #endif
7792 case TARGET_NR_sysinfo:
7794 struct target_sysinfo *target_value;
7795 struct sysinfo value;
7796 ret = get_errno(sysinfo(&value));
7797 if (!is_error(ret) && arg1)
7799 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7800 goto efault;
7801 __put_user(value.uptime, &target_value->uptime);
7802 __put_user(value.loads[0], &target_value->loads[0]);
7803 __put_user(value.loads[1], &target_value->loads[1]);
7804 __put_user(value.loads[2], &target_value->loads[2]);
7805 __put_user(value.totalram, &target_value->totalram);
7806 __put_user(value.freeram, &target_value->freeram);
7807 __put_user(value.sharedram, &target_value->sharedram);
7808 __put_user(value.bufferram, &target_value->bufferram);
7809 __put_user(value.totalswap, &target_value->totalswap);
7810 __put_user(value.freeswap, &target_value->freeswap);
7811 __put_user(value.procs, &target_value->procs);
7812 __put_user(value.totalhigh, &target_value->totalhigh);
7813 __put_user(value.freehigh, &target_value->freehigh);
7814 __put_user(value.mem_unit, &target_value->mem_unit);
7815 unlock_user_struct(target_value, arg1, 1);
7818 break;
7819 #ifdef TARGET_NR_ipc
7820 case TARGET_NR_ipc:
7821 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7822 break;
7823 #endif
7824 #ifdef TARGET_NR_semget
7825 case TARGET_NR_semget:
7826 ret = get_errno(semget(arg1, arg2, arg3));
7827 break;
7828 #endif
7829 #ifdef TARGET_NR_semop
7830 case TARGET_NR_semop:
7831 ret = do_semop(arg1, arg2, arg3);
7832 break;
7833 #endif
7834 #ifdef TARGET_NR_semctl
7835 case TARGET_NR_semctl:
7836 ret = do_semctl(arg1, arg2, arg3, arg4);
7837 break;
7838 #endif
7839 #ifdef TARGET_NR_msgctl
7840 case TARGET_NR_msgctl:
7841 ret = do_msgctl(arg1, arg2, arg3);
7842 break;
7843 #endif
7844 #ifdef TARGET_NR_msgget
7845 case TARGET_NR_msgget:
7846 ret = get_errno(msgget(arg1, arg2));
7847 break;
7848 #endif
7849 #ifdef TARGET_NR_msgrcv
7850 case TARGET_NR_msgrcv:
7851 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7852 break;
7853 #endif
7854 #ifdef TARGET_NR_msgsnd
7855 case TARGET_NR_msgsnd:
7856 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7857 break;
7858 #endif
7859 #ifdef TARGET_NR_shmget
7860 case TARGET_NR_shmget:
7861 ret = get_errno(shmget(arg1, arg2, arg3));
7862 break;
7863 #endif
7864 #ifdef TARGET_NR_shmctl
7865 case TARGET_NR_shmctl:
7866 ret = do_shmctl(arg1, arg2, arg3);
7867 break;
7868 #endif
7869 #ifdef TARGET_NR_shmat
7870 case TARGET_NR_shmat:
7871 ret = do_shmat(arg1, arg2, arg3);
7872 break;
7873 #endif
7874 #ifdef TARGET_NR_shmdt
7875 case TARGET_NR_shmdt:
7876 ret = do_shmdt(arg1);
7877 break;
7878 #endif
7879 case TARGET_NR_fsync:
7880 ret = get_errno(fsync(arg1));
7881 break;
7882 case TARGET_NR_clone:
7883 /* Linux manages to have three different orderings for its
7884 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7885 * match the kernel's CONFIG_CLONE_* settings.
7886 * Microblaze is further special in that it uses a sixth
7887 * implicit argument to clone for the TLS pointer.
7889 #if defined(TARGET_MICROBLAZE)
7890 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7891 #elif defined(TARGET_CLONE_BACKWARDS)
7892 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7893 #elif defined(TARGET_CLONE_BACKWARDS2)
7894 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7895 #else
7896 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7897 #endif
7898 break;
7899 #ifdef __NR_exit_group
7900 /* new thread calls */
7901 case TARGET_NR_exit_group:
7902 #ifdef TARGET_GPROF
7903 _mcleanup();
7904 #endif
7905 gdb_exit(cpu_env, arg1);
7906 ret = get_errno(exit_group(arg1));
7907 break;
7908 #endif
7909 case TARGET_NR_setdomainname:
7910 if (!(p = lock_user_string(arg1)))
7911 goto efault;
7912 ret = get_errno(setdomainname(p, arg2));
7913 unlock_user(p, arg1, 0);
7914 break;
7915 case TARGET_NR_uname:
7916 /* no need to transcode because we use the linux syscall */
7918 struct new_utsname * buf;
7920 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7921 goto efault;
7922 ret = get_errno(sys_uname(buf));
7923 if (!is_error(ret)) {
7924 /* Overrite the native machine name with whatever is being
7925 emulated. */
7926 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7927 /* Allow the user to override the reported release. */
7928 if (qemu_uname_release && *qemu_uname_release)
7929 strcpy (buf->release, qemu_uname_release);
7931 unlock_user_struct(buf, arg1, 1);
7933 break;
7934 #ifdef TARGET_I386
7935 case TARGET_NR_modify_ldt:
7936 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7937 break;
7938 #if !defined(TARGET_X86_64)
7939 case TARGET_NR_vm86old:
7940 goto unimplemented;
7941 case TARGET_NR_vm86:
7942 ret = do_vm86(cpu_env, arg1, arg2);
7943 break;
7944 #endif
7945 #endif
7946 case TARGET_NR_adjtimex:
7947 goto unimplemented;
7948 #ifdef TARGET_NR_create_module
7949 case TARGET_NR_create_module:
7950 #endif
7951 case TARGET_NR_init_module:
7952 case TARGET_NR_delete_module:
7953 #ifdef TARGET_NR_get_kernel_syms
7954 case TARGET_NR_get_kernel_syms:
7955 #endif
7956 goto unimplemented;
7957 case TARGET_NR_quotactl:
7958 goto unimplemented;
7959 case TARGET_NR_getpgid:
7960 ret = get_errno(getpgid(arg1));
7961 break;
7962 case TARGET_NR_fchdir:
7963 ret = get_errno(fchdir(arg1));
7964 break;
7965 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7966 case TARGET_NR_bdflush:
7967 goto unimplemented;
7968 #endif
7969 #ifdef TARGET_NR_sysfs
7970 case TARGET_NR_sysfs:
7971 goto unimplemented;
7972 #endif
7973 case TARGET_NR_personality:
7974 ret = get_errno(personality(arg1));
7975 break;
7976 #ifdef TARGET_NR_afs_syscall
7977 case TARGET_NR_afs_syscall:
7978 goto unimplemented;
7979 #endif
7980 #ifdef TARGET_NR__llseek /* Not on alpha */
7981 case TARGET_NR__llseek:
7983 int64_t res;
7984 #if !defined(__NR_llseek)
7985 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7986 if (res == -1) {
7987 ret = get_errno(res);
7988 } else {
7989 ret = 0;
7991 #else
7992 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7993 #endif
7994 if ((ret == 0) && put_user_s64(res, arg4)) {
7995 goto efault;
7998 break;
7999 #endif
8000 #ifdef TARGET_NR_getdents
8001 case TARGET_NR_getdents:
8002 #ifdef __NR_getdents
8003 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
8005 struct target_dirent *target_dirp;
8006 struct linux_dirent *dirp;
8007 abi_long count = arg3;
8009 dirp = g_try_malloc(count);
8010 if (!dirp) {
8011 ret = -TARGET_ENOMEM;
8012 goto fail;
8015 ret = get_errno(sys_getdents(arg1, dirp, count));
8016 if (!is_error(ret)) {
8017 struct linux_dirent *de;
8018 struct target_dirent *tde;
8019 int len = ret;
8020 int reclen, treclen;
8021 int count1, tnamelen;
8023 count1 = 0;
8024 de = dirp;
8025 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8026 goto efault;
8027 tde = target_dirp;
8028 while (len > 0) {
8029 reclen = de->d_reclen;
8030 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
8031 assert(tnamelen >= 0);
8032 treclen = tnamelen + offsetof(struct target_dirent, d_name);
8033 assert(count1 + treclen <= count);
8034 tde->d_reclen = tswap16(treclen);
8035 tde->d_ino = tswapal(de->d_ino);
8036 tde->d_off = tswapal(de->d_off);
8037 memcpy(tde->d_name, de->d_name, tnamelen);
8038 de = (struct linux_dirent *)((char *)de + reclen);
8039 len -= reclen;
8040 tde = (struct target_dirent *)((char *)tde + treclen);
8041 count1 += treclen;
8043 ret = count1;
8044 unlock_user(target_dirp, arg2, ret);
8046 g_free(dirp);
8048 #else
8050 struct linux_dirent *dirp;
8051 abi_long count = arg3;
8053 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8054 goto efault;
8055 ret = get_errno(sys_getdents(arg1, dirp, count));
8056 if (!is_error(ret)) {
8057 struct linux_dirent *de;
8058 int len = ret;
8059 int reclen;
8060 de = dirp;
8061 while (len > 0) {
8062 reclen = de->d_reclen;
8063 if (reclen > len)
8064 break;
8065 de->d_reclen = tswap16(reclen);
8066 tswapls(&de->d_ino);
8067 tswapls(&de->d_off);
8068 de = (struct linux_dirent *)((char *)de + reclen);
8069 len -= reclen;
8072 unlock_user(dirp, arg2, ret);
8074 #endif
8075 #else
8076 /* Implement getdents in terms of getdents64 */
8078 struct linux_dirent64 *dirp;
8079 abi_long count = arg3;
8081 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8082 if (!dirp) {
8083 goto efault;
8085 ret = get_errno(sys_getdents64(arg1, dirp, count));
8086 if (!is_error(ret)) {
8087 /* Convert the dirent64 structs to target dirent. We do this
8088 * in-place, since we can guarantee that a target_dirent is no
8089 * larger than a dirent64; however this means we have to be
8090 * careful to read everything before writing in the new format.
8092 struct linux_dirent64 *de;
8093 struct target_dirent *tde;
8094 int len = ret;
8095 int tlen = 0;
8097 de = dirp;
8098 tde = (struct target_dirent *)dirp;
8099 while (len > 0) {
8100 int namelen, treclen;
8101 int reclen = de->d_reclen;
8102 uint64_t ino = de->d_ino;
8103 int64_t off = de->d_off;
8104 uint8_t type = de->d_type;
8106 namelen = strlen(de->d_name);
8107 treclen = offsetof(struct target_dirent, d_name)
8108 + namelen + 2;
8109 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8111 memmove(tde->d_name, de->d_name, namelen + 1);
8112 tde->d_ino = tswapal(ino);
8113 tde->d_off = tswapal(off);
8114 tde->d_reclen = tswap16(treclen);
8115 /* The target_dirent type is in what was formerly a padding
8116 * byte at the end of the structure:
8118 *(((char *)tde) + treclen - 1) = type;
8120 de = (struct linux_dirent64 *)((char *)de + reclen);
8121 tde = (struct target_dirent *)((char *)tde + treclen);
8122 len -= reclen;
8123 tlen += treclen;
8125 ret = tlen;
8127 unlock_user(dirp, arg2, ret);
8129 #endif
8130 break;
8131 #endif /* TARGET_NR_getdents */
8132 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8133 case TARGET_NR_getdents64:
8135 struct linux_dirent64 *dirp;
8136 abi_long count = arg3;
8137 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8138 goto efault;
8139 ret = get_errno(sys_getdents64(arg1, dirp, count));
8140 if (!is_error(ret)) {
8141 struct linux_dirent64 *de;
8142 int len = ret;
8143 int reclen;
8144 de = dirp;
8145 while (len > 0) {
8146 reclen = de->d_reclen;
8147 if (reclen > len)
8148 break;
8149 de->d_reclen = tswap16(reclen);
8150 tswap64s((uint64_t *)&de->d_ino);
8151 tswap64s((uint64_t *)&de->d_off);
8152 de = (struct linux_dirent64 *)((char *)de + reclen);
8153 len -= reclen;
8156 unlock_user(dirp, arg2, ret);
8158 break;
8159 #endif /* TARGET_NR_getdents64 */
8160 #if defined(TARGET_NR__newselect)
8161 case TARGET_NR__newselect:
8162 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8163 break;
8164 #endif
8165 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8166 # ifdef TARGET_NR_poll
8167 case TARGET_NR_poll:
8168 # endif
8169 # ifdef TARGET_NR_ppoll
8170 case TARGET_NR_ppoll:
8171 # endif
8173 struct target_pollfd *target_pfd;
8174 unsigned int nfds = arg2;
8175 int timeout = arg3;
8176 struct pollfd *pfd;
8177 unsigned int i;
8179 pfd = NULL;
8180 target_pfd = NULL;
8181 if (nfds) {
8182 target_pfd = lock_user(VERIFY_WRITE, arg1,
8183 sizeof(struct target_pollfd) * nfds, 1);
8184 if (!target_pfd) {
8185 goto efault;
8188 pfd = alloca(sizeof(struct pollfd) * nfds);
8189 for (i = 0; i < nfds; i++) {
8190 pfd[i].fd = tswap32(target_pfd[i].fd);
8191 pfd[i].events = tswap16(target_pfd[i].events);
8195 # ifdef TARGET_NR_ppoll
8196 if (num == TARGET_NR_ppoll) {
8197 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8198 target_sigset_t *target_set;
8199 sigset_t _set, *set = &_set;
8201 if (arg3) {
8202 if (target_to_host_timespec(timeout_ts, arg3)) {
8203 unlock_user(target_pfd, arg1, 0);
8204 goto efault;
8206 } else {
8207 timeout_ts = NULL;
8210 if (arg4) {
8211 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8212 if (!target_set) {
8213 unlock_user(target_pfd, arg1, 0);
8214 goto efault;
8216 target_to_host_sigset(set, target_set);
8217 } else {
8218 set = NULL;
8221 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
8223 if (!is_error(ret) && arg3) {
8224 host_to_target_timespec(arg3, timeout_ts);
8226 if (arg4) {
8227 unlock_user(target_set, arg4, 0);
8229 } else
8230 # endif
8231 ret = get_errno(poll(pfd, nfds, timeout));
8233 if (!is_error(ret)) {
8234 for(i = 0; i < nfds; i++) {
8235 target_pfd[i].revents = tswap16(pfd[i].revents);
8238 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8240 break;
8241 #endif
8242 case TARGET_NR_flock:
8243 /* NOTE: the flock constant seems to be the same for every
8244 Linux platform */
8245 ret = get_errno(flock(arg1, arg2));
8246 break;
8247 case TARGET_NR_readv:
8249 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8250 if (vec != NULL) {
8251 ret = get_errno(readv(arg1, vec, arg3));
8252 unlock_iovec(vec, arg2, arg3, 1);
8253 } else {
8254 ret = -host_to_target_errno(errno);
8257 break;
8258 case TARGET_NR_writev:
8260 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8261 if (vec != NULL) {
8262 ret = get_errno(writev(arg1, vec, arg3));
8263 unlock_iovec(vec, arg2, arg3, 0);
8264 } else {
8265 ret = -host_to_target_errno(errno);
8268 break;
8269 case TARGET_NR_getsid:
8270 ret = get_errno(getsid(arg1));
8271 break;
8272 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8273 case TARGET_NR_fdatasync:
8274 ret = get_errno(fdatasync(arg1));
8275 break;
8276 #endif
8277 #ifdef TARGET_NR__sysctl
8278 case TARGET_NR__sysctl:
8279 /* We don't implement this, but ENOTDIR is always a safe
8280 return value. */
8281 ret = -TARGET_ENOTDIR;
8282 break;
8283 #endif
8284 case TARGET_NR_sched_getaffinity:
8286 unsigned int mask_size;
8287 unsigned long *mask;
8290 * sched_getaffinity needs multiples of ulong, so need to take
8291 * care of mismatches between target ulong and host ulong sizes.
8293 if (arg2 & (sizeof(abi_ulong) - 1)) {
8294 ret = -TARGET_EINVAL;
8295 break;
8297 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8299 mask = alloca(mask_size);
8300 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8302 if (!is_error(ret)) {
8303 if (ret > arg2) {
8304 /* More data returned than the caller's buffer will fit.
8305 * This only happens if sizeof(abi_long) < sizeof(long)
8306 * and the caller passed us a buffer holding an odd number
8307 * of abi_longs. If the host kernel is actually using the
8308 * extra 4 bytes then fail EINVAL; otherwise we can just
8309 * ignore them and only copy the interesting part.
8311 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8312 if (numcpus > arg2 * 8) {
8313 ret = -TARGET_EINVAL;
8314 break;
8316 ret = arg2;
8319 if (copy_to_user(arg3, mask, ret)) {
8320 goto efault;
8324 break;
8325 case TARGET_NR_sched_setaffinity:
8327 unsigned int mask_size;
8328 unsigned long *mask;
8331 * sched_setaffinity needs multiples of ulong, so need to take
8332 * care of mismatches between target ulong and host ulong sizes.
8334 if (arg2 & (sizeof(abi_ulong) - 1)) {
8335 ret = -TARGET_EINVAL;
8336 break;
8338 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8340 mask = alloca(mask_size);
8341 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8342 goto efault;
8344 memcpy(mask, p, arg2);
8345 unlock_user_struct(p, arg2, 0);
8347 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8349 break;
8350 case TARGET_NR_sched_setparam:
8352 struct sched_param *target_schp;
8353 struct sched_param schp;
8355 if (arg2 == 0) {
8356 return -TARGET_EINVAL;
8358 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8359 goto efault;
8360 schp.sched_priority = tswap32(target_schp->sched_priority);
8361 unlock_user_struct(target_schp, arg2, 0);
8362 ret = get_errno(sched_setparam(arg1, &schp));
8364 break;
8365 case TARGET_NR_sched_getparam:
8367 struct sched_param *target_schp;
8368 struct sched_param schp;
8370 if (arg2 == 0) {
8371 return -TARGET_EINVAL;
8373 ret = get_errno(sched_getparam(arg1, &schp));
8374 if (!is_error(ret)) {
8375 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8376 goto efault;
8377 target_schp->sched_priority = tswap32(schp.sched_priority);
8378 unlock_user_struct(target_schp, arg2, 1);
8381 break;
8382 case TARGET_NR_sched_setscheduler:
8384 struct sched_param *target_schp;
8385 struct sched_param schp;
8386 if (arg3 == 0) {
8387 return -TARGET_EINVAL;
8389 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8390 goto efault;
8391 schp.sched_priority = tswap32(target_schp->sched_priority);
8392 unlock_user_struct(target_schp, arg3, 0);
8393 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8395 break;
8396 case TARGET_NR_sched_getscheduler:
8397 ret = get_errno(sched_getscheduler(arg1));
8398 break;
8399 case TARGET_NR_sched_yield:
8400 ret = get_errno(sched_yield());
8401 break;
8402 case TARGET_NR_sched_get_priority_max:
8403 ret = get_errno(sched_get_priority_max(arg1));
8404 break;
8405 case TARGET_NR_sched_get_priority_min:
8406 ret = get_errno(sched_get_priority_min(arg1));
8407 break;
8408 case TARGET_NR_sched_rr_get_interval:
8410 struct timespec ts;
8411 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8412 if (!is_error(ret)) {
8413 ret = host_to_target_timespec(arg2, &ts);
8416 break;
8417 case TARGET_NR_nanosleep:
8419 struct timespec req, rem;
8420 target_to_host_timespec(&req, arg1);
8421 ret = get_errno(nanosleep(&req, &rem));
8422 if (is_error(ret) && arg2) {
8423 host_to_target_timespec(arg2, &rem);
8426 break;
8427 #ifdef TARGET_NR_query_module
8428 case TARGET_NR_query_module:
8429 goto unimplemented;
8430 #endif
8431 #ifdef TARGET_NR_nfsservctl
8432 case TARGET_NR_nfsservctl:
8433 goto unimplemented;
8434 #endif
8435 case TARGET_NR_prctl:
8436 switch (arg1) {
8437 case PR_GET_PDEATHSIG:
8439 int deathsig;
8440 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8441 if (!is_error(ret) && arg2
8442 && put_user_ual(deathsig, arg2)) {
8443 goto efault;
8445 break;
8447 #ifdef PR_GET_NAME
8448 case PR_GET_NAME:
8450 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8451 if (!name) {
8452 goto efault;
8454 ret = get_errno(prctl(arg1, (unsigned long)name,
8455 arg3, arg4, arg5));
8456 unlock_user(name, arg2, 16);
8457 break;
8459 case PR_SET_NAME:
8461 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8462 if (!name) {
8463 goto efault;
8465 ret = get_errno(prctl(arg1, (unsigned long)name,
8466 arg3, arg4, arg5));
8467 unlock_user(name, arg2, 0);
8468 break;
8470 #endif
8471 default:
8472 /* Most prctl options have no pointer arguments */
8473 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8474 break;
8476 break;
8477 #ifdef TARGET_NR_arch_prctl
8478 case TARGET_NR_arch_prctl:
8479 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8480 ret = do_arch_prctl(cpu_env, arg1, arg2);
8481 break;
8482 #else
8483 goto unimplemented;
8484 #endif
8485 #endif
8486 #ifdef TARGET_NR_pread64
8487 case TARGET_NR_pread64:
8488 if (regpairs_aligned(cpu_env)) {
8489 arg4 = arg5;
8490 arg5 = arg6;
8492 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8493 goto efault;
8494 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8495 unlock_user(p, arg2, ret);
8496 break;
8497 case TARGET_NR_pwrite64:
8498 if (regpairs_aligned(cpu_env)) {
8499 arg4 = arg5;
8500 arg5 = arg6;
8502 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8503 goto efault;
8504 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8505 unlock_user(p, arg2, 0);
8506 break;
8507 #endif
8508 case TARGET_NR_getcwd:
8509 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8510 goto efault;
8511 ret = get_errno(sys_getcwd1(p, arg2));
8512 unlock_user(p, arg1, ret);
8513 break;
8514 case TARGET_NR_capget:
8515 case TARGET_NR_capset:
8517 struct target_user_cap_header *target_header;
8518 struct target_user_cap_data *target_data = NULL;
8519 struct __user_cap_header_struct header;
8520 struct __user_cap_data_struct data[2];
8521 struct __user_cap_data_struct *dataptr = NULL;
8522 int i, target_datalen;
8523 int data_items = 1;
8525 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8526 goto efault;
8528 header.version = tswap32(target_header->version);
8529 header.pid = tswap32(target_header->pid);
8531 if (header.version != _LINUX_CAPABILITY_VERSION) {
8532 /* Version 2 and up takes pointer to two user_data structs */
8533 data_items = 2;
8536 target_datalen = sizeof(*target_data) * data_items;
8538 if (arg2) {
8539 if (num == TARGET_NR_capget) {
8540 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8541 } else {
8542 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8544 if (!target_data) {
8545 unlock_user_struct(target_header, arg1, 0);
8546 goto efault;
8549 if (num == TARGET_NR_capset) {
8550 for (i = 0; i < data_items; i++) {
8551 data[i].effective = tswap32(target_data[i].effective);
8552 data[i].permitted = tswap32(target_data[i].permitted);
8553 data[i].inheritable = tswap32(target_data[i].inheritable);
8557 dataptr = data;
8560 if (num == TARGET_NR_capget) {
8561 ret = get_errno(capget(&header, dataptr));
8562 } else {
8563 ret = get_errno(capset(&header, dataptr));
8566 /* The kernel always updates version for both capget and capset */
8567 target_header->version = tswap32(header.version);
8568 unlock_user_struct(target_header, arg1, 1);
8570 if (arg2) {
8571 if (num == TARGET_NR_capget) {
8572 for (i = 0; i < data_items; i++) {
8573 target_data[i].effective = tswap32(data[i].effective);
8574 target_data[i].permitted = tswap32(data[i].permitted);
8575 target_data[i].inheritable = tswap32(data[i].inheritable);
8577 unlock_user(target_data, arg2, target_datalen);
8578 } else {
8579 unlock_user(target_data, arg2, 0);
8582 break;
8584 case TARGET_NR_sigaltstack:
8585 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8586 break;
8588 #ifdef CONFIG_SENDFILE
8589 case TARGET_NR_sendfile:
8591 off_t *offp = NULL;
8592 off_t off;
8593 if (arg3) {
8594 ret = get_user_sal(off, arg3);
8595 if (is_error(ret)) {
8596 break;
8598 offp = &off;
8600 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8601 if (!is_error(ret) && arg3) {
8602 abi_long ret2 = put_user_sal(off, arg3);
8603 if (is_error(ret2)) {
8604 ret = ret2;
8607 break;
8609 #ifdef TARGET_NR_sendfile64
8610 case TARGET_NR_sendfile64:
8612 off_t *offp = NULL;
8613 off_t off;
8614 if (arg3) {
8615 ret = get_user_s64(off, arg3);
8616 if (is_error(ret)) {
8617 break;
8619 offp = &off;
8621 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8622 if (!is_error(ret) && arg3) {
8623 abi_long ret2 = put_user_s64(off, arg3);
8624 if (is_error(ret2)) {
8625 ret = ret2;
8628 break;
8630 #endif
8631 #else
8632 case TARGET_NR_sendfile:
8633 #ifdef TARGET_NR_sendfile64
8634 case TARGET_NR_sendfile64:
8635 #endif
8636 goto unimplemented;
8637 #endif
8639 #ifdef TARGET_NR_getpmsg
8640 case TARGET_NR_getpmsg:
8641 goto unimplemented;
8642 #endif
8643 #ifdef TARGET_NR_putpmsg
8644 case TARGET_NR_putpmsg:
8645 goto unimplemented;
8646 #endif
8647 #ifdef TARGET_NR_vfork
8648 case TARGET_NR_vfork:
8649 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8650 0, 0, 0, 0));
8651 break;
8652 #endif
8653 #ifdef TARGET_NR_ugetrlimit
8654 case TARGET_NR_ugetrlimit:
8656 struct rlimit rlim;
8657 int resource = target_to_host_resource(arg1);
8658 ret = get_errno(getrlimit(resource, &rlim));
8659 if (!is_error(ret)) {
8660 struct target_rlimit *target_rlim;
8661 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8662 goto efault;
8663 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8664 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8665 unlock_user_struct(target_rlim, arg2, 1);
8667 break;
8669 #endif
8670 #ifdef TARGET_NR_truncate64
8671 case TARGET_NR_truncate64:
8672 if (!(p = lock_user_string(arg1)))
8673 goto efault;
8674 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8675 unlock_user(p, arg1, 0);
8676 break;
8677 #endif
8678 #ifdef TARGET_NR_ftruncate64
8679 case TARGET_NR_ftruncate64:
8680 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8681 break;
8682 #endif
8683 #ifdef TARGET_NR_stat64
8684 case TARGET_NR_stat64:
8685 if (!(p = lock_user_string(arg1)))
8686 goto efault;
8687 ret = get_errno(stat(path(p), &st));
8688 unlock_user(p, arg1, 0);
8689 if (!is_error(ret))
8690 ret = host_to_target_stat64(cpu_env, arg2, &st);
8691 break;
8692 #endif
8693 #ifdef TARGET_NR_lstat64
8694 case TARGET_NR_lstat64:
8695 if (!(p = lock_user_string(arg1)))
8696 goto efault;
8697 ret = get_errno(lstat(path(p), &st));
8698 unlock_user(p, arg1, 0);
8699 if (!is_error(ret))
8700 ret = host_to_target_stat64(cpu_env, arg2, &st);
8701 break;
8702 #endif
8703 #ifdef TARGET_NR_fstat64
8704 case TARGET_NR_fstat64:
8705 ret = get_errno(fstat(arg1, &st));
8706 if (!is_error(ret))
8707 ret = host_to_target_stat64(cpu_env, arg2, &st);
8708 break;
8709 #endif
8710 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8711 #ifdef TARGET_NR_fstatat64
8712 case TARGET_NR_fstatat64:
8713 #endif
8714 #ifdef TARGET_NR_newfstatat
8715 case TARGET_NR_newfstatat:
8716 #endif
8717 if (!(p = lock_user_string(arg2)))
8718 goto efault;
8719 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8720 if (!is_error(ret))
8721 ret = host_to_target_stat64(cpu_env, arg3, &st);
8722 break;
8723 #endif
8724 #ifdef TARGET_NR_lchown
8725 case TARGET_NR_lchown:
8726 if (!(p = lock_user_string(arg1)))
8727 goto efault;
8728 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8729 unlock_user(p, arg1, 0);
8730 break;
8731 #endif
8732 #ifdef TARGET_NR_getuid
8733 case TARGET_NR_getuid:
8734 ret = get_errno(high2lowuid(getuid()));
8735 break;
8736 #endif
8737 #ifdef TARGET_NR_getgid
8738 case TARGET_NR_getgid:
8739 ret = get_errno(high2lowgid(getgid()));
8740 break;
8741 #endif
8742 #ifdef TARGET_NR_geteuid
8743 case TARGET_NR_geteuid:
8744 ret = get_errno(high2lowuid(geteuid()));
8745 break;
8746 #endif
8747 #ifdef TARGET_NR_getegid
8748 case TARGET_NR_getegid:
8749 ret = get_errno(high2lowgid(getegid()));
8750 break;
8751 #endif
8752 case TARGET_NR_setreuid:
8753 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8754 break;
8755 case TARGET_NR_setregid:
8756 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8757 break;
8758 case TARGET_NR_getgroups:
8760 int gidsetsize = arg1;
8761 target_id *target_grouplist;
8762 gid_t *grouplist;
8763 int i;
8765 grouplist = alloca(gidsetsize * sizeof(gid_t));
8766 ret = get_errno(getgroups(gidsetsize, grouplist));
8767 if (gidsetsize == 0)
8768 break;
8769 if (!is_error(ret)) {
8770 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8771 if (!target_grouplist)
8772 goto efault;
8773 for(i = 0;i < ret; i++)
8774 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8775 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8778 break;
8779 case TARGET_NR_setgroups:
8781 int gidsetsize = arg1;
8782 target_id *target_grouplist;
8783 gid_t *grouplist = NULL;
8784 int i;
8785 if (gidsetsize) {
8786 grouplist = alloca(gidsetsize * sizeof(gid_t));
8787 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8788 if (!target_grouplist) {
8789 ret = -TARGET_EFAULT;
8790 goto fail;
8792 for (i = 0; i < gidsetsize; i++) {
8793 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8795 unlock_user(target_grouplist, arg2, 0);
8797 ret = get_errno(setgroups(gidsetsize, grouplist));
8799 break;
8800 case TARGET_NR_fchown:
8801 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8802 break;
8803 #if defined(TARGET_NR_fchownat)
8804 case TARGET_NR_fchownat:
8805 if (!(p = lock_user_string(arg2)))
8806 goto efault;
8807 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8808 low2highgid(arg4), arg5));
8809 unlock_user(p, arg2, 0);
8810 break;
8811 #endif
8812 #ifdef TARGET_NR_setresuid
8813 case TARGET_NR_setresuid:
8814 ret = get_errno(setresuid(low2highuid(arg1),
8815 low2highuid(arg2),
8816 low2highuid(arg3)));
8817 break;
8818 #endif
8819 #ifdef TARGET_NR_getresuid
8820 case TARGET_NR_getresuid:
8822 uid_t ruid, euid, suid;
8823 ret = get_errno(getresuid(&ruid, &euid, &suid));
8824 if (!is_error(ret)) {
8825 if (put_user_id(high2lowuid(ruid), arg1)
8826 || put_user_id(high2lowuid(euid), arg2)
8827 || put_user_id(high2lowuid(suid), arg3))
8828 goto efault;
8831 break;
8832 #endif
8833 #ifdef TARGET_NR_getresgid
8834 case TARGET_NR_setresgid:
8835 ret = get_errno(setresgid(low2highgid(arg1),
8836 low2highgid(arg2),
8837 low2highgid(arg3)));
8838 break;
8839 #endif
8840 #ifdef TARGET_NR_getresgid
8841 case TARGET_NR_getresgid:
8843 gid_t rgid, egid, sgid;
8844 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8845 if (!is_error(ret)) {
8846 if (put_user_id(high2lowgid(rgid), arg1)
8847 || put_user_id(high2lowgid(egid), arg2)
8848 || put_user_id(high2lowgid(sgid), arg3))
8849 goto efault;
8852 break;
8853 #endif
8854 #ifdef TARGET_NR_chown
8855 case TARGET_NR_chown:
8856 if (!(p = lock_user_string(arg1)))
8857 goto efault;
8858 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8859 unlock_user(p, arg1, 0);
8860 break;
8861 #endif
8862 case TARGET_NR_setuid:
8863 ret = get_errno(setuid(low2highuid(arg1)));
8864 break;
8865 case TARGET_NR_setgid:
8866 ret = get_errno(setgid(low2highgid(arg1)));
8867 break;
8868 case TARGET_NR_setfsuid:
8869 ret = get_errno(setfsuid(arg1));
8870 break;
8871 case TARGET_NR_setfsgid:
8872 ret = get_errno(setfsgid(arg1));
8873 break;
8875 #ifdef TARGET_NR_lchown32
8876 case TARGET_NR_lchown32:
8877 if (!(p = lock_user_string(arg1)))
8878 goto efault;
8879 ret = get_errno(lchown(p, arg2, arg3));
8880 unlock_user(p, arg1, 0);
8881 break;
8882 #endif
8883 #ifdef TARGET_NR_getuid32
8884 case TARGET_NR_getuid32:
8885 ret = get_errno(getuid());
8886 break;
8887 #endif
8889 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8890 /* Alpha specific */
8891 case TARGET_NR_getxuid:
8893 uid_t euid;
8894 euid=geteuid();
8895 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8897 ret = get_errno(getuid());
8898 break;
8899 #endif
8900 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8901 /* Alpha specific */
8902 case TARGET_NR_getxgid:
8904 uid_t egid;
8905 egid=getegid();
8906 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8908 ret = get_errno(getgid());
8909 break;
8910 #endif
8911 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8912 /* Alpha specific */
8913 case TARGET_NR_osf_getsysinfo:
8914 ret = -TARGET_EOPNOTSUPP;
8915 switch (arg1) {
8916 case TARGET_GSI_IEEE_FP_CONTROL:
8918 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8920 /* Copied from linux ieee_fpcr_to_swcr. */
8921 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8922 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8923 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8924 | SWCR_TRAP_ENABLE_DZE
8925 | SWCR_TRAP_ENABLE_OVF);
8926 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8927 | SWCR_TRAP_ENABLE_INE);
8928 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8929 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8931 if (put_user_u64 (swcr, arg2))
8932 goto efault;
8933 ret = 0;
8935 break;
8937 /* case GSI_IEEE_STATE_AT_SIGNAL:
8938 -- Not implemented in linux kernel.
8939 case GSI_UACPROC:
8940 -- Retrieves current unaligned access state; not much used.
8941 case GSI_PROC_TYPE:
8942 -- Retrieves implver information; surely not used.
8943 case GSI_GET_HWRPB:
8944 -- Grabs a copy of the HWRPB; surely not used.
8947 break;
8948 #endif
8949 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8950 /* Alpha specific */
8951 case TARGET_NR_osf_setsysinfo:
8952 ret = -TARGET_EOPNOTSUPP;
8953 switch (arg1) {
8954 case TARGET_SSI_IEEE_FP_CONTROL:
8956 uint64_t swcr, fpcr, orig_fpcr;
8958 if (get_user_u64 (swcr, arg2)) {
8959 goto efault;
8961 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8962 fpcr = orig_fpcr & FPCR_DYN_MASK;
8964 /* Copied from linux ieee_swcr_to_fpcr. */
8965 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8966 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8967 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8968 | SWCR_TRAP_ENABLE_DZE
8969 | SWCR_TRAP_ENABLE_OVF)) << 48;
8970 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8971 | SWCR_TRAP_ENABLE_INE)) << 57;
8972 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8973 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8975 cpu_alpha_store_fpcr(cpu_env, fpcr);
8976 ret = 0;
8978 break;
8980 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8982 uint64_t exc, fpcr, orig_fpcr;
8983 int si_code;
8985 if (get_user_u64(exc, arg2)) {
8986 goto efault;
8989 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8991 /* We only add to the exception status here. */
8992 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8994 cpu_alpha_store_fpcr(cpu_env, fpcr);
8995 ret = 0;
8997 /* Old exceptions are not signaled. */
8998 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
9000 /* If any exceptions set by this call,
9001 and are unmasked, send a signal. */
9002 si_code = 0;
9003 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
9004 si_code = TARGET_FPE_FLTRES;
9006 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
9007 si_code = TARGET_FPE_FLTUND;
9009 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
9010 si_code = TARGET_FPE_FLTOVF;
9012 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
9013 si_code = TARGET_FPE_FLTDIV;
9015 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
9016 si_code = TARGET_FPE_FLTINV;
9018 if (si_code != 0) {
9019 target_siginfo_t info;
9020 info.si_signo = SIGFPE;
9021 info.si_errno = 0;
9022 info.si_code = si_code;
9023 info._sifields._sigfault._addr
9024 = ((CPUArchState *)cpu_env)->pc;
9025 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9028 break;
9030 /* case SSI_NVPAIRS:
9031 -- Used with SSIN_UACPROC to enable unaligned accesses.
9032 case SSI_IEEE_STATE_AT_SIGNAL:
9033 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
9034 -- Not implemented in linux kernel
9037 break;
9038 #endif
9039 #ifdef TARGET_NR_osf_sigprocmask
9040 /* Alpha specific. */
9041 case TARGET_NR_osf_sigprocmask:
9043 abi_ulong mask;
9044 int how;
9045 sigset_t set, oldset;
9047 switch(arg1) {
9048 case TARGET_SIG_BLOCK:
9049 how = SIG_BLOCK;
9050 break;
9051 case TARGET_SIG_UNBLOCK:
9052 how = SIG_UNBLOCK;
9053 break;
9054 case TARGET_SIG_SETMASK:
9055 how = SIG_SETMASK;
9056 break;
9057 default:
9058 ret = -TARGET_EINVAL;
9059 goto fail;
9061 mask = arg2;
9062 target_to_host_old_sigset(&set, &mask);
9063 do_sigprocmask(how, &set, &oldset);
9064 host_to_target_old_sigset(&mask, &oldset);
9065 ret = mask;
9067 break;
9068 #endif
9070 #ifdef TARGET_NR_getgid32
9071 case TARGET_NR_getgid32:
9072 ret = get_errno(getgid());
9073 break;
9074 #endif
9075 #ifdef TARGET_NR_geteuid32
9076 case TARGET_NR_geteuid32:
9077 ret = get_errno(geteuid());
9078 break;
9079 #endif
9080 #ifdef TARGET_NR_getegid32
9081 case TARGET_NR_getegid32:
9082 ret = get_errno(getegid());
9083 break;
9084 #endif
9085 #ifdef TARGET_NR_setreuid32
9086 case TARGET_NR_setreuid32:
9087 ret = get_errno(setreuid(arg1, arg2));
9088 break;
9089 #endif
9090 #ifdef TARGET_NR_setregid32
9091 case TARGET_NR_setregid32:
9092 ret = get_errno(setregid(arg1, arg2));
9093 break;
9094 #endif
9095 #ifdef TARGET_NR_getgroups32
9096 case TARGET_NR_getgroups32:
9098 int gidsetsize = arg1;
9099 uint32_t *target_grouplist;
9100 gid_t *grouplist;
9101 int i;
9103 grouplist = alloca(gidsetsize * sizeof(gid_t));
9104 ret = get_errno(getgroups(gidsetsize, grouplist));
9105 if (gidsetsize == 0)
9106 break;
9107 if (!is_error(ret)) {
9108 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9109 if (!target_grouplist) {
9110 ret = -TARGET_EFAULT;
9111 goto fail;
9113 for(i = 0;i < ret; i++)
9114 target_grouplist[i] = tswap32(grouplist[i]);
9115 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9118 break;
9119 #endif
9120 #ifdef TARGET_NR_setgroups32
9121 case TARGET_NR_setgroups32:
9123 int gidsetsize = arg1;
9124 uint32_t *target_grouplist;
9125 gid_t *grouplist;
9126 int i;
9128 grouplist = alloca(gidsetsize * sizeof(gid_t));
9129 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9130 if (!target_grouplist) {
9131 ret = -TARGET_EFAULT;
9132 goto fail;
9134 for(i = 0;i < gidsetsize; i++)
9135 grouplist[i] = tswap32(target_grouplist[i]);
9136 unlock_user(target_grouplist, arg2, 0);
9137 ret = get_errno(setgroups(gidsetsize, grouplist));
9139 break;
9140 #endif
9141 #ifdef TARGET_NR_fchown32
9142 case TARGET_NR_fchown32:
9143 ret = get_errno(fchown(arg1, arg2, arg3));
9144 break;
9145 #endif
9146 #ifdef TARGET_NR_setresuid32
9147 case TARGET_NR_setresuid32:
9148 ret = get_errno(setresuid(arg1, arg2, arg3));
9149 break;
9150 #endif
9151 #ifdef TARGET_NR_getresuid32
9152 case TARGET_NR_getresuid32:
9154 uid_t ruid, euid, suid;
9155 ret = get_errno(getresuid(&ruid, &euid, &suid));
9156 if (!is_error(ret)) {
9157 if (put_user_u32(ruid, arg1)
9158 || put_user_u32(euid, arg2)
9159 || put_user_u32(suid, arg3))
9160 goto efault;
9163 break;
9164 #endif
9165 #ifdef TARGET_NR_setresgid32
9166 case TARGET_NR_setresgid32:
9167 ret = get_errno(setresgid(arg1, arg2, arg3));
9168 break;
9169 #endif
9170 #ifdef TARGET_NR_getresgid32
9171 case TARGET_NR_getresgid32:
9173 gid_t rgid, egid, sgid;
9174 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9175 if (!is_error(ret)) {
9176 if (put_user_u32(rgid, arg1)
9177 || put_user_u32(egid, arg2)
9178 || put_user_u32(sgid, arg3))
9179 goto efault;
9182 break;
9183 #endif
9184 #ifdef TARGET_NR_chown32
9185 case TARGET_NR_chown32:
9186 if (!(p = lock_user_string(arg1)))
9187 goto efault;
9188 ret = get_errno(chown(p, arg2, arg3));
9189 unlock_user(p, arg1, 0);
9190 break;
9191 #endif
9192 #ifdef TARGET_NR_setuid32
9193 case TARGET_NR_setuid32:
9194 ret = get_errno(setuid(arg1));
9195 break;
9196 #endif
9197 #ifdef TARGET_NR_setgid32
9198 case TARGET_NR_setgid32:
9199 ret = get_errno(setgid(arg1));
9200 break;
9201 #endif
9202 #ifdef TARGET_NR_setfsuid32
9203 case TARGET_NR_setfsuid32:
9204 ret = get_errno(setfsuid(arg1));
9205 break;
9206 #endif
9207 #ifdef TARGET_NR_setfsgid32
9208 case TARGET_NR_setfsgid32:
9209 ret = get_errno(setfsgid(arg1));
9210 break;
9211 #endif
9213 case TARGET_NR_pivot_root:
9214 goto unimplemented;
9215 #ifdef TARGET_NR_mincore
9216 case TARGET_NR_mincore:
9218 void *a;
9219 ret = -TARGET_EFAULT;
9220 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
9221 goto efault;
9222 if (!(p = lock_user_string(arg3)))
9223 goto mincore_fail;
9224 ret = get_errno(mincore(a, arg2, p));
9225 unlock_user(p, arg3, ret);
9226 mincore_fail:
9227 unlock_user(a, arg1, 0);
9229 break;
9230 #endif
9231 #ifdef TARGET_NR_arm_fadvise64_64
9232 case TARGET_NR_arm_fadvise64_64:
9235 * arm_fadvise64_64 looks like fadvise64_64 but
9236 * with different argument order
9238 abi_long temp;
9239 temp = arg3;
9240 arg3 = arg4;
9241 arg4 = temp;
9243 #endif
9244 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9245 #ifdef TARGET_NR_fadvise64_64
9246 case TARGET_NR_fadvise64_64:
9247 #endif
9248 #ifdef TARGET_NR_fadvise64
9249 case TARGET_NR_fadvise64:
9250 #endif
9251 #ifdef TARGET_S390X
9252 switch (arg4) {
9253 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9254 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9255 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9256 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9257 default: break;
9259 #endif
9260 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9261 break;
9262 #endif
9263 #ifdef TARGET_NR_madvise
9264 case TARGET_NR_madvise:
9265 /* A straight passthrough may not be safe because qemu sometimes
9266 turns private file-backed mappings into anonymous mappings.
9267 This will break MADV_DONTNEED.
9268 This is a hint, so ignoring and returning success is ok. */
9269 ret = get_errno(0);
9270 break;
9271 #endif
9272 #if TARGET_ABI_BITS == 32
9273 case TARGET_NR_fcntl64:
9275 int cmd;
9276 struct flock64 fl;
9277 struct target_flock64 *target_fl;
9278 #ifdef TARGET_ARM
9279 struct target_eabi_flock64 *target_efl;
9280 #endif
9282 cmd = target_to_host_fcntl_cmd(arg2);
9283 if (cmd == -TARGET_EINVAL) {
9284 ret = cmd;
9285 break;
9288 switch(arg2) {
9289 case TARGET_F_GETLK64:
9290 #ifdef TARGET_ARM
9291 if (((CPUARMState *)cpu_env)->eabi) {
9292 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9293 goto efault;
9294 fl.l_type = tswap16(target_efl->l_type);
9295 fl.l_whence = tswap16(target_efl->l_whence);
9296 fl.l_start = tswap64(target_efl->l_start);
9297 fl.l_len = tswap64(target_efl->l_len);
9298 fl.l_pid = tswap32(target_efl->l_pid);
9299 unlock_user_struct(target_efl, arg3, 0);
9300 } else
9301 #endif
9303 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9304 goto efault;
9305 fl.l_type = tswap16(target_fl->l_type);
9306 fl.l_whence = tswap16(target_fl->l_whence);
9307 fl.l_start = tswap64(target_fl->l_start);
9308 fl.l_len = tswap64(target_fl->l_len);
9309 fl.l_pid = tswap32(target_fl->l_pid);
9310 unlock_user_struct(target_fl, arg3, 0);
9312 ret = get_errno(fcntl(arg1, cmd, &fl));
9313 if (ret == 0) {
9314 #ifdef TARGET_ARM
9315 if (((CPUARMState *)cpu_env)->eabi) {
9316 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9317 goto efault;
9318 target_efl->l_type = tswap16(fl.l_type);
9319 target_efl->l_whence = tswap16(fl.l_whence);
9320 target_efl->l_start = tswap64(fl.l_start);
9321 target_efl->l_len = tswap64(fl.l_len);
9322 target_efl->l_pid = tswap32(fl.l_pid);
9323 unlock_user_struct(target_efl, arg3, 1);
9324 } else
9325 #endif
9327 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9328 goto efault;
9329 target_fl->l_type = tswap16(fl.l_type);
9330 target_fl->l_whence = tswap16(fl.l_whence);
9331 target_fl->l_start = tswap64(fl.l_start);
9332 target_fl->l_len = tswap64(fl.l_len);
9333 target_fl->l_pid = tswap32(fl.l_pid);
9334 unlock_user_struct(target_fl, arg3, 1);
9337 break;
9339 case TARGET_F_SETLK64:
9340 case TARGET_F_SETLKW64:
9341 #ifdef TARGET_ARM
9342 if (((CPUARMState *)cpu_env)->eabi) {
9343 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9344 goto efault;
9345 fl.l_type = tswap16(target_efl->l_type);
9346 fl.l_whence = tswap16(target_efl->l_whence);
9347 fl.l_start = tswap64(target_efl->l_start);
9348 fl.l_len = tswap64(target_efl->l_len);
9349 fl.l_pid = tswap32(target_efl->l_pid);
9350 unlock_user_struct(target_efl, arg3, 0);
9351 } else
9352 #endif
9354 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9355 goto efault;
9356 fl.l_type = tswap16(target_fl->l_type);
9357 fl.l_whence = tswap16(target_fl->l_whence);
9358 fl.l_start = tswap64(target_fl->l_start);
9359 fl.l_len = tswap64(target_fl->l_len);
9360 fl.l_pid = tswap32(target_fl->l_pid);
9361 unlock_user_struct(target_fl, arg3, 0);
9363 ret = get_errno(fcntl(arg1, cmd, &fl));
9364 break;
9365 default:
9366 ret = do_fcntl(arg1, arg2, arg3);
9367 break;
9369 break;
9371 #endif
9372 #ifdef TARGET_NR_cacheflush
9373 case TARGET_NR_cacheflush:
9374 /* self-modifying code is handled automatically, so nothing needed */
9375 ret = 0;
9376 break;
9377 #endif
9378 #ifdef TARGET_NR_security
9379 case TARGET_NR_security:
9380 goto unimplemented;
9381 #endif
9382 #ifdef TARGET_NR_getpagesize
9383 case TARGET_NR_getpagesize:
9384 ret = TARGET_PAGE_SIZE;
9385 break;
9386 #endif
9387 case TARGET_NR_gettid:
9388 ret = get_errno(gettid());
9389 break;
9390 #ifdef TARGET_NR_readahead
9391 case TARGET_NR_readahead:
9392 #if TARGET_ABI_BITS == 32
9393 if (regpairs_aligned(cpu_env)) {
9394 arg2 = arg3;
9395 arg3 = arg4;
9396 arg4 = arg5;
9398 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9399 #else
9400 ret = get_errno(readahead(arg1, arg2, arg3));
9401 #endif
9402 break;
9403 #endif
9404 #ifdef CONFIG_ATTR
9405 #ifdef TARGET_NR_setxattr
9406 case TARGET_NR_listxattr:
9407 case TARGET_NR_llistxattr:
9409 void *p, *b = 0;
9410 if (arg2) {
9411 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9412 if (!b) {
9413 ret = -TARGET_EFAULT;
9414 break;
9417 p = lock_user_string(arg1);
9418 if (p) {
9419 if (num == TARGET_NR_listxattr) {
9420 ret = get_errno(listxattr(p, b, arg3));
9421 } else {
9422 ret = get_errno(llistxattr(p, b, arg3));
9424 } else {
9425 ret = -TARGET_EFAULT;
9427 unlock_user(p, arg1, 0);
9428 unlock_user(b, arg2, arg3);
9429 break;
9431 case TARGET_NR_flistxattr:
9433 void *b = 0;
9434 if (arg2) {
9435 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9436 if (!b) {
9437 ret = -TARGET_EFAULT;
9438 break;
9441 ret = get_errno(flistxattr(arg1, b, arg3));
9442 unlock_user(b, arg2, arg3);
9443 break;
9445 case TARGET_NR_setxattr:
9446 case TARGET_NR_lsetxattr:
9448 void *p, *n, *v = 0;
9449 if (arg3) {
9450 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9451 if (!v) {
9452 ret = -TARGET_EFAULT;
9453 break;
9456 p = lock_user_string(arg1);
9457 n = lock_user_string(arg2);
9458 if (p && n) {
9459 if (num == TARGET_NR_setxattr) {
9460 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9461 } else {
9462 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9464 } else {
9465 ret = -TARGET_EFAULT;
9467 unlock_user(p, arg1, 0);
9468 unlock_user(n, arg2, 0);
9469 unlock_user(v, arg3, 0);
9471 break;
9472 case TARGET_NR_fsetxattr:
9474 void *n, *v = 0;
9475 if (arg3) {
9476 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9477 if (!v) {
9478 ret = -TARGET_EFAULT;
9479 break;
9482 n = lock_user_string(arg2);
9483 if (n) {
9484 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9485 } else {
9486 ret = -TARGET_EFAULT;
9488 unlock_user(n, arg2, 0);
9489 unlock_user(v, arg3, 0);
9491 break;
9492 case TARGET_NR_getxattr:
9493 case TARGET_NR_lgetxattr:
9495 void *p, *n, *v = 0;
9496 if (arg3) {
9497 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9498 if (!v) {
9499 ret = -TARGET_EFAULT;
9500 break;
9503 p = lock_user_string(arg1);
9504 n = lock_user_string(arg2);
9505 if (p && n) {
9506 if (num == TARGET_NR_getxattr) {
9507 ret = get_errno(getxattr(p, n, v, arg4));
9508 } else {
9509 ret = get_errno(lgetxattr(p, n, v, arg4));
9511 } else {
9512 ret = -TARGET_EFAULT;
9514 unlock_user(p, arg1, 0);
9515 unlock_user(n, arg2, 0);
9516 unlock_user(v, arg3, arg4);
9518 break;
9519 case TARGET_NR_fgetxattr:
9521 void *n, *v = 0;
9522 if (arg3) {
9523 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9524 if (!v) {
9525 ret = -TARGET_EFAULT;
9526 break;
9529 n = lock_user_string(arg2);
9530 if (n) {
9531 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9532 } else {
9533 ret = -TARGET_EFAULT;
9535 unlock_user(n, arg2, 0);
9536 unlock_user(v, arg3, arg4);
9538 break;
9539 case TARGET_NR_removexattr:
9540 case TARGET_NR_lremovexattr:
9542 void *p, *n;
9543 p = lock_user_string(arg1);
9544 n = lock_user_string(arg2);
9545 if (p && n) {
9546 if (num == TARGET_NR_removexattr) {
9547 ret = get_errno(removexattr(p, n));
9548 } else {
9549 ret = get_errno(lremovexattr(p, n));
9551 } else {
9552 ret = -TARGET_EFAULT;
9554 unlock_user(p, arg1, 0);
9555 unlock_user(n, arg2, 0);
9557 break;
9558 case TARGET_NR_fremovexattr:
9560 void *n;
9561 n = lock_user_string(arg2);
9562 if (n) {
9563 ret = get_errno(fremovexattr(arg1, n));
9564 } else {
9565 ret = -TARGET_EFAULT;
9567 unlock_user(n, arg2, 0);
9569 break;
9570 #endif
9571 #endif /* CONFIG_ATTR */
9572 #ifdef TARGET_NR_set_thread_area
9573 case TARGET_NR_set_thread_area:
9574 #if defined(TARGET_MIPS)
9575 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9576 ret = 0;
9577 break;
9578 #elif defined(TARGET_CRIS)
9579 if (arg1 & 0xff)
9580 ret = -TARGET_EINVAL;
9581 else {
9582 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9583 ret = 0;
9585 break;
9586 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9587 ret = do_set_thread_area(cpu_env, arg1);
9588 break;
9589 #elif defined(TARGET_M68K)
9591 TaskState *ts = cpu->opaque;
9592 ts->tp_value = arg1;
9593 ret = 0;
9594 break;
9596 #else
9597 goto unimplemented_nowarn;
9598 #endif
9599 #endif
9600 #ifdef TARGET_NR_get_thread_area
9601 case TARGET_NR_get_thread_area:
9602 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9603 ret = do_get_thread_area(cpu_env, arg1);
9604 break;
9605 #elif defined(TARGET_M68K)
9607 TaskState *ts = cpu->opaque;
9608 ret = ts->tp_value;
9609 break;
9611 #else
9612 goto unimplemented_nowarn;
9613 #endif
9614 #endif
9615 #ifdef TARGET_NR_getdomainname
9616 case TARGET_NR_getdomainname:
9617 goto unimplemented_nowarn;
9618 #endif
9620 #ifdef TARGET_NR_clock_gettime
9621 case TARGET_NR_clock_gettime:
9623 struct timespec ts;
9624 ret = get_errno(clock_gettime(arg1, &ts));
9625 if (!is_error(ret)) {
9626 host_to_target_timespec(arg2, &ts);
9628 break;
9630 #endif
9631 #ifdef TARGET_NR_clock_getres
9632 case TARGET_NR_clock_getres:
9634 struct timespec ts;
9635 ret = get_errno(clock_getres(arg1, &ts));
9636 if (!is_error(ret)) {
9637 host_to_target_timespec(arg2, &ts);
9639 break;
9641 #endif
9642 #ifdef TARGET_NR_clock_nanosleep
9643 case TARGET_NR_clock_nanosleep:
9645 struct timespec ts;
9646 target_to_host_timespec(&ts, arg3);
9647 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9648 if (arg4)
9649 host_to_target_timespec(arg4, &ts);
9651 #if defined(TARGET_PPC)
9652 /* clock_nanosleep is odd in that it returns positive errno values.
9653 * On PPC, CR0 bit 3 should be set in such a situation. */
9654 if (ret) {
9655 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9657 #endif
9658 break;
9660 #endif
9662 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9663 case TARGET_NR_set_tid_address:
9664 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9665 break;
9666 #endif
9668 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9669 case TARGET_NR_tkill:
9670 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9671 break;
9672 #endif
9674 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9675 case TARGET_NR_tgkill:
9676 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9677 target_to_host_signal(arg3)));
9678 break;
9679 #endif
9681 #ifdef TARGET_NR_set_robust_list
9682 case TARGET_NR_set_robust_list:
9683 case TARGET_NR_get_robust_list:
9684 /* The ABI for supporting robust futexes has userspace pass
9685 * the kernel a pointer to a linked list which is updated by
9686 * userspace after the syscall; the list is walked by the kernel
9687 * when the thread exits. Since the linked list in QEMU guest
9688 * memory isn't a valid linked list for the host and we have
9689 * no way to reliably intercept the thread-death event, we can't
9690 * support these. Silently return ENOSYS so that guest userspace
9691 * falls back to a non-robust futex implementation (which should
9692 * be OK except in the corner case of the guest crashing while
9693 * holding a mutex that is shared with another process via
9694 * shared memory).
9696 goto unimplemented_nowarn;
9697 #endif
9699 #if defined(TARGET_NR_utimensat)
9700 case TARGET_NR_utimensat:
9702 struct timespec *tsp, ts[2];
9703 if (!arg3) {
9704 tsp = NULL;
9705 } else {
9706 target_to_host_timespec(ts, arg3);
9707 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9708 tsp = ts;
9710 if (!arg2)
9711 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9712 else {
9713 if (!(p = lock_user_string(arg2))) {
9714 ret = -TARGET_EFAULT;
9715 goto fail;
9717 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9718 unlock_user(p, arg2, 0);
9721 break;
9722 #endif
9723 case TARGET_NR_futex:
9724 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9725 break;
9726 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9727 case TARGET_NR_inotify_init:
9728 ret = get_errno(sys_inotify_init());
9729 break;
9730 #endif
9731 #ifdef CONFIG_INOTIFY1
9732 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9733 case TARGET_NR_inotify_init1:
9734 ret = get_errno(sys_inotify_init1(arg1));
9735 break;
9736 #endif
9737 #endif
9738 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9739 case TARGET_NR_inotify_add_watch:
9740 p = lock_user_string(arg2);
9741 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9742 unlock_user(p, arg2, 0);
9743 break;
9744 #endif
9745 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9746 case TARGET_NR_inotify_rm_watch:
9747 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9748 break;
9749 #endif
9751 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9752 case TARGET_NR_mq_open:
9754 struct mq_attr posix_mq_attr, *attrp;
9756 p = lock_user_string(arg1 - 1);
9757 if (arg4 != 0) {
9758 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9759 attrp = &posix_mq_attr;
9760 } else {
9761 attrp = 0;
9763 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9764 unlock_user (p, arg1, 0);
9766 break;
9768 case TARGET_NR_mq_unlink:
9769 p = lock_user_string(arg1 - 1);
9770 ret = get_errno(mq_unlink(p));
9771 unlock_user (p, arg1, 0);
9772 break;
9774 case TARGET_NR_mq_timedsend:
9776 struct timespec ts;
9778 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9779 if (arg5 != 0) {
9780 target_to_host_timespec(&ts, arg5);
9781 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9782 host_to_target_timespec(arg5, &ts);
9784 else
9785 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9786 unlock_user (p, arg2, arg3);
9788 break;
9790 case TARGET_NR_mq_timedreceive:
9792 struct timespec ts;
9793 unsigned int prio;
9795 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9796 if (arg5 != 0) {
9797 target_to_host_timespec(&ts, arg5);
9798 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9799 host_to_target_timespec(arg5, &ts);
9801 else
9802 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9803 unlock_user (p, arg2, arg3);
9804 if (arg4 != 0)
9805 put_user_u32(prio, arg4);
9807 break;
9809 /* Not implemented for now... */
9810 /* case TARGET_NR_mq_notify: */
9811 /* break; */
9813 case TARGET_NR_mq_getsetattr:
9815 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9816 ret = 0;
9817 if (arg3 != 0) {
9818 ret = mq_getattr(arg1, &posix_mq_attr_out);
9819 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9821 if (arg2 != 0) {
9822 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9823 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9827 break;
9828 #endif
9830 #ifdef CONFIG_SPLICE
9831 #ifdef TARGET_NR_tee
9832 case TARGET_NR_tee:
9834 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9836 break;
9837 #endif
9838 #ifdef TARGET_NR_splice
9839 case TARGET_NR_splice:
9841 loff_t loff_in, loff_out;
9842 loff_t *ploff_in = NULL, *ploff_out = NULL;
9843 if (arg2) {
9844 if (get_user_u64(loff_in, arg2)) {
9845 goto efault;
9847 ploff_in = &loff_in;
9849 if (arg4) {
9850 if (get_user_u64(loff_out, arg4)) {
9851 goto efault;
9853 ploff_out = &loff_out;
9855 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9856 if (arg2) {
9857 if (put_user_u64(loff_in, arg2)) {
9858 goto efault;
9861 if (arg4) {
9862 if (put_user_u64(loff_out, arg4)) {
9863 goto efault;
9867 break;
9868 #endif
9869 #ifdef TARGET_NR_vmsplice
9870 case TARGET_NR_vmsplice:
9872 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9873 if (vec != NULL) {
9874 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9875 unlock_iovec(vec, arg2, arg3, 0);
9876 } else {
9877 ret = -host_to_target_errno(errno);
9880 break;
9881 #endif
9882 #endif /* CONFIG_SPLICE */
9883 #ifdef CONFIG_EVENTFD
9884 #if defined(TARGET_NR_eventfd)
9885 case TARGET_NR_eventfd:
9886 ret = get_errno(eventfd(arg1, 0));
9887 fd_trans_unregister(ret);
9888 break;
9889 #endif
9890 #if defined(TARGET_NR_eventfd2)
9891 case TARGET_NR_eventfd2:
9893 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9894 if (arg2 & TARGET_O_NONBLOCK) {
9895 host_flags |= O_NONBLOCK;
9897 if (arg2 & TARGET_O_CLOEXEC) {
9898 host_flags |= O_CLOEXEC;
9900 ret = get_errno(eventfd(arg1, host_flags));
9901 fd_trans_unregister(ret);
9902 break;
9904 #endif
9905 #endif /* CONFIG_EVENTFD */
9906 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9907 case TARGET_NR_fallocate:
9908 #if TARGET_ABI_BITS == 32
9909 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9910 target_offset64(arg5, arg6)));
9911 #else
9912 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9913 #endif
9914 break;
9915 #endif
9916 #if defined(CONFIG_SYNC_FILE_RANGE)
9917 #if defined(TARGET_NR_sync_file_range)
9918 case TARGET_NR_sync_file_range:
9919 #if TARGET_ABI_BITS == 32
9920 #if defined(TARGET_MIPS)
9921 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9922 target_offset64(arg5, arg6), arg7));
9923 #else
9924 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9925 target_offset64(arg4, arg5), arg6));
9926 #endif /* !TARGET_MIPS */
9927 #else
9928 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9929 #endif
9930 break;
9931 #endif
9932 #if defined(TARGET_NR_sync_file_range2)
9933 case TARGET_NR_sync_file_range2:
9934 /* This is like sync_file_range but the arguments are reordered */
9935 #if TARGET_ABI_BITS == 32
9936 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9937 target_offset64(arg5, arg6), arg2));
9938 #else
9939 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9940 #endif
9941 break;
9942 #endif
9943 #endif
9944 #if defined(TARGET_NR_signalfd4)
9945 case TARGET_NR_signalfd4:
9946 ret = do_signalfd4(arg1, arg2, arg4);
9947 break;
9948 #endif
9949 #if defined(TARGET_NR_signalfd)
9950 case TARGET_NR_signalfd:
9951 ret = do_signalfd4(arg1, arg2, 0);
9952 break;
9953 #endif
9954 #if defined(CONFIG_EPOLL)
9955 #if defined(TARGET_NR_epoll_create)
9956 case TARGET_NR_epoll_create:
9957 ret = get_errno(epoll_create(arg1));
9958 break;
9959 #endif
9960 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9961 case TARGET_NR_epoll_create1:
9962 ret = get_errno(epoll_create1(arg1));
9963 break;
9964 #endif
9965 #if defined(TARGET_NR_epoll_ctl)
9966 case TARGET_NR_epoll_ctl:
9968 struct epoll_event ep;
9969 struct epoll_event *epp = 0;
9970 if (arg4) {
9971 struct target_epoll_event *target_ep;
9972 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9973 goto efault;
9975 ep.events = tswap32(target_ep->events);
9976 /* The epoll_data_t union is just opaque data to the kernel,
9977 * so we transfer all 64 bits across and need not worry what
9978 * actual data type it is.
9980 ep.data.u64 = tswap64(target_ep->data.u64);
9981 unlock_user_struct(target_ep, arg4, 0);
9982 epp = &ep;
9984 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9985 break;
9987 #endif
9989 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9990 #define IMPLEMENT_EPOLL_PWAIT
9991 #endif
9992 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9993 #if defined(TARGET_NR_epoll_wait)
9994 case TARGET_NR_epoll_wait:
9995 #endif
9996 #if defined(IMPLEMENT_EPOLL_PWAIT)
9997 case TARGET_NR_epoll_pwait:
9998 #endif
10000 struct target_epoll_event *target_ep;
10001 struct epoll_event *ep;
10002 int epfd = arg1;
10003 int maxevents = arg3;
10004 int timeout = arg4;
10006 target_ep = lock_user(VERIFY_WRITE, arg2,
10007 maxevents * sizeof(struct target_epoll_event), 1);
10008 if (!target_ep) {
10009 goto efault;
10012 ep = alloca(maxevents * sizeof(struct epoll_event));
10014 switch (num) {
10015 #if defined(IMPLEMENT_EPOLL_PWAIT)
10016 case TARGET_NR_epoll_pwait:
10018 target_sigset_t *target_set;
10019 sigset_t _set, *set = &_set;
10021 if (arg5) {
10022 target_set = lock_user(VERIFY_READ, arg5,
10023 sizeof(target_sigset_t), 1);
10024 if (!target_set) {
10025 unlock_user(target_ep, arg2, 0);
10026 goto efault;
10028 target_to_host_sigset(set, target_set);
10029 unlock_user(target_set, arg5, 0);
10030 } else {
10031 set = NULL;
10034 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
10035 break;
10037 #endif
10038 #if defined(TARGET_NR_epoll_wait)
10039 case TARGET_NR_epoll_wait:
10040 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
10041 break;
10042 #endif
10043 default:
10044 ret = -TARGET_ENOSYS;
10046 if (!is_error(ret)) {
10047 int i;
10048 for (i = 0; i < ret; i++) {
10049 target_ep[i].events = tswap32(ep[i].events);
10050 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
10053 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
10054 break;
10056 #endif
10057 #endif
10058 #ifdef TARGET_NR_prlimit64
10059 case TARGET_NR_prlimit64:
10061 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
10062 struct target_rlimit64 *target_rnew, *target_rold;
10063 struct host_rlimit64 rnew, rold, *rnewp = 0;
10064 int resource = target_to_host_resource(arg2);
10065 if (arg3) {
10066 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
10067 goto efault;
10069 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
10070 rnew.rlim_max = tswap64(target_rnew->rlim_max);
10071 unlock_user_struct(target_rnew, arg3, 0);
10072 rnewp = &rnew;
10075 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10076 if (!is_error(ret) && arg4) {
10077 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10078 goto efault;
10080 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10081 target_rold->rlim_max = tswap64(rold.rlim_max);
10082 unlock_user_struct(target_rold, arg4, 1);
10084 break;
10086 #endif
10087 #ifdef TARGET_NR_gethostname
10088 case TARGET_NR_gethostname:
10090 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10091 if (name) {
10092 ret = get_errno(gethostname(name, arg2));
10093 unlock_user(name, arg1, arg2);
10094 } else {
10095 ret = -TARGET_EFAULT;
10097 break;
10099 #endif
10100 #ifdef TARGET_NR_atomic_cmpxchg_32
10101 case TARGET_NR_atomic_cmpxchg_32:
10103 /* should use start_exclusive from main.c */
10104 abi_ulong mem_value;
10105 if (get_user_u32(mem_value, arg6)) {
10106 target_siginfo_t info;
10107 info.si_signo = SIGSEGV;
10108 info.si_errno = 0;
10109 info.si_code = TARGET_SEGV_MAPERR;
10110 info._sifields._sigfault._addr = arg6;
10111 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10112 ret = 0xdeadbeef;
10115 if (mem_value == arg2)
10116 put_user_u32(arg1, arg6);
10117 ret = mem_value;
10118 break;
10120 #endif
10121 #ifdef TARGET_NR_atomic_barrier
10122 case TARGET_NR_atomic_barrier:
10124 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10125 ret = 0;
10126 break;
10128 #endif
10130 #ifdef TARGET_NR_timer_create
10131 case TARGET_NR_timer_create:
10133 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10135 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10137 int clkid = arg1;
10138 int timer_index = next_free_host_timer();
10140 if (timer_index < 0) {
10141 ret = -TARGET_EAGAIN;
10142 } else {
10143 timer_t *phtimer = g_posix_timers + timer_index;
10145 if (arg2) {
10146 phost_sevp = &host_sevp;
10147 ret = target_to_host_sigevent(phost_sevp, arg2);
10148 if (ret != 0) {
10149 break;
10153 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10154 if (ret) {
10155 phtimer = NULL;
10156 } else {
10157 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10158 goto efault;
10162 break;
10164 #endif
10166 #ifdef TARGET_NR_timer_settime
10167 case TARGET_NR_timer_settime:
10169 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10170 * struct itimerspec * old_value */
10171 target_timer_t timerid = get_timer_id(arg1);
10173 if (timerid < 0) {
10174 ret = timerid;
10175 } else if (arg3 == 0) {
10176 ret = -TARGET_EINVAL;
10177 } else {
10178 timer_t htimer = g_posix_timers[timerid];
10179 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
10181 target_to_host_itimerspec(&hspec_new, arg3);
10182 ret = get_errno(
10183 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
10184 host_to_target_itimerspec(arg2, &hspec_old);
10186 break;
10188 #endif
10190 #ifdef TARGET_NR_timer_gettime
10191 case TARGET_NR_timer_gettime:
10193 /* args: timer_t timerid, struct itimerspec *curr_value */
10194 target_timer_t timerid = get_timer_id(arg1);
10196 if (timerid < 0) {
10197 ret = timerid;
10198 } else if (!arg2) {
10199 ret = -TARGET_EFAULT;
10200 } else {
10201 timer_t htimer = g_posix_timers[timerid];
10202 struct itimerspec hspec;
10203 ret = get_errno(timer_gettime(htimer, &hspec));
10205 if (host_to_target_itimerspec(arg2, &hspec)) {
10206 ret = -TARGET_EFAULT;
10209 break;
10211 #endif
10213 #ifdef TARGET_NR_timer_getoverrun
10214 case TARGET_NR_timer_getoverrun:
10216 /* args: timer_t timerid */
10217 target_timer_t timerid = get_timer_id(arg1);
10219 if (timerid < 0) {
10220 ret = timerid;
10221 } else {
10222 timer_t htimer = g_posix_timers[timerid];
10223 ret = get_errno(timer_getoverrun(htimer));
10225 fd_trans_unregister(ret);
10226 break;
10228 #endif
10230 #ifdef TARGET_NR_timer_delete
10231 case TARGET_NR_timer_delete:
10233 /* args: timer_t timerid */
10234 target_timer_t timerid = get_timer_id(arg1);
10236 if (timerid < 0) {
10237 ret = timerid;
10238 } else {
10239 timer_t htimer = g_posix_timers[timerid];
10240 ret = get_errno(timer_delete(htimer));
10241 g_posix_timers[timerid] = 0;
10243 break;
10245 #endif
10247 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10248 case TARGET_NR_timerfd_create:
10249 ret = get_errno(timerfd_create(arg1,
10250 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
10251 break;
10252 #endif
10254 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10255 case TARGET_NR_timerfd_gettime:
10257 struct itimerspec its_curr;
10259 ret = get_errno(timerfd_gettime(arg1, &its_curr));
10261 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
10262 goto efault;
10265 break;
10266 #endif
10268 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10269 case TARGET_NR_timerfd_settime:
10271 struct itimerspec its_new, its_old, *p_new;
10273 if (arg3) {
10274 if (target_to_host_itimerspec(&its_new, arg3)) {
10275 goto efault;
10277 p_new = &its_new;
10278 } else {
10279 p_new = NULL;
10282 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
10284 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
10285 goto efault;
10288 break;
10289 #endif
10291 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10292 case TARGET_NR_ioprio_get:
10293 ret = get_errno(ioprio_get(arg1, arg2));
10294 break;
10295 #endif
10297 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10298 case TARGET_NR_ioprio_set:
10299 ret = get_errno(ioprio_set(arg1, arg2, arg3));
10300 break;
10301 #endif
10303 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10304 case TARGET_NR_setns:
10305 ret = get_errno(setns(arg1, arg2));
10306 break;
10307 #endif
10308 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10309 case TARGET_NR_unshare:
10310 ret = get_errno(unshare(arg1));
10311 break;
10312 #endif
10314 default:
10315 unimplemented:
10316 gemu_log("qemu: Unsupported syscall: %d\n", num);
10317 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10318 unimplemented_nowarn:
10319 #endif
10320 ret = -TARGET_ENOSYS;
10321 break;
10323 fail:
10324 #ifdef DEBUG
10325 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10326 #endif
10327 if(do_strace)
10328 print_syscall_ret(num, ret);
10329 return ret;
10330 efault:
10331 ret = -TARGET_EFAULT;
10332 goto fail;