qemu/ar7.git: linux-user/syscall.c (blob 0cbace45fd2314c63eed42c4ece73292e68ad454)
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <linux/capability.h>
47 #include <signal.h>
48 #include <sched.h>
49 #ifdef __ia64__
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
52 #endif
53 #include <sys/socket.h>
54 #include <sys/un.h>
55 #include <sys/uio.h>
56 #include <sys/poll.h>
57 #include <sys/times.h>
58 #include <sys/shm.h>
59 #include <sys/sem.h>
60 #include <sys/statfs.h>
61 #include <utime.h>
62 #include <sys/sysinfo.h>
63 #include <sys/signalfd.h>
64 //#include <sys/user.h>
65 #include <netinet/ip.h>
66 #include <netinet/tcp.h>
67 #include <linux/wireless.h>
68 #include <linux/icmp.h>
69 #include "qemu-common.h"
70 #ifdef CONFIG_TIMERFD
71 #include <sys/timerfd.h>
72 #endif
73 #ifdef TARGET_GPROF
74 #include <sys/gmon.h>
75 #endif
76 #ifdef CONFIG_EVENTFD
77 #include <sys/eventfd.h>
78 #endif
79 #ifdef CONFIG_EPOLL
80 #include <sys/epoll.h>
81 #endif
82 #ifdef CONFIG_ATTR
83 #include "qemu/xattr.h"
84 #endif
85 #ifdef CONFIG_SENDFILE
86 #include <sys/sendfile.h>
87 #endif
89 #define termios host_termios
90 #define winsize host_winsize
91 #define termio host_termio
92 #define sgttyb host_sgttyb /* same as target */
93 #define tchars host_tchars /* same as target */
94 #define ltchars host_ltchars /* same as target */
96 #include <linux/termios.h>
97 #include <linux/unistd.h>
98 #include <linux/cdrom.h>
99 #include <linux/hdreg.h>
100 #include <linux/soundcard.h>
101 #include <linux/kd.h>
102 #include <linux/mtio.h>
103 #include <linux/fs.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
106 #endif
107 #include <linux/fb.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include "linux_loop.h"
115 #include "uname.h"
117 #include "qemu.h"
119 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
120 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
122 //#define DEBUG
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
129 #undef _syscall0
130 #undef _syscall1
131 #undef _syscall2
132 #undef _syscall3
133 #undef _syscall4
134 #undef _syscall5
135 #undef _syscall6
137 #define _syscall0(type,name) \
138 static type name (void) \
140 return syscall(__NR_##name); \
143 #define _syscall1(type,name,type1,arg1) \
144 static type name (type1 arg1) \
146 return syscall(__NR_##name, arg1); \
149 #define _syscall2(type,name,type1,arg1,type2,arg2) \
150 static type name (type1 arg1,type2 arg2) \
152 return syscall(__NR_##name, arg1, arg2); \
155 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
156 static type name (type1 arg1,type2 arg2,type3 arg3) \
158 return syscall(__NR_##name, arg1, arg2, arg3); \
161 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
162 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
164 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
167 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 type5,arg5) \
169 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
171 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
175 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
176 type5,arg5,type6,arg6) \
177 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
178 type6 arg6) \
180 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
184 #define __NR_sys_uname __NR_uname
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
190 #define __NR_sys_syslog __NR_syslog
191 #define __NR_sys_tgkill __NR_tgkill
192 #define __NR_sys_tkill __NR_tkill
193 #define __NR_sys_futex __NR_futex
194 #define __NR_sys_inotify_init __NR_inotify_init
195 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
196 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
198 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
199 defined(__s390x__)
200 #define __NR__llseek __NR_lseek
201 #endif
203 /* Newer kernel ports have llseek() instead of _llseek() */
204 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
205 #define TARGET_NR__llseek TARGET_NR_llseek
206 #endif
208 #ifdef __NR_gettid
209 _syscall0(int, gettid)
210 #else
211 /* This is a replacement for the host gettid() and must return a host
212 errno. */
213 static int gettid(void) {
214 return -ENOSYS;
216 #endif
217 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
218 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
219 #endif
220 #if !defined(__NR_getdents) || \
221 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
222 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
223 #endif
224 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
225 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
226 loff_t *, res, uint, wh);
227 #endif
228 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
229 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
230 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
231 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
232 #endif
233 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
234 _syscall2(int,sys_tkill,int,tid,int,sig)
235 #endif
236 #ifdef __NR_exit_group
237 _syscall1(int,exit_group,int,error_code)
238 #endif
239 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
240 _syscall1(int,set_tid_address,int *,tidptr)
241 #endif
242 #if defined(TARGET_NR_futex) && defined(__NR_futex)
243 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
244 const struct timespec *,timeout,int *,uaddr2,int,val3)
245 #endif
246 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
247 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
248 unsigned long *, user_mask_ptr);
249 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
250 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
251 unsigned long *, user_mask_ptr);
252 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
253 void *, arg);
254 _syscall2(int, capget, struct __user_cap_header_struct *, header,
255 struct __user_cap_data_struct *, data);
256 _syscall2(int, capset, struct __user_cap_header_struct *, header,
257 struct __user_cap_data_struct *, data);
258 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
259 _syscall2(int, ioprio_get, int, which, int, who)
260 #endif
261 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
262 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
263 #endif
265 static bitmask_transtbl fcntl_flags_tbl[] = {
266 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
267 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
268 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
269 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
270 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
271 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
272 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
273 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
274 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
275 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
276 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
277 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
278 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
279 #if defined(O_DIRECT)
280 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
281 #endif
282 #if defined(O_NOATIME)
283 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
284 #endif
285 #if defined(O_CLOEXEC)
286 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
287 #endif
288 #if defined(O_PATH)
289 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
290 #endif
291 /* Don't terminate the list prematurely on 64-bit host+guest. */
292 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
293 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
294 #endif
295 { 0, 0, 0, 0 }
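/* Per-fd translation hooks: descriptors that need special handling (for
 * example AF_PACKET sockets, registered via target_packet_trans below) get
 * callbacks here to convert transferred data and socket addresses between
 * target and host representations. */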
298 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
299 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
300 typedef struct TargetFdTrans {
301 TargetFdDataFunc host_to_target_data;
302 TargetFdDataFunc target_to_host_data;
303 TargetFdAddrFunc target_to_host_addr;
304 } TargetFdTrans;
306 static TargetFdTrans **target_fd_trans;
308 static unsigned int target_fd_max;
310 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
312 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
313 return target_fd_trans[fd]->host_to_target_data;
315 return NULL;
318 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
320 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
321 return target_fd_trans[fd]->target_to_host_addr;
323 return NULL;
326 static void fd_trans_register(int fd, TargetFdTrans *trans)
328 unsigned int oldmax;
330 if (fd >= target_fd_max) {
331 oldmax = target_fd_max;
332 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
333 target_fd_trans = g_realloc(target_fd_trans,
334 target_fd_max * sizeof(TargetFdTrans));
335 memset((void *)(target_fd_trans + oldmax), 0,
336 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
338 target_fd_trans[fd] = trans;
341 static void fd_trans_unregister(int fd)
343 if (fd >= 0 && fd < target_fd_max) {
344 target_fd_trans[fd] = NULL;
348 static void fd_trans_dup(int oldfd, int newfd)
350 fd_trans_unregister(newfd);
351 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
352 fd_trans_register(newfd, target_fd_trans[oldfd]);
356 static int sys_getcwd1(char *buf, size_t size)
358 if (getcwd(buf, size) == NULL) {
359 /* getcwd() sets errno */
360 return (-1);
362 return strlen(buf)+1;
365 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
368 * open(2) has extra parameter 'mode' when called with
369 * flag O_CREAT.
371 if ((flags & O_CREAT) != 0) {
372 return (openat(dirfd, pathname, flags, mode));
374 return (openat(dirfd, pathname, flags));
377 #ifdef TARGET_NR_utimensat
378 #ifdef CONFIG_UTIMENSAT
379 static int sys_utimensat(int dirfd, const char *pathname,
380 const struct timespec times[2], int flags)
382 if (pathname == NULL)
383 return futimens(dirfd, times);
384 else
385 return utimensat(dirfd, pathname, times, flags);
387 #elif defined(__NR_utimensat)
388 #define __NR_sys_utimensat __NR_utimensat
389 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
390 const struct timespec *,tsp,int,flags)
391 #else
392 static int sys_utimensat(int dirfd, const char *pathname,
393 const struct timespec times[2], int flags)
395 errno = ENOSYS;
396 return -1;
398 #endif
399 #endif /* TARGET_NR_utimensat */
401 #ifdef CONFIG_INOTIFY
402 #include <sys/inotify.h>
404 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
405 static int sys_inotify_init(void)
407 return (inotify_init());
409 #endif
410 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
411 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
413 return (inotify_add_watch(fd, pathname, mask));
415 #endif
416 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
417 static int sys_inotify_rm_watch(int fd, int32_t wd)
419 return (inotify_rm_watch(fd, wd));
421 #endif
422 #ifdef CONFIG_INOTIFY1
423 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
424 static int sys_inotify_init1(int flags)
426 return (inotify_init1(flags));
428 #endif
429 #endif
430 #else
431 /* Userspace can usually survive runtime without inotify */
432 #undef TARGET_NR_inotify_init
433 #undef TARGET_NR_inotify_init1
434 #undef TARGET_NR_inotify_add_watch
435 #undef TARGET_NR_inotify_rm_watch
436 #endif /* CONFIG_INOTIFY */
438 #if defined(TARGET_NR_ppoll)
439 #ifndef __NR_ppoll
440 # define __NR_ppoll -1
441 #endif
442 #define __NR_sys_ppoll __NR_ppoll
443 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
444 struct timespec *, timeout, const sigset_t *, sigmask,
445 size_t, sigsetsize)
446 #endif
448 #if defined(TARGET_NR_pselect6)
449 #ifndef __NR_pselect6
450 # define __NR_pselect6 -1
451 #endif
452 #define __NR_sys_pselect6 __NR_pselect6
453 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
454 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
455 #endif
457 #if defined(TARGET_NR_prlimit64)
458 #ifndef __NR_prlimit64
459 # define __NR_prlimit64 -1
460 #endif
461 #define __NR_sys_prlimit64 __NR_prlimit64
462 /* The glibc rlimit structure may not be that used by the underlying syscall */
463 struct host_rlimit64 {
464 uint64_t rlim_cur;
465 uint64_t rlim_max;
467 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
468 const struct host_rlimit64 *, new_limit,
469 struct host_rlimit64 *, old_limit)
470 #endif
473 #if defined(TARGET_NR_timer_create)
474 /* Maximum of 32 active POSIX timers allowed at any one time. */
475 static timer_t g_posix_timers[32] = { 0, } ;
477 static inline int next_free_host_timer(void)
479 int k ;
480 /* FIXME: Does finding the next free slot require a lock? */
481 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
482 if (g_posix_timers[k] == 0) {
483 g_posix_timers[k] = (timer_t) 1;
484 return k;
487 return -1;
489 #endif
491 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
492 #ifdef TARGET_ARM
493 static inline int regpairs_aligned(void *cpu_env) {
494 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
496 #elif defined(TARGET_MIPS)
497 static inline int regpairs_aligned(void *cpu_env) { return 1; }
498 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
499 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
500 * of registers which translates to the same as ARM/MIPS, because we start with
501 * r3 as arg1 */
502 static inline int regpairs_aligned(void *cpu_env) { return 1; }
503 #else
504 static inline int regpairs_aligned(void *cpu_env) { return 0; }
505 #endif
507 #define ERRNO_TABLE_SIZE 1200
509 /* target_to_host_errno_table[] is initialized from
510 * host_to_target_errno_table[] in syscall_init(). */
511 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
515 * This list is the union of errno values overridden in asm-<arch>/errno.h
516 * minus the errnos that are not actually generic to all archs.
518 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
519 [EAGAIN] = TARGET_EAGAIN,
520 [EIDRM] = TARGET_EIDRM,
521 [ECHRNG] = TARGET_ECHRNG,
522 [EL2NSYNC] = TARGET_EL2NSYNC,
523 [EL3HLT] = TARGET_EL3HLT,
524 [EL3RST] = TARGET_EL3RST,
525 [ELNRNG] = TARGET_ELNRNG,
526 [EUNATCH] = TARGET_EUNATCH,
527 [ENOCSI] = TARGET_ENOCSI,
528 [EL2HLT] = TARGET_EL2HLT,
529 [EDEADLK] = TARGET_EDEADLK,
530 [ENOLCK] = TARGET_ENOLCK,
531 [EBADE] = TARGET_EBADE,
532 [EBADR] = TARGET_EBADR,
533 [EXFULL] = TARGET_EXFULL,
534 [ENOANO] = TARGET_ENOANO,
535 [EBADRQC] = TARGET_EBADRQC,
536 [EBADSLT] = TARGET_EBADSLT,
537 [EBFONT] = TARGET_EBFONT,
538 [ENOSTR] = TARGET_ENOSTR,
539 [ENODATA] = TARGET_ENODATA,
540 [ETIME] = TARGET_ETIME,
541 [ENOSR] = TARGET_ENOSR,
542 [ENONET] = TARGET_ENONET,
543 [ENOPKG] = TARGET_ENOPKG,
544 [EREMOTE] = TARGET_EREMOTE,
545 [ENOLINK] = TARGET_ENOLINK,
546 [EADV] = TARGET_EADV,
547 [ESRMNT] = TARGET_ESRMNT,
548 [ECOMM] = TARGET_ECOMM,
549 [EPROTO] = TARGET_EPROTO,
550 [EDOTDOT] = TARGET_EDOTDOT,
551 [EMULTIHOP] = TARGET_EMULTIHOP,
552 [EBADMSG] = TARGET_EBADMSG,
553 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
554 [EOVERFLOW] = TARGET_EOVERFLOW,
555 [ENOTUNIQ] = TARGET_ENOTUNIQ,
556 [EBADFD] = TARGET_EBADFD,
557 [EREMCHG] = TARGET_EREMCHG,
558 [ELIBACC] = TARGET_ELIBACC,
559 [ELIBBAD] = TARGET_ELIBBAD,
560 [ELIBSCN] = TARGET_ELIBSCN,
561 [ELIBMAX] = TARGET_ELIBMAX,
562 [ELIBEXEC] = TARGET_ELIBEXEC,
563 [EILSEQ] = TARGET_EILSEQ,
564 [ENOSYS] = TARGET_ENOSYS,
565 [ELOOP] = TARGET_ELOOP,
566 [ERESTART] = TARGET_ERESTART,
567 [ESTRPIPE] = TARGET_ESTRPIPE,
568 [ENOTEMPTY] = TARGET_ENOTEMPTY,
569 [EUSERS] = TARGET_EUSERS,
570 [ENOTSOCK] = TARGET_ENOTSOCK,
571 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
572 [EMSGSIZE] = TARGET_EMSGSIZE,
573 [EPROTOTYPE] = TARGET_EPROTOTYPE,
574 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
575 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
576 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
577 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
578 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
579 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
580 [EADDRINUSE] = TARGET_EADDRINUSE,
581 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
582 [ENETDOWN] = TARGET_ENETDOWN,
583 [ENETUNREACH] = TARGET_ENETUNREACH,
584 [ENETRESET] = TARGET_ENETRESET,
585 [ECONNABORTED] = TARGET_ECONNABORTED,
586 [ECONNRESET] = TARGET_ECONNRESET,
587 [ENOBUFS] = TARGET_ENOBUFS,
588 [EISCONN] = TARGET_EISCONN,
589 [ENOTCONN] = TARGET_ENOTCONN,
590 [EUCLEAN] = TARGET_EUCLEAN,
591 [ENOTNAM] = TARGET_ENOTNAM,
592 [ENAVAIL] = TARGET_ENAVAIL,
593 [EISNAM] = TARGET_EISNAM,
594 [EREMOTEIO] = TARGET_EREMOTEIO,
595 [ESHUTDOWN] = TARGET_ESHUTDOWN,
596 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
597 [ETIMEDOUT] = TARGET_ETIMEDOUT,
598 [ECONNREFUSED] = TARGET_ECONNREFUSED,
599 [EHOSTDOWN] = TARGET_EHOSTDOWN,
600 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
601 [EALREADY] = TARGET_EALREADY,
602 [EINPROGRESS] = TARGET_EINPROGRESS,
603 [ESTALE] = TARGET_ESTALE,
604 [ECANCELED] = TARGET_ECANCELED,
605 [ENOMEDIUM] = TARGET_ENOMEDIUM,
606 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
607 #ifdef ENOKEY
608 [ENOKEY] = TARGET_ENOKEY,
609 #endif
610 #ifdef EKEYEXPIRED
611 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
612 #endif
613 #ifdef EKEYREVOKED
614 [EKEYREVOKED] = TARGET_EKEYREVOKED,
615 #endif
616 #ifdef EKEYREJECTED
617 [EKEYREJECTED] = TARGET_EKEYREJECTED,
618 #endif
619 #ifdef EOWNERDEAD
620 [EOWNERDEAD] = TARGET_EOWNERDEAD,
621 #endif
622 #ifdef ENOTRECOVERABLE
623 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
624 #endif
627 static inline int host_to_target_errno(int err)
629 if(host_to_target_errno_table[err])
630 return host_to_target_errno_table[err];
631 return err;
634 static inline int target_to_host_errno(int err)
636 if (target_to_host_errno_table[err])
637 return target_to_host_errno_table[err];
638 return err;
641 static inline abi_long get_errno(abi_long ret)
643 if (ret == -1)
644 return -host_to_target_errno(errno);
645 else
646 return ret;
649 static inline int is_error(abi_long ret)
651 return (abi_ulong)ret >= (abi_ulong)(-4096);
654 char *target_strerror(int err)
656 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
657 return NULL;
659 return strerror(target_to_host_errno(err));
662 static inline int host_to_target_sock_type(int host_type)
664 int target_type;
666 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
667 case SOCK_DGRAM:
668 target_type = TARGET_SOCK_DGRAM;
669 break;
670 case SOCK_STREAM:
671 target_type = TARGET_SOCK_STREAM;
672 break;
673 default:
674 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
675 break;
678 #if defined(SOCK_CLOEXEC)
679 if (host_type & SOCK_CLOEXEC) {
680 target_type |= TARGET_SOCK_CLOEXEC;
682 #endif
684 #if defined(SOCK_NONBLOCK)
685 if (host_type & SOCK_NONBLOCK) {
686 target_type |= TARGET_SOCK_NONBLOCK;
688 #endif
690 return target_type;
693 static abi_ulong target_brk;
694 static abi_ulong target_original_brk;
695 static abi_ulong brk_page;
697 void target_set_brk(abi_ulong new_brk)
699 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
700 brk_page = HOST_PAGE_ALIGN(target_brk);
703 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
704 #define DEBUGF_BRK(message, args...)
706 /* do_brk() must return target values and target errnos. */
707 abi_long do_brk(abi_ulong new_brk)
709 abi_long mapped_addr;
710 int new_alloc_size;
712 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
714 if (!new_brk) {
715 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
716 return target_brk;
718 if (new_brk < target_original_brk) {
719 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
720 target_brk);
721 return target_brk;
724 /* If the new brk is less than the highest page reserved to the
725 * target heap allocation, set it and we're almost done... */
726 if (new_brk <= brk_page) {
727 /* Heap contents are initialized to zero, as for anonymous
728 * mapped pages. */
729 if (new_brk > target_brk) {
730 memset(g2h(target_brk), 0, new_brk - target_brk);
732 target_brk = new_brk;
733 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
734 return target_brk;
737 /* We need to allocate more memory after the brk... Note that
738 * we don't use MAP_FIXED because that will map over the top of
739 * any existing mapping (like the one with the host libc or qemu
740 * itself); instead we treat "mapped but at wrong address" as
741 * a failure and unmap again.
743 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
744 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
745 PROT_READ|PROT_WRITE,
746 MAP_ANON|MAP_PRIVATE, 0, 0));
748 if (mapped_addr == brk_page) {
749 /* Heap contents are initialized to zero, as for anonymous
750 * mapped pages. Technically the new pages are already
751 * initialized to zero since they *are* anonymous mapped
752 * pages, however we have to take care with the contents that
753 * come from the remaining part of the previous page: it may
754 * contain garbage data due to a previous heap usage (grown
755 * then shrunken). */
756 memset(g2h(target_brk), 0, brk_page - target_brk);
758 target_brk = new_brk;
759 brk_page = HOST_PAGE_ALIGN(target_brk);
760 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
761 target_brk);
762 return target_brk;
763 } else if (mapped_addr != -1) {
764 /* Mapped but at wrong address, meaning there wasn't actually
765 * enough space for this brk.
767 target_munmap(mapped_addr, new_alloc_size);
768 mapped_addr = -1;
769 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
771 else {
772 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
775 #if defined(TARGET_ALPHA)
776 /* We (partially) emulate OSF/1 on Alpha, which requires we
777 return a proper errno, not an unchanged brk value. */
778 return -TARGET_ENOMEM;
779 #endif
780 /* For everything else, return the previous break. */
781 return target_brk;
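/* Convert a guest fd_set, stored at target_fds_addr as packed abi_ulong
 * bit words, into a host fd_set covering the first n descriptors. */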
784 static inline abi_long copy_from_user_fdset(fd_set *fds,
785 abi_ulong target_fds_addr,
786 int n)
788 int i, nw, j, k;
789 abi_ulong b, *target_fds;
791 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
792 if (!(target_fds = lock_user(VERIFY_READ,
793 target_fds_addr,
794 sizeof(abi_ulong) * nw,
795 1)))
796 return -TARGET_EFAULT;
798 FD_ZERO(fds);
799 k = 0;
800 for (i = 0; i < nw; i++) {
801 /* grab the abi_ulong */
802 __get_user(b, &target_fds[i]);
803 for (j = 0; j < TARGET_ABI_BITS; j++) {
804 /* check the bit inside the abi_ulong */
805 if ((b >> j) & 1)
806 FD_SET(k, fds);
807 k++;
811 unlock_user(target_fds, target_fds_addr, 0);
813 return 0;
816 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
817 abi_ulong target_fds_addr,
818 int n)
820 if (target_fds_addr) {
821 if (copy_from_user_fdset(fds, target_fds_addr, n))
822 return -TARGET_EFAULT;
823 *fds_ptr = fds;
824 } else {
825 *fds_ptr = NULL;
827 return 0;
830 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
831 const fd_set *fds,
832 int n)
834 int i, nw, j, k;
835 abi_long v;
836 abi_ulong *target_fds;
838 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
839 if (!(target_fds = lock_user(VERIFY_WRITE,
840 target_fds_addr,
841 sizeof(abi_ulong) * nw,
842 0)))
843 return -TARGET_EFAULT;
845 k = 0;
846 for (i = 0; i < nw; i++) {
847 v = 0;
848 for (j = 0; j < TARGET_ABI_BITS; j++) {
849 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
850 k++;
852 __put_user(v, &target_fds[i]);
855 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
857 return 0;
860 #if defined(__alpha__)
861 #define HOST_HZ 1024
862 #else
863 #define HOST_HZ 100
864 #endif
866 static inline abi_long host_to_target_clock_t(long ticks)
868 #if HOST_HZ == TARGET_HZ
869 return ticks;
870 #else
871 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
872 #endif
875 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
876 const struct rusage *rusage)
878 struct target_rusage *target_rusage;
880 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
881 return -TARGET_EFAULT;
882 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
883 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
884 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
885 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
886 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
887 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
888 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
889 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
890 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
891 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
892 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
893 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
894 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
895 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
896 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
897 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
898 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
899 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
900 unlock_user_struct(target_rusage, target_addr, 1);
902 return 0;
905 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
907 abi_ulong target_rlim_swap;
908 rlim_t result;
910 target_rlim_swap = tswapal(target_rlim);
911 if (target_rlim_swap == TARGET_RLIM_INFINITY)
912 return RLIM_INFINITY;
914 result = target_rlim_swap;
915 if (target_rlim_swap != (rlim_t)result)
916 return RLIM_INFINITY;
918 return result;
921 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
923 abi_ulong target_rlim_swap;
924 abi_ulong result;
926 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
927 target_rlim_swap = TARGET_RLIM_INFINITY;
928 else
929 target_rlim_swap = rlim;
930 result = tswapal(target_rlim_swap);
932 return result;
935 static inline int target_to_host_resource(int code)
937 switch (code) {
938 case TARGET_RLIMIT_AS:
939 return RLIMIT_AS;
940 case TARGET_RLIMIT_CORE:
941 return RLIMIT_CORE;
942 case TARGET_RLIMIT_CPU:
943 return RLIMIT_CPU;
944 case TARGET_RLIMIT_DATA:
945 return RLIMIT_DATA;
946 case TARGET_RLIMIT_FSIZE:
947 return RLIMIT_FSIZE;
948 case TARGET_RLIMIT_LOCKS:
949 return RLIMIT_LOCKS;
950 case TARGET_RLIMIT_MEMLOCK:
951 return RLIMIT_MEMLOCK;
952 case TARGET_RLIMIT_MSGQUEUE:
953 return RLIMIT_MSGQUEUE;
954 case TARGET_RLIMIT_NICE:
955 return RLIMIT_NICE;
956 case TARGET_RLIMIT_NOFILE:
957 return RLIMIT_NOFILE;
958 case TARGET_RLIMIT_NPROC:
959 return RLIMIT_NPROC;
960 case TARGET_RLIMIT_RSS:
961 return RLIMIT_RSS;
962 case TARGET_RLIMIT_RTPRIO:
963 return RLIMIT_RTPRIO;
964 case TARGET_RLIMIT_SIGPENDING:
965 return RLIMIT_SIGPENDING;
966 case TARGET_RLIMIT_STACK:
967 return RLIMIT_STACK;
968 default:
969 return code;
973 static inline abi_long copy_from_user_timeval(struct timeval *tv,
974 abi_ulong target_tv_addr)
976 struct target_timeval *target_tv;
978 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
979 return -TARGET_EFAULT;
981 __get_user(tv->tv_sec, &target_tv->tv_sec);
982 __get_user(tv->tv_usec, &target_tv->tv_usec);
984 unlock_user_struct(target_tv, target_tv_addr, 0);
986 return 0;
989 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
990 const struct timeval *tv)
992 struct target_timeval *target_tv;
994 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
995 return -TARGET_EFAULT;
997 __put_user(tv->tv_sec, &target_tv->tv_sec);
998 __put_user(tv->tv_usec, &target_tv->tv_usec);
1000 unlock_user_struct(target_tv, target_tv_addr, 1);
1002 return 0;
1005 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1006 abi_ulong target_tz_addr)
1008 struct target_timezone *target_tz;
1010 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1011 return -TARGET_EFAULT;
1014 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1015 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1017 unlock_user_struct(target_tz, target_tz_addr, 0);
1019 return 0;
1022 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1023 #include <mqueue.h>
1025 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1026 abi_ulong target_mq_attr_addr)
1028 struct target_mq_attr *target_mq_attr;
1030 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1031 target_mq_attr_addr, 1))
1032 return -TARGET_EFAULT;
1034 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1035 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1036 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1037 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1039 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1041 return 0;
1044 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1045 const struct mq_attr *attr)
1047 struct target_mq_attr *target_mq_attr;
1049 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1050 target_mq_attr_addr, 0))
1051 return -TARGET_EFAULT;
1053 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1054 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1055 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1056 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1058 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1060 return 0;
1062 #endif
1064 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1065 /* do_select() must return target values and target errnos. */
1066 static abi_long do_select(int n,
1067 abi_ulong rfd_addr, abi_ulong wfd_addr,
1068 abi_ulong efd_addr, abi_ulong target_tv_addr)
1070 fd_set rfds, wfds, efds;
1071 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1072 struct timeval tv, *tv_ptr;
1073 abi_long ret;
1075 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1076 if (ret) {
1077 return ret;
1079 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1080 if (ret) {
1081 return ret;
1083 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1084 if (ret) {
1085 return ret;
1088 if (target_tv_addr) {
1089 if (copy_from_user_timeval(&tv, target_tv_addr))
1090 return -TARGET_EFAULT;
1091 tv_ptr = &tv;
1092 } else {
1093 tv_ptr = NULL;
1096 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1098 if (!is_error(ret)) {
1099 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1100 return -TARGET_EFAULT;
1101 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1102 return -TARGET_EFAULT;
1103 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1104 return -TARGET_EFAULT;
1106 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1107 return -TARGET_EFAULT;
1110 return ret;
1112 #endif
1114 static abi_long do_pipe2(int host_pipe[], int flags)
1116 #ifdef CONFIG_PIPE2
1117 return pipe2(host_pipe, flags);
1118 #else
1119 return -ENOSYS;
1120 #endif
1123 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1124 int flags, int is_pipe2)
1126 int host_pipe[2];
1127 abi_long ret;
1128 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1130 if (is_error(ret))
1131 return get_errno(ret);
1133 /* Several targets have special calling conventions for the original
1134 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1135 if (!is_pipe2) {
1136 #if defined(TARGET_ALPHA)
1137 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1138 return host_pipe[0];
1139 #elif defined(TARGET_MIPS)
1140 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1141 return host_pipe[0];
1142 #elif defined(TARGET_SH4)
1143 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1144 return host_pipe[0];
1145 #elif defined(TARGET_SPARC)
1146 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1147 return host_pipe[0];
1148 #endif
1151 if (put_user_s32(host_pipe[0], pipedes)
1152 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1153 return -TARGET_EFAULT;
1154 return get_errno(ret);
1157 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1158 abi_ulong target_addr,
1159 socklen_t len)
1161 struct target_ip_mreqn *target_smreqn;
1163 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1164 if (!target_smreqn)
1165 return -TARGET_EFAULT;
1166 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1167 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1168 if (len == sizeof(struct target_ip_mreqn))
1169 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1170 unlock_user(target_smreqn, target_addr, 0);
1172 return 0;
1175 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1176 abi_ulong target_addr,
1177 socklen_t len)
1179 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1180 sa_family_t sa_family;
1181 struct target_sockaddr *target_saddr;
1183 if (fd_trans_target_to_host_addr(fd)) {
1184 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1187 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1188 if (!target_saddr)
1189 return -TARGET_EFAULT;
1191 sa_family = tswap16(target_saddr->sa_family);
1193 /* Oops. The caller might send an incomplete sun_path; sun_path
1194 * must be terminated by \0 (see the manual page), but
1195 * unfortunately it is quite common to specify sockaddr_un
1196 * length as "strlen(x->sun_path)" while it should be
1197 * "strlen(...) + 1". We'll fix that here if needed.
1198 * Linux kernel has a similar feature.
1201 if (sa_family == AF_UNIX) {
1202 if (len < unix_maxlen && len > 0) {
1203 char *cp = (char*)target_saddr;
1205 if ( cp[len-1] && !cp[len] )
1206 len++;
1208 if (len > unix_maxlen)
1209 len = unix_maxlen;
1212 memcpy(addr, target_saddr, len);
1213 addr->sa_family = sa_family;
1214 if (sa_family == AF_PACKET) {
1215 struct target_sockaddr_ll *lladdr;
1217 lladdr = (struct target_sockaddr_ll *)addr;
1218 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1219 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1221 unlock_user(target_saddr, target_addr, 0);
1223 return 0;
1226 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1227 struct sockaddr *addr,
1228 socklen_t len)
1230 struct target_sockaddr *target_saddr;
1232 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1233 if (!target_saddr)
1234 return -TARGET_EFAULT;
1235 memcpy(target_saddr, addr, len);
1236 target_saddr->sa_family = tswap16(addr->sa_family);
1237 unlock_user(target_saddr, target_addr, len);
1239 return 0;
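/* Convert the ancillary data (control messages) attached to a guest msghdr
 * into host form: cmsg_level/cmsg_type are byte-swapped and payloads such as
 * SCM_RIGHTS fd arrays and SCM_CREDENTIALS are converted field by field. */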
1242 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1243 struct target_msghdr *target_msgh)
1245 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1246 abi_long msg_controllen;
1247 abi_ulong target_cmsg_addr;
1248 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1249 socklen_t space = 0;
1251 msg_controllen = tswapal(target_msgh->msg_controllen);
1252 if (msg_controllen < sizeof (struct target_cmsghdr))
1253 goto the_end;
1254 target_cmsg_addr = tswapal(target_msgh->msg_control);
1255 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1256 target_cmsg_start = target_cmsg;
1257 if (!target_cmsg)
1258 return -TARGET_EFAULT;
1260 while (cmsg && target_cmsg) {
1261 void *data = CMSG_DATA(cmsg);
1262 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1264 int len = tswapal(target_cmsg->cmsg_len)
1265 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1267 space += CMSG_SPACE(len);
1268 if (space > msgh->msg_controllen) {
1269 space -= CMSG_SPACE(len);
1270 /* This is a QEMU bug, since we allocated the payload
1271 * area ourselves (unlike overflow in host-to-target
1272 * conversion, which is just the guest giving us a buffer
1273 * that's too small). It can't happen for the payload types
1274 * we currently support; if it becomes an issue in future
1275 * we would need to improve our allocation strategy to
1276 * something more intelligent than "twice the size of the
1277 * target buffer we're reading from".
1279 gemu_log("Host cmsg overflow\n");
1280 break;
1283 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1284 cmsg->cmsg_level = SOL_SOCKET;
1285 } else {
1286 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1288 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1289 cmsg->cmsg_len = CMSG_LEN(len);
1291 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1292 int *fd = (int *)data;
1293 int *target_fd = (int *)target_data;
1294 int i, numfds = len / sizeof(int);
1296 for (i = 0; i < numfds; i++) {
1297 __get_user(fd[i], target_fd + i);
1299 } else if (cmsg->cmsg_level == SOL_SOCKET
1300 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1301 struct ucred *cred = (struct ucred *)data;
1302 struct target_ucred *target_cred =
1303 (struct target_ucred *)target_data;
1305 __get_user(cred->pid, &target_cred->pid);
1306 __get_user(cred->uid, &target_cred->uid);
1307 __get_user(cred->gid, &target_cred->gid);
1308 } else {
1309 gemu_log("Unsupported ancillary data: %d/%d\n",
1310 cmsg->cmsg_level, cmsg->cmsg_type);
1311 memcpy(data, target_data, len);
1314 cmsg = CMSG_NXTHDR(msgh, cmsg);
1315 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1316 target_cmsg_start);
1318 unlock_user(target_cmsg, target_cmsg_addr, 0);
1319 the_end:
1320 msgh->msg_controllen = space;
1321 return 0;
1324 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1325 struct msghdr *msgh)
1327 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1328 abi_long msg_controllen;
1329 abi_ulong target_cmsg_addr;
1330 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1331 socklen_t space = 0;
1333 msg_controllen = tswapal(target_msgh->msg_controllen);
1334 if (msg_controllen < sizeof (struct target_cmsghdr))
1335 goto the_end;
1336 target_cmsg_addr = tswapal(target_msgh->msg_control);
1337 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1338 target_cmsg_start = target_cmsg;
1339 if (!target_cmsg)
1340 return -TARGET_EFAULT;
1342 while (cmsg && target_cmsg) {
1343 void *data = CMSG_DATA(cmsg);
1344 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1346 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1347 int tgt_len, tgt_space;
1349 /* We never copy a half-header but may copy half-data;
1350 * this is Linux's behaviour in put_cmsg(). Note that
1351 * truncation here is a guest problem (which we report
1352 * to the guest via the CTRUNC bit), unlike truncation
1353 * in target_to_host_cmsg, which is a QEMU bug.
1355 if (msg_controllen < sizeof(struct cmsghdr)) {
1356 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1357 break;
1360 if (cmsg->cmsg_level == SOL_SOCKET) {
1361 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1362 } else {
1363 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1365 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1367 tgt_len = TARGET_CMSG_LEN(len);
1369 /* Payload types which need a different size of payload on
1370 * the target must adjust tgt_len here.
1372 switch (cmsg->cmsg_level) {
1373 case SOL_SOCKET:
1374 switch (cmsg->cmsg_type) {
1375 case SO_TIMESTAMP:
1376 tgt_len = sizeof(struct target_timeval);
1377 break;
1378 default:
1379 break;
1381 default:
1382 break;
1385 if (msg_controllen < tgt_len) {
1386 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1387 tgt_len = msg_controllen;
1390 /* We must now copy-and-convert len bytes of payload
1391 * into tgt_len bytes of destination space. Bear in mind
1392 * that in both source and destination we may be dealing
1393 * with a truncated value!
1395 switch (cmsg->cmsg_level) {
1396 case SOL_SOCKET:
1397 switch (cmsg->cmsg_type) {
1398 case SCM_RIGHTS:
1400 int *fd = (int *)data;
1401 int *target_fd = (int *)target_data;
1402 int i, numfds = tgt_len / sizeof(int);
1404 for (i = 0; i < numfds; i++) {
1405 __put_user(fd[i], target_fd + i);
1407 break;
1409 case SO_TIMESTAMP:
1411 struct timeval *tv = (struct timeval *)data;
1412 struct target_timeval *target_tv =
1413 (struct target_timeval *)target_data;
1415 if (len != sizeof(struct timeval) ||
1416 tgt_len != sizeof(struct target_timeval)) {
1417 goto unimplemented;
1420 /* copy struct timeval to target */
1421 __put_user(tv->tv_sec, &target_tv->tv_sec);
1422 __put_user(tv->tv_usec, &target_tv->tv_usec);
1423 break;
1425 case SCM_CREDENTIALS:
1427 struct ucred *cred = (struct ucred *)data;
1428 struct target_ucred *target_cred =
1429 (struct target_ucred *)target_data;
1431 __put_user(cred->pid, &target_cred->pid);
1432 __put_user(cred->uid, &target_cred->uid);
1433 __put_user(cred->gid, &target_cred->gid);
1434 break;
1436 default:
1437 goto unimplemented;
1439 break;
1441 default:
1442 unimplemented:
1443 gemu_log("Unsupported ancillary data: %d/%d\n",
1444 cmsg->cmsg_level, cmsg->cmsg_type);
1445 memcpy(target_data, data, MIN(len, tgt_len));
1446 if (tgt_len > len) {
1447 memset(target_data + len, 0, tgt_len - len);
1451 target_cmsg->cmsg_len = tswapal(tgt_len);
1452 tgt_space = TARGET_CMSG_SPACE(len);
1453 if (msg_controllen < tgt_space) {
1454 tgt_space = msg_controllen;
1456 msg_controllen -= tgt_space;
1457 space += tgt_space;
1458 cmsg = CMSG_NXTHDR(msgh, cmsg);
1459 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1460 target_cmsg_start);
1462 unlock_user(target_cmsg, target_cmsg_addr, space);
1463 the_end:
1464 target_msgh->msg_controllen = tswapal(space);
1465 return 0;
1468 /* do_setsockopt() must return target values and target errnos. */
1469 static abi_long do_setsockopt(int sockfd, int level, int optname,
1470 abi_ulong optval_addr, socklen_t optlen)
1472 abi_long ret;
1473 int val;
1474 struct ip_mreqn *ip_mreq;
1475 struct ip_mreq_source *ip_mreq_source;
1477 switch(level) {
1478 case SOL_TCP:
1479 /* TCP options all take an 'int' value. */
1480 if (optlen < sizeof(uint32_t))
1481 return -TARGET_EINVAL;
1483 if (get_user_u32(val, optval_addr))
1484 return -TARGET_EFAULT;
1485 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1486 break;
1487 case SOL_IP:
1488 switch(optname) {
1489 case IP_TOS:
1490 case IP_TTL:
1491 case IP_HDRINCL:
1492 case IP_ROUTER_ALERT:
1493 case IP_RECVOPTS:
1494 case IP_RETOPTS:
1495 case IP_PKTINFO:
1496 case IP_MTU_DISCOVER:
1497 case IP_RECVERR:
1498 case IP_RECVTOS:
1499 #ifdef IP_FREEBIND
1500 case IP_FREEBIND:
1501 #endif
1502 case IP_MULTICAST_TTL:
1503 case IP_MULTICAST_LOOP:
1504 val = 0;
1505 if (optlen >= sizeof(uint32_t)) {
1506 if (get_user_u32(val, optval_addr))
1507 return -TARGET_EFAULT;
1508 } else if (optlen >= 1) {
1509 if (get_user_u8(val, optval_addr))
1510 return -TARGET_EFAULT;
1512 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1513 break;
1514 case IP_ADD_MEMBERSHIP:
1515 case IP_DROP_MEMBERSHIP:
1516 if (optlen < sizeof (struct target_ip_mreq) ||
1517 optlen > sizeof (struct target_ip_mreqn))
1518 return -TARGET_EINVAL;
1520 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1521 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1522 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1523 break;
1525 case IP_BLOCK_SOURCE:
1526 case IP_UNBLOCK_SOURCE:
1527 case IP_ADD_SOURCE_MEMBERSHIP:
1528 case IP_DROP_SOURCE_MEMBERSHIP:
1529 if (optlen != sizeof (struct target_ip_mreq_source))
1530 return -TARGET_EINVAL;
1532 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1533 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1534 unlock_user (ip_mreq_source, optval_addr, 0);
1535 break;
1537 default:
1538 goto unimplemented;
1540 break;
1541 case SOL_IPV6:
1542 switch (optname) {
1543 case IPV6_MTU_DISCOVER:
1544 case IPV6_MTU:
1545 case IPV6_V6ONLY:
1546 case IPV6_RECVPKTINFO:
1547 val = 0;
1548 if (optlen < sizeof(uint32_t)) {
1549 return -TARGET_EINVAL;
1551 if (get_user_u32(val, optval_addr)) {
1552 return -TARGET_EFAULT;
1554 ret = get_errno(setsockopt(sockfd, level, optname,
1555 &val, sizeof(val)));
1556 break;
1557 default:
1558 goto unimplemented;
1560 break;
1561 case SOL_RAW:
1562 switch (optname) {
1563 case ICMP_FILTER:
1564 /* struct icmp_filter takes a u32 value */
1565 if (optlen < sizeof(uint32_t)) {
1566 return -TARGET_EINVAL;
1569 if (get_user_u32(val, optval_addr)) {
1570 return -TARGET_EFAULT;
1572 ret = get_errno(setsockopt(sockfd, level, optname,
1573 &val, sizeof(val)));
1574 break;
1576 default:
1577 goto unimplemented;
1579 break;
1580 case TARGET_SOL_SOCKET:
1581 switch (optname) {
1582 case TARGET_SO_RCVTIMEO:
1584 struct timeval tv;
1586 optname = SO_RCVTIMEO;
1588 set_timeout:
1589 if (optlen != sizeof(struct target_timeval)) {
1590 return -TARGET_EINVAL;
1593 if (copy_from_user_timeval(&tv, optval_addr)) {
1594 return -TARGET_EFAULT;
1597 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1598 &tv, sizeof(tv)));
1599 return ret;
1601 case TARGET_SO_SNDTIMEO:
1602 optname = SO_SNDTIMEO;
1603 goto set_timeout;
1604 case TARGET_SO_ATTACH_FILTER:
1606 struct target_sock_fprog *tfprog;
1607 struct target_sock_filter *tfilter;
1608 struct sock_fprog fprog;
1609 struct sock_filter *filter;
1610 int i;
1612 if (optlen != sizeof(*tfprog)) {
1613 return -TARGET_EINVAL;
1615 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1616 return -TARGET_EFAULT;
1618 if (!lock_user_struct(VERIFY_READ, tfilter,
1619 tswapal(tfprog->filter), 0)) {
1620 unlock_user_struct(tfprog, optval_addr, 1);
1621 return -TARGET_EFAULT;
1624 fprog.len = tswap16(tfprog->len);
1625 filter = g_try_new(struct sock_filter, fprog.len);
1626 if (filter == NULL) {
1627 unlock_user_struct(tfilter, tfprog->filter, 1);
1628 unlock_user_struct(tfprog, optval_addr, 1);
1629 return -TARGET_ENOMEM;
1631 for (i = 0; i < fprog.len; i++) {
1632 filter[i].code = tswap16(tfilter[i].code);
1633 filter[i].jt = tfilter[i].jt;
1634 filter[i].jf = tfilter[i].jf;
1635 filter[i].k = tswap32(tfilter[i].k);
1637 fprog.filter = filter;
1639 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1640 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1641 g_free(filter);
1643 unlock_user_struct(tfilter, tfprog->filter, 1);
1644 unlock_user_struct(tfprog, optval_addr, 1);
1645 return ret;
1647 case TARGET_SO_BINDTODEVICE:
1649 char *dev_ifname, *addr_ifname;
1651 if (optlen > IFNAMSIZ - 1) {
1652 optlen = IFNAMSIZ - 1;
1654 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1655 if (!dev_ifname) {
1656 return -TARGET_EFAULT;
1658 optname = SO_BINDTODEVICE;
1659 addr_ifname = alloca(IFNAMSIZ);
1660 memcpy(addr_ifname, dev_ifname, optlen);
1661 addr_ifname[optlen] = 0;
1662 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1663 addr_ifname, optlen));
1664 unlock_user (dev_ifname, optval_addr, 0);
1665 return ret;
1667 /* Options with 'int' argument. */
1668 case TARGET_SO_DEBUG:
1669 optname = SO_DEBUG;
1670 break;
1671 case TARGET_SO_REUSEADDR:
1672 optname = SO_REUSEADDR;
1673 break;
1674 case TARGET_SO_TYPE:
1675 optname = SO_TYPE;
1676 break;
1677 case TARGET_SO_ERROR:
1678 optname = SO_ERROR;
1679 break;
1680 case TARGET_SO_DONTROUTE:
1681 optname = SO_DONTROUTE;
1682 break;
1683 case TARGET_SO_BROADCAST:
1684 optname = SO_BROADCAST;
1685 break;
1686 case TARGET_SO_SNDBUF:
1687 optname = SO_SNDBUF;
1688 break;
1689 case TARGET_SO_SNDBUFFORCE:
1690 optname = SO_SNDBUFFORCE;
1691 break;
1692 case TARGET_SO_RCVBUF:
1693 optname = SO_RCVBUF;
1694 break;
1695 case TARGET_SO_RCVBUFFORCE:
1696 optname = SO_RCVBUFFORCE;
1697 break;
1698 case TARGET_SO_KEEPALIVE:
1699 optname = SO_KEEPALIVE;
1700 break;
1701 case TARGET_SO_OOBINLINE:
1702 optname = SO_OOBINLINE;
1703 break;
1704 case TARGET_SO_NO_CHECK:
1705 optname = SO_NO_CHECK;
1706 break;
1707 case TARGET_SO_PRIORITY:
1708 optname = SO_PRIORITY;
1709 break;
1710 #ifdef SO_BSDCOMPAT
1711 case TARGET_SO_BSDCOMPAT:
1712 optname = SO_BSDCOMPAT;
1713 break;
1714 #endif
1715 case TARGET_SO_PASSCRED:
1716 optname = SO_PASSCRED;
1717 break;
1718 case TARGET_SO_PASSSEC:
1719 optname = SO_PASSSEC;
1720 break;
1721 case TARGET_SO_TIMESTAMP:
1722 optname = SO_TIMESTAMP;
1723 break;
1724 case TARGET_SO_RCVLOWAT:
1725 optname = SO_RCVLOWAT;
1726 break;
1728 default:
1729 goto unimplemented;
1731 if (optlen < sizeof(uint32_t))
1732 return -TARGET_EINVAL;
1734 if (get_user_u32(val, optval_addr))
1735 return -TARGET_EFAULT;
1736 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1737 break;
1738 default:
1739 unimplemented:
1740 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1741 ret = -TARGET_ENOPROTOOPT;
1743 return ret;
1746 /* do_getsockopt() must return target values and target errnos. */
1747 static abi_long do_getsockopt(int sockfd, int level, int optname,
1748 abi_ulong optval_addr, abi_ulong optlen)
1750 abi_long ret;
1751 int len, val;
1752 socklen_t lv;
1754 switch(level) {
1755 case TARGET_SOL_SOCKET:
1756 level = SOL_SOCKET;
1757 switch (optname) {
1758 /* These don't just return a single integer */
1759 case TARGET_SO_LINGER:
1760 case TARGET_SO_RCVTIMEO:
1761 case TARGET_SO_SNDTIMEO:
1762 case TARGET_SO_PEERNAME:
1763 goto unimplemented;
1764 case TARGET_SO_PEERCRED: {
1765 struct ucred cr;
1766 socklen_t crlen;
1767 struct target_ucred *tcr;
1769 if (get_user_u32(len, optlen)) {
1770 return -TARGET_EFAULT;
1772 if (len < 0) {
1773 return -TARGET_EINVAL;
1776 crlen = sizeof(cr);
1777 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1778 &cr, &crlen));
1779 if (ret < 0) {
1780 return ret;
1782 if (len > crlen) {
1783 len = crlen;
1785 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1786 return -TARGET_EFAULT;
1788 __put_user(cr.pid, &tcr->pid);
1789 __put_user(cr.uid, &tcr->uid);
1790 __put_user(cr.gid, &tcr->gid);
1791 unlock_user_struct(tcr, optval_addr, 1);
1792 if (put_user_u32(len, optlen)) {
1793 return -TARGET_EFAULT;
1795 break;
1797 /* Options with 'int' argument. */
1798 case TARGET_SO_DEBUG:
1799 optname = SO_DEBUG;
1800 goto int_case;
1801 case TARGET_SO_REUSEADDR:
1802 optname = SO_REUSEADDR;
1803 goto int_case;
1804 case TARGET_SO_TYPE:
1805 optname = SO_TYPE;
1806 goto int_case;
1807 case TARGET_SO_ERROR:
1808 optname = SO_ERROR;
1809 goto int_case;
1810 case TARGET_SO_DONTROUTE:
1811 optname = SO_DONTROUTE;
1812 goto int_case;
1813 case TARGET_SO_BROADCAST:
1814 optname = SO_BROADCAST;
1815 goto int_case;
1816 case TARGET_SO_SNDBUF:
1817 optname = SO_SNDBUF;
1818 goto int_case;
1819 case TARGET_SO_RCVBUF:
1820 optname = SO_RCVBUF;
1821 goto int_case;
1822 case TARGET_SO_KEEPALIVE:
1823 optname = SO_KEEPALIVE;
1824 goto int_case;
1825 case TARGET_SO_OOBINLINE:
1826 optname = SO_OOBINLINE;
1827 goto int_case;
1828 case TARGET_SO_NO_CHECK:
1829 optname = SO_NO_CHECK;
1830 goto int_case;
1831 case TARGET_SO_PRIORITY:
1832 optname = SO_PRIORITY;
1833 goto int_case;
1834 #ifdef SO_BSDCOMPAT
1835 case TARGET_SO_BSDCOMPAT:
1836 optname = SO_BSDCOMPAT;
1837 goto int_case;
1838 #endif
1839 case TARGET_SO_PASSCRED:
1840 optname = SO_PASSCRED;
1841 goto int_case;
1842 case TARGET_SO_TIMESTAMP:
1843 optname = SO_TIMESTAMP;
1844 goto int_case;
1845 case TARGET_SO_RCVLOWAT:
1846 optname = SO_RCVLOWAT;
1847 goto int_case;
1848 case TARGET_SO_ACCEPTCONN:
1849 optname = SO_ACCEPTCONN;
1850 goto int_case;
1851 default:
1852 goto int_case;
1854 break;
1855 case SOL_TCP:
1856 /* TCP options all take an 'int' value. */
1857 int_case:
1858 if (get_user_u32(len, optlen))
1859 return -TARGET_EFAULT;
1860 if (len < 0)
1861 return -TARGET_EINVAL;
1862 lv = sizeof(lv);
1863 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1864 if (ret < 0)
1865 return ret;
1866 if (optname == SO_TYPE) {
1867 val = host_to_target_sock_type(val);
1869 if (len > lv)
1870 len = lv;
1871 if (len == 4) {
1872 if (put_user_u32(val, optval_addr))
1873 return -TARGET_EFAULT;
1874 } else {
1875 if (put_user_u8(val, optval_addr))
1876 return -TARGET_EFAULT;
1878 if (put_user_u32(len, optlen))
1879 return -TARGET_EFAULT;
1880 break;
1881 case SOL_IP:
1882 switch(optname) {
1883 case IP_TOS:
1884 case IP_TTL:
1885 case IP_HDRINCL:
1886 case IP_ROUTER_ALERT:
1887 case IP_RECVOPTS:
1888 case IP_RETOPTS:
1889 case IP_PKTINFO:
1890 case IP_MTU_DISCOVER:
1891 case IP_RECVERR:
1892 case IP_RECVTOS:
1893 #ifdef IP_FREEBIND
1894 case IP_FREEBIND:
1895 #endif
1896 case IP_MULTICAST_TTL:
1897 case IP_MULTICAST_LOOP:
1898 if (get_user_u32(len, optlen))
1899 return -TARGET_EFAULT;
1900 if (len < 0)
1901 return -TARGET_EINVAL;
1902 lv = sizeof(lv);
1903 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1904 if (ret < 0)
1905 return ret;
1906 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1907 len = 1;
1908 if (put_user_u32(len, optlen)
1909 || put_user_u8(val, optval_addr))
1910 return -TARGET_EFAULT;
1911 } else {
1912 if (len > sizeof(int))
1913 len = sizeof(int);
1914 if (put_user_u32(len, optlen)
1915 || put_user_u32(val, optval_addr))
1916 return -TARGET_EFAULT;
1918 break;
1919 default:
1920 ret = -TARGET_ENOPROTOOPT;
1921 break;
1923 break;
1924 default:
1925 unimplemented:
1926 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1927 level, optname);
1928 ret = -TARGET_EOPNOTSUPP;
1929 break;
1931 return ret;
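/* Build a host struct iovec array from the guest target_iovec array at
 * target_addr, locking each guest buffer into host memory. On failure errno
 * is set and NULL is returned; on success the caller must release the
 * buffers again with unlock_iovec(). */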
1934 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1935 int count, int copy)
1937 struct target_iovec *target_vec;
1938 struct iovec *vec;
1939 abi_ulong total_len, max_len;
1940 int i;
1941 int err = 0;
1942 bool bad_address = false;
1944 if (count == 0) {
1945 errno = 0;
1946 return NULL;
1948 if (count < 0 || count > IOV_MAX) {
1949 errno = EINVAL;
1950 return NULL;
1953 vec = g_try_new0(struct iovec, count);
1954 if (vec == NULL) {
1955 errno = ENOMEM;
1956 return NULL;
1959 target_vec = lock_user(VERIFY_READ, target_addr,
1960 count * sizeof(struct target_iovec), 1);
1961 if (target_vec == NULL) {
1962 err = EFAULT;
1963 goto fail2;
1966 /* ??? If host page size > target page size, this will result in a
1967 value larger than what we can actually support. */
1968 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1969 total_len = 0;
1971 for (i = 0; i < count; i++) {
1972 abi_ulong base = tswapal(target_vec[i].iov_base);
1973 abi_long len = tswapal(target_vec[i].iov_len);
1975 if (len < 0) {
1976 err = EINVAL;
1977 goto fail;
1978 } else if (len == 0) {
1979 /* Zero length pointer is ignored. */
1980 vec[i].iov_base = 0;
1981 } else {
1982 vec[i].iov_base = lock_user(type, base, len, copy);
1983 /* If the first buffer pointer is bad, this is a fault. But
1984 * subsequent bad buffers will result in a partial write; this
1985 * is realized by filling the vector with null pointers and
1986 * zero lengths. */
1987 if (!vec[i].iov_base) {
1988 if (i == 0) {
1989 err = EFAULT;
1990 goto fail;
1991 } else {
1992 bad_address = true;
1995 if (bad_address) {
1996 len = 0;
1998 if (len > max_len - total_len) {
1999 len = max_len - total_len;
2002 vec[i].iov_len = len;
2003 total_len += len;
2006 unlock_user(target_vec, target_addr, 0);
2007 return vec;
2009 fail:
2010 while (--i >= 0) {
2011 if (tswapal(target_vec[i].iov_len) > 0) {
2012 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2015 unlock_user(target_vec, target_addr, 0);
2016 fail2:
2017 g_free(vec);
2018 errno = err;
2019 return NULL;
2022 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2023 int count, int copy)
2025 struct target_iovec *target_vec;
2026 int i;
2028 target_vec = lock_user(VERIFY_READ, target_addr,
2029 count * sizeof(struct target_iovec), 1);
2030 if (target_vec) {
2031 for (i = 0; i < count; i++) {
2032 abi_ulong base = tswapal(target_vec[i].iov_base);
2033 abi_long len = tswapal(target_vec[i].iov_len);
2034 if (len < 0) {
2035 break;
2037 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2039 unlock_user(target_vec, target_addr, 0);
2042 g_free(vec);
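/* lock_iovec() turns unreadable buffers after the first into zero-length
 * entries so readv()/writev() perform a partial transfer instead of failing
 * outright.  A host-only sketch of why that works (hypothetical demo,
 * assumes <sys/uio.h>): the kernel simply skips zero-length elements.
 */
static ssize_t example_partial_writev(int fd)
{
    char buf[] = "first";
    struct iovec iov[2] = {
        { .iov_base = buf,  .iov_len = sizeof(buf) - 1 },
        { .iov_base = NULL, .iov_len = 0 },   /* stands in for a bad buffer */
    };

    /* Only the first element is transferred; the zero-length one is ignored. */
    return writev(fd, iov, 2);
}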
2045 static inline int target_to_host_sock_type(int *type)
2047 int host_type = 0;
2048 int target_type = *type;
2050 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2051 case TARGET_SOCK_DGRAM:
2052 host_type = SOCK_DGRAM;
2053 break;
2054 case TARGET_SOCK_STREAM:
2055 host_type = SOCK_STREAM;
2056 break;
2057 default:
2058 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2059 break;
2061 if (target_type & TARGET_SOCK_CLOEXEC) {
2062 #if defined(SOCK_CLOEXEC)
2063 host_type |= SOCK_CLOEXEC;
2064 #else
2065 return -TARGET_EINVAL;
2066 #endif
2068 if (target_type & TARGET_SOCK_NONBLOCK) {
2069 #if defined(SOCK_NONBLOCK)
2070 host_type |= SOCK_NONBLOCK;
2071 #elif !defined(O_NONBLOCK)
2072 return -TARGET_EINVAL;
2073 #endif
2075 *type = host_type;
2076 return 0;
2079 /* Try to emulate socket type flags after socket creation. */
2080 static int sock_flags_fixup(int fd, int target_type)
2082 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2083 if (target_type & TARGET_SOCK_NONBLOCK) {
2084 int flags = fcntl(fd, F_GETFL);
2085 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2086 close(fd);
2087 return -TARGET_EINVAL;
2090 #endif
2091 return fd;
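/* sock_flags_fixup() emulates SOCK_NONBLOCK on hosts that lack it by setting
 * O_NONBLOCK with fcntl() after the socket has been created.  A standalone
 * sketch of the same idiom (hypothetical helper, assumes <fcntl.h>):
 */
static int example_set_nonblocking(int fd)
{
    int flags = fcntl(fd, F_GETFL);

    if (flags == -1) {
        return -1;
    }
    /* Same effect as passing SOCK_NONBLOCK at socket() time. */
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}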
2094 static abi_long packet_target_to_host_sockaddr(void *host_addr,
2095 abi_ulong target_addr,
2096 socklen_t len)
2098 struct sockaddr *addr = host_addr;
2099 struct target_sockaddr *target_saddr;
2101 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
2102 if (!target_saddr) {
2103 return -TARGET_EFAULT;
2106 memcpy(addr, target_saddr, len);
2107 addr->sa_family = tswap16(target_saddr->sa_family);
2108 /* spkt_protocol is big-endian */
2110 unlock_user(target_saddr, target_addr, 0);
2111 return 0;
2114 static TargetFdTrans target_packet_trans = {
2115 .target_to_host_addr = packet_target_to_host_sockaddr,
2118 /* do_socket() Must return target values and target errnos. */
2119 static abi_long do_socket(int domain, int type, int protocol)
2121 int target_type = type;
2122 int ret;
2124 ret = target_to_host_sock_type(&type);
2125 if (ret) {
2126 return ret;
2129 if (domain == PF_NETLINK)
2130 return -TARGET_EAFNOSUPPORT;
2132 if (domain == AF_PACKET ||
2133 (domain == AF_INET && type == SOCK_PACKET)) {
2134 protocol = tswap16(protocol);
2137 ret = get_errno(socket(domain, type, protocol));
2138 if (ret >= 0) {
2139 ret = sock_flags_fixup(ret, target_type);
2140 if (type == SOCK_PACKET) {
2141 /* Handle an obsolete case:
2142 * if the socket type is SOCK_PACKET, the address is bound by interface name
2144 fd_trans_register(ret, &target_packet_trans);
2147 return ret;
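/* do_socket() byteswaps the protocol for packet sockets because AF_PACKET
 * protocol numbers are specified in network byte order.  The equivalent host
 * call looks like this (hypothetical sketch, assumes <linux/if_ether.h> for
 * ETH_P_ALL and <netinet/in.h> for htons()):
 */
static int example_packet_socket(void)
{
    /* The protocol argument for packet sockets is in network byte order. */
    return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
}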
2150 /* do_bind() Must return target values and target errnos. */
2151 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2152 socklen_t addrlen)
2154 void *addr;
2155 abi_long ret;
2157 if ((int)addrlen < 0) {
2158 return -TARGET_EINVAL;
2161 addr = alloca(addrlen+1);
2163 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2164 if (ret)
2165 return ret;
2167 return get_errno(bind(sockfd, addr, addrlen));
2170 /* do_connect() Must return target values and target errnos. */
2171 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2172 socklen_t addrlen)
2174 void *addr;
2175 abi_long ret;
2177 if ((int)addrlen < 0) {
2178 return -TARGET_EINVAL;
2181 addr = alloca(addrlen+1);
2183 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2184 if (ret)
2185 return ret;
2187 return get_errno(connect(sockfd, addr, addrlen));
2190 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2191 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2192 int flags, int send)
2194 abi_long ret, len;
2195 struct msghdr msg;
2196 int count;
2197 struct iovec *vec;
2198 abi_ulong target_vec;
2200 if (msgp->msg_name) {
2201 msg.msg_namelen = tswap32(msgp->msg_namelen);
2202 msg.msg_name = alloca(msg.msg_namelen+1);
2203 ret = target_to_host_sockaddr(fd, msg.msg_name,
2204 tswapal(msgp->msg_name),
2205 msg.msg_namelen);
2206 if (ret) {
2207 goto out2;
2209 } else {
2210 msg.msg_name = NULL;
2211 msg.msg_namelen = 0;
2213 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
2214 msg.msg_control = alloca(msg.msg_controllen);
2215 msg.msg_flags = tswap32(msgp->msg_flags);
2217 count = tswapal(msgp->msg_iovlen);
2218 target_vec = tswapal(msgp->msg_iov);
2219 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
2220 target_vec, count, send);
2221 if (vec == NULL) {
2222 ret = -host_to_target_errno(errno);
2223 goto out2;
2225 msg.msg_iovlen = count;
2226 msg.msg_iov = vec;
2228 if (send) {
2229 ret = target_to_host_cmsg(&msg, msgp);
2230 if (ret == 0)
2231 ret = get_errno(sendmsg(fd, &msg, flags));
2232 } else {
2233 ret = get_errno(recvmsg(fd, &msg, flags));
2234 if (!is_error(ret)) {
2235 len = ret;
2236 ret = host_to_target_cmsg(msgp, &msg);
2237 if (!is_error(ret)) {
2238 msgp->msg_namelen = tswap32(msg.msg_namelen);
2239 if (msg.msg_name != NULL) {
2240 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
2241 msg.msg_name, msg.msg_namelen);
2242 if (ret) {
2243 goto out;
2247 ret = len;
2252 out:
2253 unlock_iovec(vec, target_vec, count, !send);
2254 out2:
2255 return ret;
2258 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2259 int flags, int send)
2261 abi_long ret;
2262 struct target_msghdr *msgp;
2264 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2265 msgp,
2266 target_msg,
2267 send ? 1 : 0)) {
2268 return -TARGET_EFAULT;
2270 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2271 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2272 return ret;
2275 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
2276 * so it might not have this *mmsg-specific flag either.
2278 #ifndef MSG_WAITFORONE
2279 #define MSG_WAITFORONE 0x10000
2280 #endif
2282 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2283 unsigned int vlen, unsigned int flags,
2284 int send)
2286 struct target_mmsghdr *mmsgp;
2287 abi_long ret = 0;
2288 int i;
2290 if (vlen > UIO_MAXIOV) {
2291 vlen = UIO_MAXIOV;
2294 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2295 if (!mmsgp) {
2296 return -TARGET_EFAULT;
2299 for (i = 0; i < vlen; i++) {
2300 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2301 if (is_error(ret)) {
2302 break;
2304 mmsgp[i].msg_len = tswap32(ret);
2305 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2306 if (flags & MSG_WAITFORONE) {
2307 flags |= MSG_DONTWAIT;
2311 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2313 /* Return number of datagrams sent if we sent any at all;
2314 * otherwise return the error.
2316 if (i) {
2317 return i;
2319 return ret;
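/* do_sendrecvmmsg() emulates sendmmsg()/recvmmsg() by looping over the
 * per-message handler; MSG_WAITFORONE is honoured by OR-ing in MSG_DONTWAIT
 * once the first datagram has arrived.  A host-side sketch of that loop
 * (hypothetical helper, assumes _GNU_SOURCE and <sys/socket.h> for
 * struct mmsghdr):
 */
static int example_recv_many(int fd, struct mmsghdr *vec, unsigned int vlen,
                             int flags)
{
    unsigned int i;

    for (i = 0; i < vlen; i++) {
        ssize_t r = recvmsg(fd, &vec[i].msg_hdr, flags);
        if (r < 0) {
            break;                     /* report whatever already arrived */
        }
        vec[i].msg_len = r;
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;     /* don't block for further datagrams */
        }
    }
    return i ? (int)i : -1;
}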
2322 /* If we don't have a system accept4() then just call accept.
2323 * The callsites to do_accept4() will ensure that they don't
2324 * pass a non-zero flags argument in this config.
2326 #ifndef CONFIG_ACCEPT4
2327 static inline int accept4(int sockfd, struct sockaddr *addr,
2328 socklen_t *addrlen, int flags)
2330 assert(flags == 0);
2331 return accept(sockfd, addr, addrlen);
2333 #endif
2335 /* do_accept4() Must return target values and target errnos. */
2336 static abi_long do_accept4(int fd, abi_ulong target_addr,
2337 abi_ulong target_addrlen_addr, int flags)
2339 socklen_t addrlen;
2340 void *addr;
2341 abi_long ret;
2342 int host_flags;
2344 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
2346 if (target_addr == 0) {
2347 return get_errno(accept4(fd, NULL, NULL, host_flags));
2350 /* Linux returns EINVAL if the addrlen pointer is invalid */
2351 if (get_user_u32(addrlen, target_addrlen_addr))
2352 return -TARGET_EINVAL;
2354 if ((int)addrlen < 0) {
2355 return -TARGET_EINVAL;
2358 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2359 return -TARGET_EINVAL;
2361 addr = alloca(addrlen);
2363 ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
2364 if (!is_error(ret)) {
2365 host_to_target_sockaddr(target_addr, addr, addrlen);
2366 if (put_user_u32(addrlen, target_addrlen_addr))
2367 ret = -TARGET_EFAULT;
2369 return ret;
2372 /* do_getpeername() Must return target values and target errnos. */
2373 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2374 abi_ulong target_addrlen_addr)
2376 socklen_t addrlen;
2377 void *addr;
2378 abi_long ret;
2380 if (get_user_u32(addrlen, target_addrlen_addr))
2381 return -TARGET_EFAULT;
2383 if ((int)addrlen < 0) {
2384 return -TARGET_EINVAL;
2387 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2388 return -TARGET_EFAULT;
2390 addr = alloca(addrlen);
2392 ret = get_errno(getpeername(fd, addr, &addrlen));
2393 if (!is_error(ret)) {
2394 host_to_target_sockaddr(target_addr, addr, addrlen);
2395 if (put_user_u32(addrlen, target_addrlen_addr))
2396 ret = -TARGET_EFAULT;
2398 return ret;
2401 /* do_getsockname() Must return target values and target errnos. */
2402 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2403 abi_ulong target_addrlen_addr)
2405 socklen_t addrlen;
2406 void *addr;
2407 abi_long ret;
2409 if (get_user_u32(addrlen, target_addrlen_addr))
2410 return -TARGET_EFAULT;
2412 if ((int)addrlen < 0) {
2413 return -TARGET_EINVAL;
2416 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2417 return -TARGET_EFAULT;
2419 addr = alloca(addrlen);
2421 ret = get_errno(getsockname(fd, addr, &addrlen));
2422 if (!is_error(ret)) {
2423 host_to_target_sockaddr(target_addr, addr, addrlen);
2424 if (put_user_u32(addrlen, target_addrlen_addr))
2425 ret = -TARGET_EFAULT;
2427 return ret;
2430 /* do_socketpair() Must return target values and target errnos. */
2431 static abi_long do_socketpair(int domain, int type, int protocol,
2432 abi_ulong target_tab_addr)
2434 int tab[2];
2435 abi_long ret;
2437 target_to_host_sock_type(&type);
2439 ret = get_errno(socketpair(domain, type, protocol, tab));
2440 if (!is_error(ret)) {
2441 if (put_user_s32(tab[0], target_tab_addr)
2442 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2443 ret = -TARGET_EFAULT;
2445 return ret;
2448 /* do_sendto() Must return target values and target errnos. */
2449 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2450 abi_ulong target_addr, socklen_t addrlen)
2452 void *addr;
2453 void *host_msg;
2454 abi_long ret;
2456 if ((int)addrlen < 0) {
2457 return -TARGET_EINVAL;
2460 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2461 if (!host_msg)
2462 return -TARGET_EFAULT;
2463 if (target_addr) {
2464 addr = alloca(addrlen+1);
2465 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
2466 if (ret) {
2467 unlock_user(host_msg, msg, 0);
2468 return ret;
2470 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2471 } else {
2472 ret = get_errno(send(fd, host_msg, len, flags));
2474 unlock_user(host_msg, msg, 0);
2475 return ret;
2478 /* do_recvfrom() Must return target values and target errnos. */
2479 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2480 abi_ulong target_addr,
2481 abi_ulong target_addrlen)
2483 socklen_t addrlen;
2484 void *addr;
2485 void *host_msg;
2486 abi_long ret;
2488 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2489 if (!host_msg)
2490 return -TARGET_EFAULT;
2491 if (target_addr) {
2492 if (get_user_u32(addrlen, target_addrlen)) {
2493 ret = -TARGET_EFAULT;
2494 goto fail;
2496 if ((int)addrlen < 0) {
2497 ret = -TARGET_EINVAL;
2498 goto fail;
2500 addr = alloca(addrlen);
2501 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2502 } else {
2503 addr = NULL; /* To keep compiler quiet. */
2504 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2506 if (!is_error(ret)) {
2507 if (target_addr) {
2508 host_to_target_sockaddr(target_addr, addr, addrlen);
2509 if (put_user_u32(addrlen, target_addrlen)) {
2510 ret = -TARGET_EFAULT;
2511 goto fail;
2514 unlock_user(host_msg, msg, len);
2515 } else {
2516 fail:
2517 unlock_user(host_msg, msg, 0);
2519 return ret;
2522 #ifdef TARGET_NR_socketcall
2523 /* do_socketcall() Must return target values and target errnos. */
2524 static abi_long do_socketcall(int num, abi_ulong vptr)
2526 static const unsigned ac[] = { /* number of arguments per call */
2527 [SOCKOP_socket] = 3, /* domain, type, protocol */
2528 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2529 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2530 [SOCKOP_listen] = 2, /* sockfd, backlog */
2531 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2532 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2533 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2534 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2535 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2536 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2537 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2538 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2539 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2540 [SOCKOP_shutdown] = 2, /* sockfd, how */
2541 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2542 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2543 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2544 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
2545 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2546 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2548 abi_long a[6]; /* max 6 args */
2550 /* first, collect the arguments in a[] according to ac[] */
2551 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2552 unsigned i;
2553 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2554 for (i = 0; i < ac[num]; ++i) {
2555 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2556 return -TARGET_EFAULT;
2561 /* now when we have the args, actually handle the call */
2562 switch (num) {
2563 case SOCKOP_socket: /* domain, type, protocol */
2564 return do_socket(a[0], a[1], a[2]);
2565 case SOCKOP_bind: /* sockfd, addr, addrlen */
2566 return do_bind(a[0], a[1], a[2]);
2567 case SOCKOP_connect: /* sockfd, addr, addrlen */
2568 return do_connect(a[0], a[1], a[2]);
2569 case SOCKOP_listen: /* sockfd, backlog */
2570 return get_errno(listen(a[0], a[1]));
2571 case SOCKOP_accept: /* sockfd, addr, addrlen */
2572 return do_accept4(a[0], a[1], a[2], 0);
2573 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2574 return do_accept4(a[0], a[1], a[2], a[3]);
2575 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2576 return do_getsockname(a[0], a[1], a[2]);
2577 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2578 return do_getpeername(a[0], a[1], a[2]);
2579 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2580 return do_socketpair(a[0], a[1], a[2], a[3]);
2581 case SOCKOP_send: /* sockfd, msg, len, flags */
2582 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2583 case SOCKOP_recv: /* sockfd, msg, len, flags */
2584 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2585 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2586 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2587 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2588 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2589 case SOCKOP_shutdown: /* sockfd, how */
2590 return get_errno(shutdown(a[0], a[1]));
2591 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2592 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2593 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2594 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2595 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
2596 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
2597 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
2598 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
2599 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2600 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2601 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2602 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2603 default:
2604 gemu_log("Unsupported socketcall: %d\n", num);
2605 return -TARGET_ENOSYS;
2608 #endif
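/* do_socketcall() first copies num's arguments out of guest memory using the
 * per-call count in ac[], then dispatches to the individual do_*() helpers.
 * A stripped-down sketch of that table-driven unpacking, with a hypothetical
 * fetch callback standing in for get_user_ual():
 */
typedef int (*example_fetch_fn)(unsigned long *out, unsigned long addr);

static int example_unpack_args(int num, unsigned long vptr,
                               unsigned long a[6], example_fetch_fn fetch)
{
    static const unsigned ac_example[] = { [1] = 3, [2] = 3, [3] = 3 };
    unsigned i;

    if (num <= 0 || num >= (int)(sizeof(ac_example) / sizeof(ac_example[0]))) {
        return -1;
    }
    for (i = 0; i < ac_example[num]; i++) {
        if (fetch(&a[i], vptr + i * sizeof(unsigned long)) != 0) {
            return -1;               /* -TARGET_EFAULT in the real code */
        }
    }
    return 0;
}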
2610 #define N_SHM_REGIONS 32
2612 static struct shm_region {
2613 abi_ulong start;
2614 abi_ulong size;
2615 } shm_regions[N_SHM_REGIONS];
2617 struct target_semid_ds
2619 struct target_ipc_perm sem_perm;
2620 abi_ulong sem_otime;
2621 #if !defined(TARGET_PPC64)
2622 abi_ulong __unused1;
2623 #endif
2624 abi_ulong sem_ctime;
2625 #if !defined(TARGET_PPC64)
2626 abi_ulong __unused2;
2627 #endif
2628 abi_ulong sem_nsems;
2629 abi_ulong __unused3;
2630 abi_ulong __unused4;
2633 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2634 abi_ulong target_addr)
2636 struct target_ipc_perm *target_ip;
2637 struct target_semid_ds *target_sd;
2639 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2640 return -TARGET_EFAULT;
2641 target_ip = &(target_sd->sem_perm);
2642 host_ip->__key = tswap32(target_ip->__key);
2643 host_ip->uid = tswap32(target_ip->uid);
2644 host_ip->gid = tswap32(target_ip->gid);
2645 host_ip->cuid = tswap32(target_ip->cuid);
2646 host_ip->cgid = tswap32(target_ip->cgid);
2647 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2648 host_ip->mode = tswap32(target_ip->mode);
2649 #else
2650 host_ip->mode = tswap16(target_ip->mode);
2651 #endif
2652 #if defined(TARGET_PPC)
2653 host_ip->__seq = tswap32(target_ip->__seq);
2654 #else
2655 host_ip->__seq = tswap16(target_ip->__seq);
2656 #endif
2657 unlock_user_struct(target_sd, target_addr, 0);
2658 return 0;
2661 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2662 struct ipc_perm *host_ip)
2664 struct target_ipc_perm *target_ip;
2665 struct target_semid_ds *target_sd;
2667 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2668 return -TARGET_EFAULT;
2669 target_ip = &(target_sd->sem_perm);
2670 target_ip->__key = tswap32(host_ip->__key);
2671 target_ip->uid = tswap32(host_ip->uid);
2672 target_ip->gid = tswap32(host_ip->gid);
2673 target_ip->cuid = tswap32(host_ip->cuid);
2674 target_ip->cgid = tswap32(host_ip->cgid);
2675 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2676 target_ip->mode = tswap32(host_ip->mode);
2677 #else
2678 target_ip->mode = tswap16(host_ip->mode);
2679 #endif
2680 #if defined(TARGET_PPC)
2681 target_ip->__seq = tswap32(host_ip->__seq);
2682 #else
2683 target_ip->__seq = tswap16(host_ip->__seq);
2684 #endif
2685 unlock_user_struct(target_sd, target_addr, 1);
2686 return 0;
2689 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2690 abi_ulong target_addr)
2692 struct target_semid_ds *target_sd;
2694 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2695 return -TARGET_EFAULT;
2696 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2697 return -TARGET_EFAULT;
2698 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2699 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2700 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2701 unlock_user_struct(target_sd, target_addr, 0);
2702 return 0;
2705 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2706 struct semid_ds *host_sd)
2708 struct target_semid_ds *target_sd;
2710 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2711 return -TARGET_EFAULT;
2712 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2713 return -TARGET_EFAULT;
2714 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2715 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2716 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2717 unlock_user_struct(target_sd, target_addr, 1);
2718 return 0;
2721 struct target_seminfo {
2722 int semmap;
2723 int semmni;
2724 int semmns;
2725 int semmnu;
2726 int semmsl;
2727 int semopm;
2728 int semume;
2729 int semusz;
2730 int semvmx;
2731 int semaem;
2734 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2735 struct seminfo *host_seminfo)
2737 struct target_seminfo *target_seminfo;
2738 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2739 return -TARGET_EFAULT;
2740 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2741 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2742 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2743 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2744 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2745 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2746 __put_user(host_seminfo->semume, &target_seminfo->semume);
2747 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2748 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2749 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2750 unlock_user_struct(target_seminfo, target_addr, 1);
2751 return 0;
2754 union semun {
2755 int val;
2756 struct semid_ds *buf;
2757 unsigned short *array;
2758 struct seminfo *__buf;
2761 union target_semun {
2762 int val;
2763 abi_ulong buf;
2764 abi_ulong array;
2765 abi_ulong __buf;
2768 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2769 abi_ulong target_addr)
2771 int nsems;
2772 unsigned short *array;
2773 union semun semun;
2774 struct semid_ds semid_ds;
2775 int i, ret;
2777 semun.buf = &semid_ds;
2779 ret = semctl(semid, 0, IPC_STAT, semun);
2780 if (ret == -1)
2781 return get_errno(ret);
2783 nsems = semid_ds.sem_nsems;
2785 *host_array = g_try_new(unsigned short, nsems);
2786 if (!*host_array) {
2787 return -TARGET_ENOMEM;
2789 array = lock_user(VERIFY_READ, target_addr,
2790 nsems*sizeof(unsigned short), 1);
2791 if (!array) {
2792 g_free(*host_array);
2793 return -TARGET_EFAULT;
2796 for(i=0; i<nsems; i++) {
2797 __get_user((*host_array)[i], &array[i]);
2799 unlock_user(array, target_addr, 0);
2801 return 0;
2804 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2805 unsigned short **host_array)
2807 int nsems;
2808 unsigned short *array;
2809 union semun semun;
2810 struct semid_ds semid_ds;
2811 int i, ret;
2813 semun.buf = &semid_ds;
2815 ret = semctl(semid, 0, IPC_STAT, semun);
2816 if (ret == -1)
2817 return get_errno(ret);
2819 nsems = semid_ds.sem_nsems;
2821 array = lock_user(VERIFY_WRITE, target_addr,
2822 nsems*sizeof(unsigned short), 0);
2823 if (!array)
2824 return -TARGET_EFAULT;
2826 for(i=0; i<nsems; i++) {
2827 __put_user((*host_array)[i], &array[i]);
2829 g_free(*host_array);
2830 unlock_user(array, target_addr, 1);
2832 return 0;
2835 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2836 abi_ulong target_arg)
2838 union target_semun target_su = { .buf = target_arg };
2839 union semun arg;
2840 struct semid_ds dsarg;
2841 unsigned short *array = NULL;
2842 struct seminfo seminfo;
2843 abi_long ret = -TARGET_EINVAL;
2844 abi_long err;
2845 cmd &= 0xff;
2847 switch( cmd ) {
2848 case GETVAL:
2849 case SETVAL:
2850 /* In 64 bit cross-endian situations, we will erroneously pick up
2851 * the wrong half of the union for the "val" element. To rectify
2852 * this, the entire 8-byte structure is byteswapped, followed by
2853 * a swap of the 4 byte val field. In other cases, the data is
2854 * already in proper host byte order. */
2855 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
2856 target_su.buf = tswapal(target_su.buf);
2857 arg.val = tswap32(target_su.val);
2858 } else {
2859 arg.val = target_su.val;
2861 ret = get_errno(semctl(semid, semnum, cmd, arg));
2862 break;
2863 case GETALL:
2864 case SETALL:
2865 err = target_to_host_semarray(semid, &array, target_su.array);
2866 if (err)
2867 return err;
2868 arg.array = array;
2869 ret = get_errno(semctl(semid, semnum, cmd, arg));
2870 err = host_to_target_semarray(semid, target_su.array, &array);
2871 if (err)
2872 return err;
2873 break;
2874 case IPC_STAT:
2875 case IPC_SET:
2876 case SEM_STAT:
2877 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2878 if (err)
2879 return err;
2880 arg.buf = &dsarg;
2881 ret = get_errno(semctl(semid, semnum, cmd, arg));
2882 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2883 if (err)
2884 return err;
2885 break;
2886 case IPC_INFO:
2887 case SEM_INFO:
2888 arg.__buf = &seminfo;
2889 ret = get_errno(semctl(semid, semnum, cmd, arg));
2890 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2891 if (err)
2892 return err;
2893 break;
2894 case IPC_RMID:
2895 case GETPID:
2896 case GETNCNT:
2897 case GETZCNT:
2898 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2899 break;
2902 return ret;
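/* do_semctl() marshals the target's semun union into the host's; on Linux
 * the caller must supply the union semun definition itself, as done earlier
 * in this file.  A host-only sketch of SETVAL followed by GETVAL
 * (hypothetical helper, assumes <sys/sem.h>):
 */
static int example_sem_set_then_get(int semid)
{
    union semun arg;

    arg.val = 1;
    if (semctl(semid, 0, SETVAL, arg) == -1) {
        return -1;
    }
    /* GETVAL ignores the fourth argument and returns the value directly. */
    return semctl(semid, 0, GETVAL);
}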
2905 struct target_sembuf {
2906 unsigned short sem_num;
2907 short sem_op;
2908 short sem_flg;
2911 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2912 abi_ulong target_addr,
2913 unsigned nsops)
2915 struct target_sembuf *target_sembuf;
2916 int i;
2918 target_sembuf = lock_user(VERIFY_READ, target_addr,
2919 nsops*sizeof(struct target_sembuf), 1);
2920 if (!target_sembuf)
2921 return -TARGET_EFAULT;
2923 for(i=0; i<nsops; i++) {
2924 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2925 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2926 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2929 unlock_user(target_sembuf, target_addr, 0);
2931 return 0;
2934 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2936 struct sembuf sops[nsops];
2938 if (target_to_host_sembuf(sops, ptr, nsops))
2939 return -TARGET_EFAULT;
2941 return get_errno(semop(semid, sops, nsops));
2944 struct target_msqid_ds
2946 struct target_ipc_perm msg_perm;
2947 abi_ulong msg_stime;
2948 #if TARGET_ABI_BITS == 32
2949 abi_ulong __unused1;
2950 #endif
2951 abi_ulong msg_rtime;
2952 #if TARGET_ABI_BITS == 32
2953 abi_ulong __unused2;
2954 #endif
2955 abi_ulong msg_ctime;
2956 #if TARGET_ABI_BITS == 32
2957 abi_ulong __unused3;
2958 #endif
2959 abi_ulong __msg_cbytes;
2960 abi_ulong msg_qnum;
2961 abi_ulong msg_qbytes;
2962 abi_ulong msg_lspid;
2963 abi_ulong msg_lrpid;
2964 abi_ulong __unused4;
2965 abi_ulong __unused5;
2968 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2969 abi_ulong target_addr)
2971 struct target_msqid_ds *target_md;
2973 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2974 return -TARGET_EFAULT;
2975 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2976 return -TARGET_EFAULT;
2977 host_md->msg_stime = tswapal(target_md->msg_stime);
2978 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2979 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2980 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2981 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2982 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2983 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2984 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2985 unlock_user_struct(target_md, target_addr, 0);
2986 return 0;
2989 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2990 struct msqid_ds *host_md)
2992 struct target_msqid_ds *target_md;
2994 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2995 return -TARGET_EFAULT;
2996 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2997 return -TARGET_EFAULT;
2998 target_md->msg_stime = tswapal(host_md->msg_stime);
2999 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3000 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3001 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3002 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3003 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3004 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3005 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3006 unlock_user_struct(target_md, target_addr, 1);
3007 return 0;
3010 struct target_msginfo {
3011 int msgpool;
3012 int msgmap;
3013 int msgmax;
3014 int msgmnb;
3015 int msgmni;
3016 int msgssz;
3017 int msgtql;
3018 unsigned short int msgseg;
3021 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3022 struct msginfo *host_msginfo)
3024 struct target_msginfo *target_msginfo;
3025 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3026 return -TARGET_EFAULT;
3027 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3028 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3029 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3030 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3031 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3032 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3033 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3034 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3035 unlock_user_struct(target_msginfo, target_addr, 1);
3036 return 0;
3039 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3041 struct msqid_ds dsarg;
3042 struct msginfo msginfo;
3043 abi_long ret = -TARGET_EINVAL;
3045 cmd &= 0xff;
3047 switch (cmd) {
3048 case IPC_STAT:
3049 case IPC_SET:
3050 case MSG_STAT:
3051 if (target_to_host_msqid_ds(&dsarg,ptr))
3052 return -TARGET_EFAULT;
3053 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3054 if (host_to_target_msqid_ds(ptr,&dsarg))
3055 return -TARGET_EFAULT;
3056 break;
3057 case IPC_RMID:
3058 ret = get_errno(msgctl(msgid, cmd, NULL));
3059 break;
3060 case IPC_INFO:
3061 case MSG_INFO:
3062 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3063 if (host_to_target_msginfo(ptr, &msginfo))
3064 return -TARGET_EFAULT;
3065 break;
3068 return ret;
3071 struct target_msgbuf {
3072 abi_long mtype;
3073 char mtext[1];
3076 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3077 ssize_t msgsz, int msgflg)
3079 struct target_msgbuf *target_mb;
3080 struct msgbuf *host_mb;
3081 abi_long ret = 0;
3083 if (msgsz < 0) {
3084 return -TARGET_EINVAL;
3087 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3088 return -TARGET_EFAULT;
3089 host_mb = g_try_malloc(msgsz + sizeof(long));
3090 if (!host_mb) {
3091 unlock_user_struct(target_mb, msgp, 0);
3092 return -TARGET_ENOMEM;
3094 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3095 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3096 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
3097 g_free(host_mb);
3098 unlock_user_struct(target_mb, msgp, 0);
3100 return ret;
3103 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3104 unsigned int msgsz, abi_long msgtyp,
3105 int msgflg)
3107 struct target_msgbuf *target_mb;
3108 char *target_mtext;
3109 struct msgbuf *host_mb;
3110 abi_long ret = 0;
3112 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3113 return -TARGET_EFAULT;
3115 host_mb = g_malloc(msgsz+sizeof(long));
3116 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3118 if (ret > 0) {
3119 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3120 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3121 if (!target_mtext) {
3122 ret = -TARGET_EFAULT;
3123 goto end;
3125 memcpy(target_mb->mtext, host_mb->mtext, ret);
3126 unlock_user(target_mtext, target_mtext_addr, ret);
3129 target_mb->mtype = tswapal(host_mb->mtype);
3131 end:
3132 if (target_mb)
3133 unlock_user_struct(target_mb, msgp, 1);
3134 g_free(host_mb);
3135 return ret;
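/* do_msgsnd()/do_msgrcv() copy the guest message into a host struct msgbuf,
 * which starts with a long mtype followed by the payload; msgsz counts only
 * the mtext bytes.  A host-only round trip (hypothetical helper, assumes
 * <sys/msg.h>):
 */
static int example_msg_roundtrip(int msqid)
{
    struct { long mtype; char mtext[32]; } msg = { 1, "hello" };

    /* msgsz counts mtext only, never the mtype word. */
    if (msgsnd(msqid, &msg, sizeof(msg.mtext), 0) == -1) {
        return -1;
    }
    return (int)msgrcv(msqid, &msg, sizeof(msg.mtext), 1, 0);
}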
3138 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3139 abi_ulong target_addr)
3141 struct target_shmid_ds *target_sd;
3143 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3144 return -TARGET_EFAULT;
3145 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3146 return -TARGET_EFAULT;
3147 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3148 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3149 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3150 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3151 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3152 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3153 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3154 unlock_user_struct(target_sd, target_addr, 0);
3155 return 0;
3158 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3159 struct shmid_ds *host_sd)
3161 struct target_shmid_ds *target_sd;
3163 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3164 return -TARGET_EFAULT;
3165 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3166 return -TARGET_EFAULT;
3167 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3168 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3169 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3170 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3171 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3172 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3173 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3174 unlock_user_struct(target_sd, target_addr, 1);
3175 return 0;
3178 struct target_shminfo {
3179 abi_ulong shmmax;
3180 abi_ulong shmmin;
3181 abi_ulong shmmni;
3182 abi_ulong shmseg;
3183 abi_ulong shmall;
3186 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3187 struct shminfo *host_shminfo)
3189 struct target_shminfo *target_shminfo;
3190 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3191 return -TARGET_EFAULT;
3192 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3193 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3194 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3195 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3196 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3197 unlock_user_struct(target_shminfo, target_addr, 1);
3198 return 0;
3201 struct target_shm_info {
3202 int used_ids;
3203 abi_ulong shm_tot;
3204 abi_ulong shm_rss;
3205 abi_ulong shm_swp;
3206 abi_ulong swap_attempts;
3207 abi_ulong swap_successes;
3210 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3211 struct shm_info *host_shm_info)
3213 struct target_shm_info *target_shm_info;
3214 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3215 return -TARGET_EFAULT;
3216 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3217 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3218 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3219 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3220 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3221 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3222 unlock_user_struct(target_shm_info, target_addr, 1);
3223 return 0;
3226 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3228 struct shmid_ds dsarg;
3229 struct shminfo shminfo;
3230 struct shm_info shm_info;
3231 abi_long ret = -TARGET_EINVAL;
3233 cmd &= 0xff;
3235 switch(cmd) {
3236 case IPC_STAT:
3237 case IPC_SET:
3238 case SHM_STAT:
3239 if (target_to_host_shmid_ds(&dsarg, buf))
3240 return -TARGET_EFAULT;
3241 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3242 if (host_to_target_shmid_ds(buf, &dsarg))
3243 return -TARGET_EFAULT;
3244 break;
3245 case IPC_INFO:
3246 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3247 if (host_to_target_shminfo(buf, &shminfo))
3248 return -TARGET_EFAULT;
3249 break;
3250 case SHM_INFO:
3251 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3252 if (host_to_target_shm_info(buf, &shm_info))
3253 return -TARGET_EFAULT;
3254 break;
3255 case IPC_RMID:
3256 case SHM_LOCK:
3257 case SHM_UNLOCK:
3258 ret = get_errno(shmctl(shmid, cmd, NULL));
3259 break;
3262 return ret;
3265 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
3267 abi_long raddr;
3268 void *host_raddr;
3269 struct shmid_ds shm_info;
3270 int i,ret;
3272 /* find out the length of the shared memory segment */
3273 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3274 if (is_error(ret)) {
3275 /* can't get length, bail out */
3276 return ret;
3279 mmap_lock();
3281 if (shmaddr)
3282 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3283 else {
3284 abi_ulong mmap_start;
3286 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3288 if (mmap_start == -1) {
3289 errno = ENOMEM;
3290 host_raddr = (void *)-1;
3291 } else
3292 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3295 if (host_raddr == (void *)-1) {
3296 mmap_unlock();
3297 return get_errno((long)host_raddr);
3299 raddr=h2g((unsigned long)host_raddr);
3301 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3302 PAGE_VALID | PAGE_READ |
3303 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3305 for (i = 0; i < N_SHM_REGIONS; i++) {
3306 if (shm_regions[i].start == 0) {
3307 shm_regions[i].start = raddr;
3308 shm_regions[i].size = shm_info.shm_segsz;
3309 break;
3313 mmap_unlock();
3314 return raddr;
3318 static inline abi_long do_shmdt(abi_ulong shmaddr)
3320 int i;
3322 for (i = 0; i < N_SHM_REGIONS; ++i) {
3323 if (shm_regions[i].start == shmaddr) {
3324 shm_regions[i].start = 0;
3325 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3326 break;
3330 return get_errno(shmdt(g2h(shmaddr)));
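/* do_shmat()/do_shmdt() record each attached segment in shm_regions[] so the
 * corresponding guest page flags can be cleared again on detach.  The host
 * calls they wrap look like this (hypothetical helper, assumes <sys/shm.h>):
 */
static int example_shm_roundtrip(size_t size)
{
    int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
    void *p;

    if (shmid == -1) {
        return -1;
    }
    p = shmat(shmid, NULL, 0);            /* let the kernel pick the address */
    if (p == (void *)-1) {
        return -1;
    }
    shmdt(p);                             /* the emulation also resets page flags here */
    return shmctl(shmid, IPC_RMID, NULL); /* release the segment id */
}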
3333 #ifdef TARGET_NR_ipc
3334 /* ??? This only works with linear mappings. */
3335 /* do_ipc() must return target values and target errnos. */
3336 static abi_long do_ipc(unsigned int call, abi_long first,
3337 abi_long second, abi_long third,
3338 abi_long ptr, abi_long fifth)
3340 int version;
3341 abi_long ret = 0;
3343 version = call >> 16;
3344 call &= 0xffff;
3346 switch (call) {
3347 case IPCOP_semop:
3348 ret = do_semop(first, ptr, second);
3349 break;
3351 case IPCOP_semget:
3352 ret = get_errno(semget(first, second, third));
3353 break;
3355 case IPCOP_semctl: {
3356 /* The semun argument to semctl is passed by value, so dereference the
3357 * ptr argument. */
3358 abi_ulong atptr;
3359 get_user_ual(atptr, ptr);
3360 ret = do_semctl(first, second, third, atptr);
3361 break;
3364 case IPCOP_msgget:
3365 ret = get_errno(msgget(first, second));
3366 break;
3368 case IPCOP_msgsnd:
3369 ret = do_msgsnd(first, ptr, second, third);
3370 break;
3372 case IPCOP_msgctl:
3373 ret = do_msgctl(first, second, ptr);
3374 break;
3376 case IPCOP_msgrcv:
3377 switch (version) {
3378 case 0:
3380 struct target_ipc_kludge {
3381 abi_long msgp;
3382 abi_long msgtyp;
3383 } *tmp;
3385 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3386 ret = -TARGET_EFAULT;
3387 break;
3390 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3392 unlock_user_struct(tmp, ptr, 0);
3393 break;
3395 default:
3396 ret = do_msgrcv(first, ptr, second, fifth, third);
3398 break;
3400 case IPCOP_shmat:
3401 switch (version) {
3402 default:
3404 abi_ulong raddr;
3405 raddr = do_shmat(first, ptr, second);
3406 if (is_error(raddr))
3407 return get_errno(raddr);
3408 if (put_user_ual(raddr, third))
3409 return -TARGET_EFAULT;
3410 break;
3412 case 1:
3413 ret = -TARGET_EINVAL;
3414 break;
3416 break;
3417 case IPCOP_shmdt:
3418 ret = do_shmdt(ptr);
3419 break;
3421 case IPCOP_shmget:
3422 /* IPC_* flag values are the same on all Linux platforms */
3423 ret = get_errno(shmget(first, second, third));
3424 break;
3426 /* IPC_* and SHM_* command values are the same on all Linux platforms */
3427 case IPCOP_shmctl:
3428 ret = do_shmctl(first, second, ptr);
3429 break;
3430 default:
3431 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3432 ret = -TARGET_ENOSYS;
3433 break;
3435 return ret;
3437 #endif
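/* do_ipc() demultiplexes the legacy ipc(2) system call: the low 16 bits of
 * 'call' select the operation and the high 16 bits carry an ABI version.
 * A tiny sketch of that decoding (hypothetical helper):
 */
static void example_decode_ipc_call(unsigned int call,
                                    unsigned int *op, unsigned int *version)
{
    *version = call >> 16;     /* 0 for the historic layout, 1 for the newer one */
    *op = call & 0xffff;       /* IPCOP_semop, IPCOP_shmat, ... */
}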
3439 /* kernel structure types definitions */
3441 #define STRUCT(name, ...) STRUCT_ ## name,
3442 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3443 enum {
3444 #include "syscall_types.h"
3445 STRUCT_MAX
3447 #undef STRUCT
3448 #undef STRUCT_SPECIAL
3450 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3451 #define STRUCT_SPECIAL(name)
3452 #include "syscall_types.h"
3453 #undef STRUCT
3454 #undef STRUCT_SPECIAL
3456 typedef struct IOCTLEntry IOCTLEntry;
3458 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3459 int fd, int cmd, abi_long arg);
3461 struct IOCTLEntry {
3462 int target_cmd;
3463 unsigned int host_cmd;
3464 const char *name;
3465 int access;
3466 do_ioctl_fn *do_ioctl;
3467 const argtype arg_type[5];
3470 #define IOC_R 0x0001
3471 #define IOC_W 0x0002
3472 #define IOC_RW (IOC_R | IOC_W)
3474 #define MAX_STRUCT_SIZE 4096
3476 #ifdef CONFIG_FIEMAP
3477 /* So fiemap access checks don't overflow on 32 bit systems.
3478 * This is very slightly smaller than the limit imposed by
3479 * the underlying kernel.
3481 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3482 / sizeof(struct fiemap_extent))
3484 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3485 int fd, int cmd, abi_long arg)
3487 /* The parameter for this ioctl is a struct fiemap followed
3488 * by an array of struct fiemap_extent whose size is set
3489 * in fiemap->fm_extent_count. The array is filled in by the
3490 * ioctl.
3492 int target_size_in, target_size_out;
3493 struct fiemap *fm;
3494 const argtype *arg_type = ie->arg_type;
3495 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3496 void *argptr, *p;
3497 abi_long ret;
3498 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3499 uint32_t outbufsz;
3500 int free_fm = 0;
3502 assert(arg_type[0] == TYPE_PTR);
3503 assert(ie->access == IOC_RW);
3504 arg_type++;
3505 target_size_in = thunk_type_size(arg_type, 0);
3506 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3507 if (!argptr) {
3508 return -TARGET_EFAULT;
3510 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3511 unlock_user(argptr, arg, 0);
3512 fm = (struct fiemap *)buf_temp;
3513 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3514 return -TARGET_EINVAL;
3517 outbufsz = sizeof (*fm) +
3518 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3520 if (outbufsz > MAX_STRUCT_SIZE) {
3521 /* We can't fit all the extents into the fixed size buffer.
3522 * Allocate one that is large enough and use it instead.
3524 fm = g_try_malloc(outbufsz);
3525 if (!fm) {
3526 return -TARGET_ENOMEM;
3528 memcpy(fm, buf_temp, sizeof(struct fiemap));
3529 free_fm = 1;
3531 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3532 if (!is_error(ret)) {
3533 target_size_out = target_size_in;
3534 /* An extent_count of 0 means we were only counting the extents
3535 * so there are no structs to copy
3537 if (fm->fm_extent_count != 0) {
3538 target_size_out += fm->fm_mapped_extents * extent_size;
3540 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3541 if (!argptr) {
3542 ret = -TARGET_EFAULT;
3543 } else {
3544 /* Convert the struct fiemap */
3545 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3546 if (fm->fm_extent_count != 0) {
3547 p = argptr + target_size_in;
3548 /* ...and then all the struct fiemap_extents */
3549 for (i = 0; i < fm->fm_mapped_extents; i++) {
3550 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3551 THUNK_TARGET);
3552 p += extent_size;
3555 unlock_user(argptr, arg, target_size_out);
3558 if (free_fm) {
3559 g_free(fm);
3561 return ret;
3563 #endif
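/* The FS_IOC_FIEMAP handler above has to cope with a variable-length reply:
 * a struct fiemap header followed by up to fm_extent_count extents.  A plain
 * host-side sketch of issuing the same ioctl (hypothetical helper, assumes
 * <linux/fs.h>, <linux/fiemap.h> and <sys/ioctl.h>):
 */
static int example_fiemap(int fd, unsigned int max_extents)
{
    struct fiemap *fm;
    int ret;

    fm = calloc(1, sizeof(*fm) + max_extents * sizeof(struct fiemap_extent));
    if (!fm) {
        return -1;
    }
    fm->fm_start = 0;
    fm->fm_length = ~0ULL;               /* map the whole file */
    fm->fm_extent_count = max_extents;   /* how many extents may be returned */
    ret = ioctl(fd, FS_IOC_FIEMAP, fm);
    if (ret == 0) {
        ret = fm->fm_mapped_extents;     /* extents actually filled in */
    }
    free(fm);
    return ret;
}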
3565 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3566 int fd, int cmd, abi_long arg)
3568 const argtype *arg_type = ie->arg_type;
3569 int target_size;
3570 void *argptr;
3571 int ret;
3572 struct ifconf *host_ifconf;
3573 uint32_t outbufsz;
3574 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3575 int target_ifreq_size;
3576 int nb_ifreq;
3577 int free_buf = 0;
3578 int i;
3579 int target_ifc_len;
3580 abi_long target_ifc_buf;
3581 int host_ifc_len;
3582 char *host_ifc_buf;
3584 assert(arg_type[0] == TYPE_PTR);
3585 assert(ie->access == IOC_RW);
3587 arg_type++;
3588 target_size = thunk_type_size(arg_type, 0);
3590 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3591 if (!argptr)
3592 return -TARGET_EFAULT;
3593 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3594 unlock_user(argptr, arg, 0);
3596 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3597 target_ifc_len = host_ifconf->ifc_len;
3598 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3600 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3601 nb_ifreq = target_ifc_len / target_ifreq_size;
3602 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3604 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3605 if (outbufsz > MAX_STRUCT_SIZE) {
3606 /* We can't fit all the ifreq entries into the fixed-size buffer.
3607 * Allocate one that is large enough and use it instead.
3609 host_ifconf = malloc(outbufsz);
3610 if (!host_ifconf) {
3611 return -TARGET_ENOMEM;
3613 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3614 free_buf = 1;
3616 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3618 host_ifconf->ifc_len = host_ifc_len;
3619 host_ifconf->ifc_buf = host_ifc_buf;
3621 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3622 if (!is_error(ret)) {
3623 /* convert host ifc_len to target ifc_len */
3625 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3626 target_ifc_len = nb_ifreq * target_ifreq_size;
3627 host_ifconf->ifc_len = target_ifc_len;
3629 /* restore target ifc_buf */
3631 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3633 /* copy struct ifconf to target user */
3635 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3636 if (!argptr)
3637 return -TARGET_EFAULT;
3638 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3639 unlock_user(argptr, arg, target_size);
3641 /* copy ifreq[] to target user */
3643 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3644 for (i = 0; i < nb_ifreq ; i++) {
3645 thunk_convert(argptr + i * target_ifreq_size,
3646 host_ifc_buf + i * sizeof(struct ifreq),
3647 ifreq_arg_type, THUNK_TARGET);
3649 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3652 if (free_buf) {
3653 free(host_ifconf);
3656 return ret;
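/* do_ioctl_ifconf() has to translate both the struct ifconf header and the
 * array of struct ifreq it points at.  The host-side call it wraps looks
 * like this (hypothetical helper, assumes <net/if.h> and <sys/ioctl.h>; the
 * caller supplies the buffer):
 */
static int example_ifconf(int fd, char *buf, size_t buflen)
{
    struct ifconf ifc;

    ifc.ifc_len = (int)buflen;   /* size of the caller's buffer, in bytes */
    ifc.ifc_buf = buf;           /* the kernel fills this with struct ifreq entries */
    if (ioctl(fd, SIOCGIFCONF, &ifc) == -1) {
        return -1;
    }
    return ifc.ifc_len / (int)sizeof(struct ifreq);   /* interfaces returned */
}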
3659 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3660 int cmd, abi_long arg)
3662 void *argptr;
3663 struct dm_ioctl *host_dm;
3664 abi_long guest_data;
3665 uint32_t guest_data_size;
3666 int target_size;
3667 const argtype *arg_type = ie->arg_type;
3668 abi_long ret;
3669 void *big_buf = NULL;
3670 char *host_data;
3672 arg_type++;
3673 target_size = thunk_type_size(arg_type, 0);
3674 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3675 if (!argptr) {
3676 ret = -TARGET_EFAULT;
3677 goto out;
3679 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3680 unlock_user(argptr, arg, 0);
3682 /* buf_temp is too small, so fetch things into a bigger buffer */
3683 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3684 memcpy(big_buf, buf_temp, target_size);
3685 buf_temp = big_buf;
3686 host_dm = big_buf;
3688 guest_data = arg + host_dm->data_start;
3689 if ((guest_data - arg) < 0) {
3690 ret = -EINVAL;
3691 goto out;
3693 guest_data_size = host_dm->data_size - host_dm->data_start;
3694 host_data = (char*)host_dm + host_dm->data_start;
3696 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3697 switch (ie->host_cmd) {
3698 case DM_REMOVE_ALL:
3699 case DM_LIST_DEVICES:
3700 case DM_DEV_CREATE:
3701 case DM_DEV_REMOVE:
3702 case DM_DEV_SUSPEND:
3703 case DM_DEV_STATUS:
3704 case DM_DEV_WAIT:
3705 case DM_TABLE_STATUS:
3706 case DM_TABLE_CLEAR:
3707 case DM_TABLE_DEPS:
3708 case DM_LIST_VERSIONS:
3709 /* no input data */
3710 break;
3711 case DM_DEV_RENAME:
3712 case DM_DEV_SET_GEOMETRY:
3713 /* data contains only strings */
3714 memcpy(host_data, argptr, guest_data_size);
3715 break;
3716 case DM_TARGET_MSG:
3717 memcpy(host_data, argptr, guest_data_size);
3718 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3719 break;
3720 case DM_TABLE_LOAD:
3722 void *gspec = argptr;
3723 void *cur_data = host_data;
3724 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3725 int spec_size = thunk_type_size(arg_type, 0);
3726 int i;
3728 for (i = 0; i < host_dm->target_count; i++) {
3729 struct dm_target_spec *spec = cur_data;
3730 uint32_t next;
3731 int slen;
3733 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3734 slen = strlen((char*)gspec + spec_size) + 1;
3735 next = spec->next;
3736 spec->next = sizeof(*spec) + slen;
3737 strcpy((char*)&spec[1], gspec + spec_size);
3738 gspec += next;
3739 cur_data += spec->next;
3741 break;
3743 default:
3744 ret = -TARGET_EINVAL;
3745 unlock_user(argptr, guest_data, 0);
3746 goto out;
3748 unlock_user(argptr, guest_data, 0);
3750 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3751 if (!is_error(ret)) {
3752 guest_data = arg + host_dm->data_start;
3753 guest_data_size = host_dm->data_size - host_dm->data_start;
3754 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3755 switch (ie->host_cmd) {
3756 case DM_REMOVE_ALL:
3757 case DM_DEV_CREATE:
3758 case DM_DEV_REMOVE:
3759 case DM_DEV_RENAME:
3760 case DM_DEV_SUSPEND:
3761 case DM_DEV_STATUS:
3762 case DM_TABLE_LOAD:
3763 case DM_TABLE_CLEAR:
3764 case DM_TARGET_MSG:
3765 case DM_DEV_SET_GEOMETRY:
3766 /* no return data */
3767 break;
3768 case DM_LIST_DEVICES:
3770 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3771 uint32_t remaining_data = guest_data_size;
3772 void *cur_data = argptr;
3773 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3774 int nl_size = 12; /* can't use thunk_size due to alignment */
3776 while (1) {
3777 uint32_t next = nl->next;
3778 if (next) {
3779 nl->next = nl_size + (strlen(nl->name) + 1);
3781 if (remaining_data < nl->next) {
3782 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3783 break;
3785 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3786 strcpy(cur_data + nl_size, nl->name);
3787 cur_data += nl->next;
3788 remaining_data -= nl->next;
3789 if (!next) {
3790 break;
3792 nl = (void*)nl + next;
3794 break;
3796 case DM_DEV_WAIT:
3797 case DM_TABLE_STATUS:
3799 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3800 void *cur_data = argptr;
3801 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3802 int spec_size = thunk_type_size(arg_type, 0);
3803 int i;
3805 for (i = 0; i < host_dm->target_count; i++) {
3806 uint32_t next = spec->next;
3807 int slen = strlen((char*)&spec[1]) + 1;
3808 spec->next = (cur_data - argptr) + spec_size + slen;
3809 if (guest_data_size < spec->next) {
3810 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3811 break;
3813 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3814 strcpy(cur_data + spec_size, (char*)&spec[1]);
3815 cur_data = argptr + spec->next;
3816 spec = (void*)host_dm + host_dm->data_start + next;
3818 break;
3820 case DM_TABLE_DEPS:
3822 void *hdata = (void*)host_dm + host_dm->data_start;
3823 int count = *(uint32_t*)hdata;
3824 uint64_t *hdev = hdata + 8;
3825 uint64_t *gdev = argptr + 8;
3826 int i;
3828 *(uint32_t*)argptr = tswap32(count);
3829 for (i = 0; i < count; i++) {
3830 *gdev = tswap64(*hdev);
3831 gdev++;
3832 hdev++;
3834 break;
3836 case DM_LIST_VERSIONS:
3838 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3839 uint32_t remaining_data = guest_data_size;
3840 void *cur_data = argptr;
3841 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3842 int vers_size = thunk_type_size(arg_type, 0);
3844 while (1) {
3845 uint32_t next = vers->next;
3846 if (next) {
3847 vers->next = vers_size + (strlen(vers->name) + 1);
3849 if (remaining_data < vers->next) {
3850 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3851 break;
3853 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3854 strcpy(cur_data + vers_size, vers->name);
3855 cur_data += vers->next;
3856 remaining_data -= vers->next;
3857 if (!next) {
3858 break;
3860 vers = (void*)vers + next;
3862 break;
3864 default:
3865 unlock_user(argptr, guest_data, 0);
3866 ret = -TARGET_EINVAL;
3867 goto out;
3869 unlock_user(argptr, guest_data, guest_data_size);
3871 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3872 if (!argptr) {
3873 ret = -TARGET_EFAULT;
3874 goto out;
3876 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3877 unlock_user(argptr, arg, target_size);
3879 out:
3880 g_free(big_buf);
3881 return ret;
3884 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3885 int cmd, abi_long arg)
3887 void *argptr;
3888 int target_size;
3889 const argtype *arg_type = ie->arg_type;
3890 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
3891 abi_long ret;
3893 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
3894 struct blkpg_partition host_part;
3896 /* Read and convert blkpg */
3897 arg_type++;
3898 target_size = thunk_type_size(arg_type, 0);
3899 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3900 if (!argptr) {
3901 ret = -TARGET_EFAULT;
3902 goto out;
3904 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3905 unlock_user(argptr, arg, 0);
3907 switch (host_blkpg->op) {
3908 case BLKPG_ADD_PARTITION:
3909 case BLKPG_DEL_PARTITION:
3910 /* payload is struct blkpg_partition */
3911 break;
3912 default:
3913 /* Unknown opcode */
3914 ret = -TARGET_EINVAL;
3915 goto out;
3918 /* Read and convert blkpg->data */
3919 arg = (abi_long)(uintptr_t)host_blkpg->data;
3920 target_size = thunk_type_size(part_arg_type, 0);
3921 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3922 if (!argptr) {
3923 ret = -TARGET_EFAULT;
3924 goto out;
3926 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
3927 unlock_user(argptr, arg, 0);
3929 /* Swizzle the data pointer to our local copy and call! */
3930 host_blkpg->data = &host_part;
3931 ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
3933 out:
3934 return ret;
3937 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3938 int fd, int cmd, abi_long arg)
3940 const argtype *arg_type = ie->arg_type;
3941 const StructEntry *se;
3942 const argtype *field_types;
3943 const int *dst_offsets, *src_offsets;
3944 int target_size;
3945 void *argptr;
3946 abi_ulong *target_rt_dev_ptr;
3947 unsigned long *host_rt_dev_ptr;
3948 abi_long ret;
3949 int i;
3951 assert(ie->access == IOC_W);
3952 assert(*arg_type == TYPE_PTR);
3953 arg_type++;
3954 assert(*arg_type == TYPE_STRUCT);
3955 target_size = thunk_type_size(arg_type, 0);
3956 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3957 if (!argptr) {
3958 return -TARGET_EFAULT;
3960 arg_type++;
3961 assert(*arg_type == (int)STRUCT_rtentry);
3962 se = struct_entries + *arg_type++;
3963 assert(se->convert[0] == NULL);
3964 /* convert struct here to be able to catch rt_dev string */
3965 field_types = se->field_types;
3966 dst_offsets = se->field_offsets[THUNK_HOST];
3967 src_offsets = se->field_offsets[THUNK_TARGET];
3968 for (i = 0; i < se->nb_fields; i++) {
3969 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3970 assert(*field_types == TYPE_PTRVOID);
3971 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3972 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3973 if (*target_rt_dev_ptr != 0) {
3974 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3975 tswapal(*target_rt_dev_ptr));
3976 if (!*host_rt_dev_ptr) {
3977 unlock_user(argptr, arg, 0);
3978 return -TARGET_EFAULT;
3980 } else {
3981 *host_rt_dev_ptr = 0;
3983 field_types++;
3984 continue;
3986 field_types = thunk_convert(buf_temp + dst_offsets[i],
3987 argptr + src_offsets[i],
3988 field_types, THUNK_HOST);
3990 unlock_user(argptr, arg, 0);
3992 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3993 if (*host_rt_dev_ptr != 0) {
3994 unlock_user((void *)*host_rt_dev_ptr,
3995 *target_rt_dev_ptr, 0);
3997 return ret;
4000 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4001 int fd, int cmd, abi_long arg)
4003 int sig = target_to_host_signal(arg);
4004 return get_errno(ioctl(fd, ie->host_cmd, sig));
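/* Table of supported ioctls, filled in from ioctls.h. IOCTL_SPECIAL
   entries supply a do_ioctl callback that performs its own argument
   conversion. */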
4007 static IOCTLEntry ioctl_entries[] = {
4008 #define IOCTL(cmd, access, ...) \
4009 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
4010 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
4011 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
4012 #include "ioctls.h"
4013 { 0, 0, },
4016 /* ??? Implement proper locking for ioctls. */
4017 /* do_ioctl() must return target values and target errnos. */
4018 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
4020 const IOCTLEntry *ie;
4021 const argtype *arg_type;
4022 abi_long ret;
4023 uint8_t buf_temp[MAX_STRUCT_SIZE];
4024 int target_size;
4025 void *argptr;
4027 ie = ioctl_entries;
4028 for(;;) {
4029 if (ie->target_cmd == 0) {
4030 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
4031 return -TARGET_ENOSYS;
4033 if (ie->target_cmd == cmd)
4034 break;
4035 ie++;
4037 arg_type = ie->arg_type;
4038 #if defined(DEBUG)
4039 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
4040 #endif
4041 if (ie->do_ioctl) {
4042 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
4045 switch(arg_type[0]) {
4046 case TYPE_NULL:
4047 /* no argument */
4048 ret = get_errno(ioctl(fd, ie->host_cmd));
4049 break;
4050 case TYPE_PTRVOID:
4051 case TYPE_INT:
4052 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
4053 break;
4054 case TYPE_PTR:
4055 arg_type++;
4056 target_size = thunk_type_size(arg_type, 0);
4057 switch(ie->access) {
4058 case IOC_R:
4059 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4060 if (!is_error(ret)) {
4061 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4062 if (!argptr)
4063 return -TARGET_EFAULT;
4064 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4065 unlock_user(argptr, arg, target_size);
4067 break;
4068 case IOC_W:
4069 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4070 if (!argptr)
4071 return -TARGET_EFAULT;
4072 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4073 unlock_user(argptr, arg, 0);
4074 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4075 break;
4076 default:
4077 case IOC_RW:
4078 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4079 if (!argptr)
4080 return -TARGET_EFAULT;
4081 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4082 unlock_user(argptr, arg, 0);
4083 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
4084 if (!is_error(ret)) {
4085 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4086 if (!argptr)
4087 return -TARGET_EFAULT;
4088 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
4089 unlock_user(argptr, arg, target_size);
4091 break;
4093 break;
4094 default:
4095 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
4096 (long)cmd, arg_type[0]);
4097 ret = -TARGET_ENOSYS;
4098 break;
4100 return ret;
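/* Bitmask translation tables mapping target termios flag values to their
   host equivalents, used by the termios conversion helpers below. */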
4103 static const bitmask_transtbl iflag_tbl[] = {
4104 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
4105 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
4106 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
4107 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
4108 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
4109 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
4110 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
4111 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
4112 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
4113 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
4114 { TARGET_IXON, TARGET_IXON, IXON, IXON },
4115 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
4116 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
4117 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
4118 { 0, 0, 0, 0 }
4121 static const bitmask_transtbl oflag_tbl[] = {
4122 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
4123 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
4124 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
4125 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
4126 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
4127 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
4128 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
4129 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
4130 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
4131 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
4132 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
4133 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
4134 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
4135 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
4136 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
4137 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
4138 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
4139 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
4140 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
4141 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
4142 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
4143 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
4144 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
4145 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
4146 { 0, 0, 0, 0 }
4149 static const bitmask_transtbl cflag_tbl[] = {
4150 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
4151 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
4152 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
4153 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
4154 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
4155 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
4156 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
4157 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
4158 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
4159 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
4160 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
4161 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
4162 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
4163 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
4164 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
4165 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
4166 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
4167 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
4168 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
4169 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
4170 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
4171 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
4172 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
4173 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
4174 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
4175 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
4176 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
4177 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
4178 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
4179 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
4180 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
4181 { 0, 0, 0, 0 }
4184 static const bitmask_transtbl lflag_tbl[] = {
4185 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
4186 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
4187 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
4188 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
4189 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
4190 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
4191 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
4192 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
4193 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
4194 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
4195 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
4196 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
4197 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
4198 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
4199 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
4200 { 0, 0, 0, 0 }
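/* Convert a target struct termios to the host layout: translate the four
   flag words through the tables above and copy the control characters. */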
4203 static void target_to_host_termios (void *dst, const void *src)
4205 struct host_termios *host = dst;
4206 const struct target_termios *target = src;
4208 host->c_iflag =
4209 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
4210 host->c_oflag =
4211 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
4212 host->c_cflag =
4213 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
4214 host->c_lflag =
4215 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
4216 host->c_line = target->c_line;
4218 memset(host->c_cc, 0, sizeof(host->c_cc));
4219 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
4220 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
4221 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
4222 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
4223 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
4224 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
4225 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
4226 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
4227 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
4228 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
4229 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
4230 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
4231 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
4232 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
4233 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
4234 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
4235 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
4238 static void host_to_target_termios (void *dst, const void *src)
4240 struct target_termios *target = dst;
4241 const struct host_termios *host = src;
4243 target->c_iflag =
4244 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
4245 target->c_oflag =
4246 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
4247 target->c_cflag =
4248 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
4249 target->c_lflag =
4250 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
4251 target->c_line = host->c_line;
4253 memset(target->c_cc, 0, sizeof(target->c_cc));
4254 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
4255 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
4256 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
4257 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
4258 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
4259 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
4260 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
4261 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
4262 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
4263 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
4264 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
4265 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
4266 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
4267 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
4268 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
4269 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
4270 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
4273 static const StructEntry struct_termios_def = {
4274 .convert = { host_to_target_termios, target_to_host_termios },
4275 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
4276 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
4279 static bitmask_transtbl mmap_flags_tbl[] = {
4280 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
4281 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
4282 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
4283 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
4284 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
4285 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
4286 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
4287 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
4288 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
4289 MAP_NORESERVE },
4290 { 0, 0, 0, 0 }
4293 #if defined(TARGET_I386)
4295 /* NOTE: there is really only one LDT, shared by all the threads */
4296 static uint8_t *ldt_table;
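/* Copy (up to bytecount bytes of) the emulated LDT back to guest memory. */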
4298 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
4300 int size;
4301 void *p;
4303 if (!ldt_table)
4304 return 0;
4305 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
4306 if (size > bytecount)
4307 size = bytecount;
4308 p = lock_user(VERIFY_WRITE, ptr, size, 0);
4309 if (!p)
4310 return -TARGET_EFAULT;
4311 /* ??? Should this be byteswapped? */
4312 memcpy(p, ldt_table, size);
4313 unlock_user(p, ptr, size);
4314 return size;
4317 /* XXX: add locking support */
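/* Install or clear one LDT entry described by the guest's
   target_modify_ldt_ldt_s structure, encoding it into the two 32-bit
   descriptor words in the same way the Linux kernel does. */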
4318 static abi_long write_ldt(CPUX86State *env,
4319 abi_ulong ptr, unsigned long bytecount, int oldmode)
4321 struct target_modify_ldt_ldt_s ldt_info;
4322 struct target_modify_ldt_ldt_s *target_ldt_info;
4323 int seg_32bit, contents, read_exec_only, limit_in_pages;
4324 int seg_not_present, useable, lm;
4325 uint32_t *lp, entry_1, entry_2;
4327 if (bytecount != sizeof(ldt_info))
4328 return -TARGET_EINVAL;
4329 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
4330 return -TARGET_EFAULT;
4331 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4332 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4333 ldt_info.limit = tswap32(target_ldt_info->limit);
4334 ldt_info.flags = tswap32(target_ldt_info->flags);
4335 unlock_user_struct(target_ldt_info, ptr, 0);
4337 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
4338 return -TARGET_EINVAL;
4339 seg_32bit = ldt_info.flags & 1;
4340 contents = (ldt_info.flags >> 1) & 3;
4341 read_exec_only = (ldt_info.flags >> 3) & 1;
4342 limit_in_pages = (ldt_info.flags >> 4) & 1;
4343 seg_not_present = (ldt_info.flags >> 5) & 1;
4344 useable = (ldt_info.flags >> 6) & 1;
4345 #ifdef TARGET_ABI32
4346 lm = 0;
4347 #else
4348 lm = (ldt_info.flags >> 7) & 1;
4349 #endif
4350 if (contents == 3) {
4351 if (oldmode)
4352 return -TARGET_EINVAL;
4353 if (seg_not_present == 0)
4354 return -TARGET_EINVAL;
4356 /* allocate the LDT */
4357 if (!ldt_table) {
4358 env->ldt.base = target_mmap(0,
4359 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
4360 PROT_READ|PROT_WRITE,
4361 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4362 if (env->ldt.base == -1)
4363 return -TARGET_ENOMEM;
4364 memset(g2h(env->ldt.base), 0,
4365 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
4366 env->ldt.limit = 0xffff;
4367 ldt_table = g2h(env->ldt.base);
4370 /* NOTE: same code as Linux kernel */
4371 /* Allow LDTs to be cleared by the user. */
4372 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4373 if (oldmode ||
4374 (contents == 0 &&
4375 read_exec_only == 1 &&
4376 seg_32bit == 0 &&
4377 limit_in_pages == 0 &&
4378 seg_not_present == 1 &&
4379 useable == 0 )) {
4380 entry_1 = 0;
4381 entry_2 = 0;
4382 goto install;
4386 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4387 (ldt_info.limit & 0x0ffff);
4388 entry_2 = (ldt_info.base_addr & 0xff000000) |
4389 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4390 (ldt_info.limit & 0xf0000) |
4391 ((read_exec_only ^ 1) << 9) |
4392 (contents << 10) |
4393 ((seg_not_present ^ 1) << 15) |
4394 (seg_32bit << 22) |
4395 (limit_in_pages << 23) |
4396 (lm << 21) |
4397 0x7000;
4398 if (!oldmode)
4399 entry_2 |= (useable << 20);
4401 /* Install the new entry ... */
4402 install:
4403 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4404 lp[0] = tswap32(entry_1);
4405 lp[1] = tswap32(entry_2);
4406 return 0;
4409 /* specific and weird i386 syscalls */
4410 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4411 unsigned long bytecount)
4413 abi_long ret;
4415 switch (func) {
4416 case 0:
4417 ret = read_ldt(ptr, bytecount);
4418 break;
4419 case 1:
4420 ret = write_ldt(env, ptr, bytecount, 1);
4421 break;
4422 case 0x11:
4423 ret = write_ldt(env, ptr, bytecount, 0);
4424 break;
4425 default:
4426 ret = -TARGET_ENOSYS;
4427 break;
4429 return ret;
4432 #if defined(TARGET_I386) && defined(TARGET_ABI32)
4433 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4435 uint64_t *gdt_table = g2h(env->gdt.base);
4436 struct target_modify_ldt_ldt_s ldt_info;
4437 struct target_modify_ldt_ldt_s *target_ldt_info;
4438 int seg_32bit, contents, read_exec_only, limit_in_pages;
4439 int seg_not_present, useable, lm;
4440 uint32_t *lp, entry_1, entry_2;
4441 int i;
4443 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4444 if (!target_ldt_info)
4445 return -TARGET_EFAULT;
4446 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4447 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4448 ldt_info.limit = tswap32(target_ldt_info->limit);
4449 ldt_info.flags = tswap32(target_ldt_info->flags);
4450 if (ldt_info.entry_number == -1) {
4451 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4452 if (gdt_table[i] == 0) {
4453 ldt_info.entry_number = i;
4454 target_ldt_info->entry_number = tswap32(i);
4455 break;
4459 unlock_user_struct(target_ldt_info, ptr, 1);
4461 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4462 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4463 return -TARGET_EINVAL;
4464 seg_32bit = ldt_info.flags & 1;
4465 contents = (ldt_info.flags >> 1) & 3;
4466 read_exec_only = (ldt_info.flags >> 3) & 1;
4467 limit_in_pages = (ldt_info.flags >> 4) & 1;
4468 seg_not_present = (ldt_info.flags >> 5) & 1;
4469 useable = (ldt_info.flags >> 6) & 1;
4470 #ifdef TARGET_ABI32
4471 lm = 0;
4472 #else
4473 lm = (ldt_info.flags >> 7) & 1;
4474 #endif
4476 if (contents == 3) {
4477 if (seg_not_present == 0)
4478 return -TARGET_EINVAL;
4481 /* NOTE: same code as Linux kernel */
4482 /* Allow LDTs to be cleared by the user. */
4483 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4484 if ((contents == 0 &&
4485 read_exec_only == 1 &&
4486 seg_32bit == 0 &&
4487 limit_in_pages == 0 &&
4488 seg_not_present == 1 &&
4489 useable == 0 )) {
4490 entry_1 = 0;
4491 entry_2 = 0;
4492 goto install;
4496 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4497 (ldt_info.limit & 0x0ffff);
4498 entry_2 = (ldt_info.base_addr & 0xff000000) |
4499 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4500 (ldt_info.limit & 0xf0000) |
4501 ((read_exec_only ^ 1) << 9) |
4502 (contents << 10) |
4503 ((seg_not_present ^ 1) << 15) |
4504 (seg_32bit << 22) |
4505 (limit_in_pages << 23) |
4506 (useable << 20) |
4507 (lm << 21) |
4508 0x7000;
4510 /* Install the new entry ... */
4511 install:
4512 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4513 lp[0] = tswap32(entry_1);
4514 lp[1] = tswap32(entry_2);
4515 return 0;
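/* get_thread_area: decode the GDT descriptor for the requested TLS entry
   back into the target_modify_ldt_ldt_s layout for the guest. */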
4518 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4520 struct target_modify_ldt_ldt_s *target_ldt_info;
4521 uint64_t *gdt_table = g2h(env->gdt.base);
4522 uint32_t base_addr, limit, flags;
4523 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4524 int seg_not_present, useable, lm;
4525 uint32_t *lp, entry_1, entry_2;
4527 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4528 if (!target_ldt_info)
4529 return -TARGET_EFAULT;
4530 idx = tswap32(target_ldt_info->entry_number);
4531 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4532 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4533 unlock_user_struct(target_ldt_info, ptr, 1);
4534 return -TARGET_EINVAL;
4536 lp = (uint32_t *)(gdt_table + idx);
4537 entry_1 = tswap32(lp[0]);
4538 entry_2 = tswap32(lp[1]);
4540 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4541 contents = (entry_2 >> 10) & 3;
4542 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4543 seg_32bit = (entry_2 >> 22) & 1;
4544 limit_in_pages = (entry_2 >> 23) & 1;
4545 useable = (entry_2 >> 20) & 1;
4546 #ifdef TARGET_ABI32
4547 lm = 0;
4548 #else
4549 lm = (entry_2 >> 21) & 1;
4550 #endif
4551 flags = (seg_32bit << 0) | (contents << 1) |
4552 (read_exec_only << 3) | (limit_in_pages << 4) |
4553 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4554 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4555 base_addr = (entry_1 >> 16) |
4556 (entry_2 & 0xff000000) |
4557 ((entry_2 & 0xff) << 16);
4558 target_ldt_info->base_addr = tswapal(base_addr);
4559 target_ldt_info->limit = tswap32(limit);
4560 target_ldt_info->flags = tswap32(flags);
4561 unlock_user_struct(target_ldt_info, ptr, 1);
4562 return 0;
4564 #endif /* TARGET_I386 && TARGET_ABI32 */
4566 #ifndef TARGET_ABI32
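/* x86_64 arch_prctl: get or set the FS/GS segment base for the guest. */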
4567 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4569 abi_long ret = 0;
4570 abi_ulong val;
4571 int idx;
4573 switch(code) {
4574 case TARGET_ARCH_SET_GS:
4575 case TARGET_ARCH_SET_FS:
4576 if (code == TARGET_ARCH_SET_GS)
4577 idx = R_GS;
4578 else
4579 idx = R_FS;
4580 cpu_x86_load_seg(env, idx, 0);
4581 env->segs[idx].base = addr;
4582 break;
4583 case TARGET_ARCH_GET_GS:
4584 case TARGET_ARCH_GET_FS:
4585 if (code == TARGET_ARCH_GET_GS)
4586 idx = R_GS;
4587 else
4588 idx = R_FS;
4589 val = env->segs[idx].base;
4590 if (put_user(val, addr, abi_ulong))
4591 ret = -TARGET_EFAULT;
4592 break;
4593 default:
4594 ret = -TARGET_EINVAL;
4595 break;
4597 return ret;
4599 #endif
4601 #endif /* defined(TARGET_I386) */
4603 #define NEW_STACK_SIZE 0x40000
4606 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4607 typedef struct {
4608 CPUArchState *env;
4609 pthread_mutex_t mutex;
4610 pthread_cond_t cond;
4611 pthread_t thread;
4612 uint32_t tid;
4613 abi_ulong child_tidptr;
4614 abi_ulong parent_tidptr;
4615 sigset_t sigmask;
4616 } new_thread_info;
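/* Start routine for threads created with clone(): publish the new TID,
   restore the signal mask, wake the waiting parent and enter the CPU loop. */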
4618 static void *clone_func(void *arg)
4620 new_thread_info *info = arg;
4621 CPUArchState *env;
4622 CPUState *cpu;
4623 TaskState *ts;
4625 rcu_register_thread();
4626 env = info->env;
4627 cpu = ENV_GET_CPU(env);
4628 thread_cpu = cpu;
4629 ts = (TaskState *)cpu->opaque;
4630 info->tid = gettid();
4631 cpu->host_tid = info->tid;
4632 task_settid(ts);
4633 if (info->child_tidptr)
4634 put_user_u32(info->tid, info->child_tidptr);
4635 if (info->parent_tidptr)
4636 put_user_u32(info->tid, info->parent_tidptr);
4637 /* Enable signals. */
4638 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4639 /* Signal to the parent that we're ready. */
4640 pthread_mutex_lock(&info->mutex);
4641 pthread_cond_broadcast(&info->cond);
4642 pthread_mutex_unlock(&info->mutex);
4643 /* Wait until the parent has finished initializing the TLS state. */
4644 pthread_mutex_lock(&clone_lock);
4645 pthread_mutex_unlock(&clone_lock);
4646 cpu_loop(env);
4647 /* never exits */
4648 return NULL;
4651 /* do_fork() must return host values and target errnos (unlike most
4652 do_*() functions). */
4653 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4654 abi_ulong parent_tidptr, target_ulong newtls,
4655 abi_ulong child_tidptr)
4657 CPUState *cpu = ENV_GET_CPU(env);
4658 int ret;
4659 TaskState *ts;
4660 CPUState *new_cpu;
4661 CPUArchState *new_env;
4662 unsigned int nptl_flags;
4663 sigset_t sigmask;
4665 /* Emulate vfork() with fork() */
4666 if (flags & CLONE_VFORK)
4667 flags &= ~(CLONE_VFORK | CLONE_VM);
4669 if (flags & CLONE_VM) {
4670 TaskState *parent_ts = (TaskState *)cpu->opaque;
4671 new_thread_info info;
4672 pthread_attr_t attr;
4674 ts = g_new0(TaskState, 1);
4675 init_task_state(ts);
4676 /* we create a new CPU instance. */
4677 new_env = cpu_copy(env);
4678 /* Init regs that differ from the parent. */
4679 cpu_clone_regs(new_env, newsp);
4680 new_cpu = ENV_GET_CPU(new_env);
4681 new_cpu->opaque = ts;
4682 ts->bprm = parent_ts->bprm;
4683 ts->info = parent_ts->info;
4684 nptl_flags = flags;
4685 flags &= ~CLONE_NPTL_FLAGS2;
4687 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4688 ts->child_tidptr = child_tidptr;
4691 if (nptl_flags & CLONE_SETTLS)
4692 cpu_set_tls (new_env, newtls);
4694 /* Grab a mutex so that thread setup appears atomic. */
4695 pthread_mutex_lock(&clone_lock);
4697 memset(&info, 0, sizeof(info));
4698 pthread_mutex_init(&info.mutex, NULL);
4699 pthread_mutex_lock(&info.mutex);
4700 pthread_cond_init(&info.cond, NULL);
4701 info.env = new_env;
4702 if (nptl_flags & CLONE_CHILD_SETTID)
4703 info.child_tidptr = child_tidptr;
4704 if (nptl_flags & CLONE_PARENT_SETTID)
4705 info.parent_tidptr = parent_tidptr;
4707 ret = pthread_attr_init(&attr);
4708 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4709 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4710 /* It is not safe to deliver signals until the child has finished
4711 initializing, so temporarily block all signals. */
4712 sigfillset(&sigmask);
4713 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4715 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4716 /* TODO: Free new CPU state if thread creation failed. */
4718 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4719 pthread_attr_destroy(&attr);
4720 if (ret == 0) {
4721 /* Wait for the child to initialize. */
4722 pthread_cond_wait(&info.cond, &info.mutex);
4723 ret = info.tid;
4724 if (flags & CLONE_PARENT_SETTID)
4725 put_user_u32(ret, parent_tidptr);
4726 } else {
4727 ret = -1;
4729 pthread_mutex_unlock(&info.mutex);
4730 pthread_cond_destroy(&info.cond);
4731 pthread_mutex_destroy(&info.mutex);
4732 pthread_mutex_unlock(&clone_lock);
4733 } else {
4734 /* if no CLONE_VM, we consider it to be a fork */
4735 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
4736 return -TARGET_EINVAL;
4738 fork_start();
4739 ret = fork();
4740 if (ret == 0) {
4741 /* Child Process. */
4742 rcu_after_fork();
4743 cpu_clone_regs(env, newsp);
4744 fork_end(1);
4745 /* There is a race condition here. The parent process could
4746 theoretically read the TID in the child process before the child
4747 tid is set. This would require using either ptrace
4748 (not implemented) or having *_tidptr point at a shared memory
4749 mapping. We can't repeat the spinlock hack used above because
4750 the child process gets its own copy of the lock. */
4751 if (flags & CLONE_CHILD_SETTID)
4752 put_user_u32(gettid(), child_tidptr);
4753 if (flags & CLONE_PARENT_SETTID)
4754 put_user_u32(gettid(), parent_tidptr);
4755 ts = (TaskState *)cpu->opaque;
4756 if (flags & CLONE_SETTLS)
4757 cpu_set_tls (env, newtls);
4758 if (flags & CLONE_CHILD_CLEARTID)
4759 ts->child_tidptr = child_tidptr;
4760 } else {
4761 fork_end(0);
4764 return ret;
4767 /* warning: doesn't handle Linux-specific flags... */
4768 static int target_to_host_fcntl_cmd(int cmd)
4770 switch(cmd) {
4771 case TARGET_F_DUPFD:
4772 case TARGET_F_GETFD:
4773 case TARGET_F_SETFD:
4774 case TARGET_F_GETFL:
4775 case TARGET_F_SETFL:
4776 return cmd;
4777 case TARGET_F_GETLK:
4778 return F_GETLK;
4779 case TARGET_F_SETLK:
4780 return F_SETLK;
4781 case TARGET_F_SETLKW:
4782 return F_SETLKW;
4783 case TARGET_F_GETOWN:
4784 return F_GETOWN;
4785 case TARGET_F_SETOWN:
4786 return F_SETOWN;
4787 case TARGET_F_GETSIG:
4788 return F_GETSIG;
4789 case TARGET_F_SETSIG:
4790 return F_SETSIG;
4791 #if TARGET_ABI_BITS == 32
4792 case TARGET_F_GETLK64:
4793 return F_GETLK64;
4794 case TARGET_F_SETLK64:
4795 return F_SETLK64;
4796 case TARGET_F_SETLKW64:
4797 return F_SETLKW64;
4798 #endif
4799 case TARGET_F_SETLEASE:
4800 return F_SETLEASE;
4801 case TARGET_F_GETLEASE:
4802 return F_GETLEASE;
4803 #ifdef F_DUPFD_CLOEXEC
4804 case TARGET_F_DUPFD_CLOEXEC:
4805 return F_DUPFD_CLOEXEC;
4806 #endif
4807 case TARGET_F_NOTIFY:
4808 return F_NOTIFY;
4809 #ifdef F_GETOWN_EX
4810 case TARGET_F_GETOWN_EX:
4811 return F_GETOWN_EX;
4812 #endif
4813 #ifdef F_SETOWN_EX
4814 case TARGET_F_SETOWN_EX:
4815 return F_SETOWN_EX;
4816 #endif
4817 default:
4818 return -TARGET_EINVAL;
4820 return -TARGET_EINVAL;
4823 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4824 static const bitmask_transtbl flock_tbl[] = {
4825 TRANSTBL_CONVERT(F_RDLCK),
4826 TRANSTBL_CONVERT(F_WRLCK),
4827 TRANSTBL_CONVERT(F_UNLCK),
4828 TRANSTBL_CONVERT(F_EXLCK),
4829 TRANSTBL_CONVERT(F_SHLCK),
4830 { 0, 0, 0, 0 }
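/* fcntl emulation: flock/flock64 and f_owner_ex arguments are converted
   between target and host layouts; integer arguments are passed through
   (with F_GETFL/F_SETFL flag bits translated). */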
4833 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4835 struct flock fl;
4836 struct target_flock *target_fl;
4837 struct flock64 fl64;
4838 struct target_flock64 *target_fl64;
4839 #ifdef F_GETOWN_EX
4840 struct f_owner_ex fox;
4841 struct target_f_owner_ex *target_fox;
4842 #endif
4843 abi_long ret;
4844 int host_cmd = target_to_host_fcntl_cmd(cmd);
4846 if (host_cmd == -TARGET_EINVAL)
4847 return host_cmd;
4849 switch(cmd) {
4850 case TARGET_F_GETLK:
4851 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4852 return -TARGET_EFAULT;
4853 fl.l_type =
4854 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4855 fl.l_whence = tswap16(target_fl->l_whence);
4856 fl.l_start = tswapal(target_fl->l_start);
4857 fl.l_len = tswapal(target_fl->l_len);
4858 fl.l_pid = tswap32(target_fl->l_pid);
4859 unlock_user_struct(target_fl, arg, 0);
4860 ret = get_errno(fcntl(fd, host_cmd, &fl));
4861 if (ret == 0) {
4862 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4863 return -TARGET_EFAULT;
4864 target_fl->l_type =
4865 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4866 target_fl->l_whence = tswap16(fl.l_whence);
4867 target_fl->l_start = tswapal(fl.l_start);
4868 target_fl->l_len = tswapal(fl.l_len);
4869 target_fl->l_pid = tswap32(fl.l_pid);
4870 unlock_user_struct(target_fl, arg, 1);
4872 break;
4874 case TARGET_F_SETLK:
4875 case TARGET_F_SETLKW:
4876 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4877 return -TARGET_EFAULT;
4878 fl.l_type =
4879 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4880 fl.l_whence = tswap16(target_fl->l_whence);
4881 fl.l_start = tswapal(target_fl->l_start);
4882 fl.l_len = tswapal(target_fl->l_len);
4883 fl.l_pid = tswap32(target_fl->l_pid);
4884 unlock_user_struct(target_fl, arg, 0);
4885 ret = get_errno(fcntl(fd, host_cmd, &fl));
4886 break;
4888 case TARGET_F_GETLK64:
4889 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4890 return -TARGET_EFAULT;
4891 fl64.l_type =
4892 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4893 fl64.l_whence = tswap16(target_fl64->l_whence);
4894 fl64.l_start = tswap64(target_fl64->l_start);
4895 fl64.l_len = tswap64(target_fl64->l_len);
4896 fl64.l_pid = tswap32(target_fl64->l_pid);
4897 unlock_user_struct(target_fl64, arg, 0);
4898 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4899 if (ret == 0) {
4900 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4901 return -TARGET_EFAULT;
4902 target_fl64->l_type =
4903 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4904 target_fl64->l_whence = tswap16(fl64.l_whence);
4905 target_fl64->l_start = tswap64(fl64.l_start);
4906 target_fl64->l_len = tswap64(fl64.l_len);
4907 target_fl64->l_pid = tswap32(fl64.l_pid);
4908 unlock_user_struct(target_fl64, arg, 1);
4910 break;
4911 case TARGET_F_SETLK64:
4912 case TARGET_F_SETLKW64:
4913 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4914 return -TARGET_EFAULT;
4915 fl64.l_type =
4916 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4917 fl64.l_whence = tswap16(target_fl64->l_whence);
4918 fl64.l_start = tswap64(target_fl64->l_start);
4919 fl64.l_len = tswap64(target_fl64->l_len);
4920 fl64.l_pid = tswap32(target_fl64->l_pid);
4921 unlock_user_struct(target_fl64, arg, 0);
4922 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4923 break;
4925 case TARGET_F_GETFL:
4926 ret = get_errno(fcntl(fd, host_cmd, arg));
4927 if (ret >= 0) {
4928 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4930 break;
4932 case TARGET_F_SETFL:
4933 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4934 break;
4936 #ifdef F_GETOWN_EX
4937 case TARGET_F_GETOWN_EX:
4938 ret = get_errno(fcntl(fd, host_cmd, &fox));
4939 if (ret >= 0) {
4940 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4941 return -TARGET_EFAULT;
4942 target_fox->type = tswap32(fox.type);
4943 target_fox->pid = tswap32(fox.pid);
4944 unlock_user_struct(target_fox, arg, 1);
4946 break;
4947 #endif
4949 #ifdef F_SETOWN_EX
4950 case TARGET_F_SETOWN_EX:
4951 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4952 return -TARGET_EFAULT;
4953 fox.type = tswap32(target_fox->type);
4954 fox.pid = tswap32(target_fox->pid);
4955 unlock_user_struct(target_fox, arg, 0);
4956 ret = get_errno(fcntl(fd, host_cmd, &fox));
4957 break;
4958 #endif
4960 case TARGET_F_SETOWN:
4961 case TARGET_F_GETOWN:
4962 case TARGET_F_SETSIG:
4963 case TARGET_F_GETSIG:
4964 case TARGET_F_SETLEASE:
4965 case TARGET_F_GETLEASE:
4966 ret = get_errno(fcntl(fd, host_cmd, arg));
4967 break;
4969 default:
4970 ret = get_errno(fcntl(fd, cmd, arg));
4971 break;
4973 return ret;
4976 #ifdef USE_UID16
4978 static inline int high2lowuid(int uid)
4980 if (uid > 65535)
4981 return 65534;
4982 else
4983 return uid;
4986 static inline int high2lowgid(int gid)
4988 if (gid > 65535)
4989 return 65534;
4990 else
4991 return gid;
4994 static inline int low2highuid(int uid)
4996 if ((int16_t)uid == -1)
4997 return -1;
4998 else
4999 return uid;
5002 static inline int low2highgid(int gid)
5004 if ((int16_t)gid == -1)
5005 return -1;
5006 else
5007 return gid;
5009 static inline int tswapid(int id)
5011 return tswap16(id);
5014 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
5016 #else /* !USE_UID16 */
5017 static inline int high2lowuid(int uid)
5019 return uid;
5021 static inline int high2lowgid(int gid)
5023 return gid;
5025 static inline int low2highuid(int uid)
5027 return uid;
5029 static inline int low2highgid(int gid)
5031 return gid;
5033 static inline int tswapid(int id)
5035 return tswap32(id);
5038 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
5040 #endif /* USE_UID16 */
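/* One-time initialisation: register structure layouts with the thunk
   machinery, build the target_to_host errno table, and patch the size
   field of ioctl numbers that could not be computed at compile time. */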
5042 void syscall_init(void)
5044 IOCTLEntry *ie;
5045 const argtype *arg_type;
5046 int size;
5047 int i;
5049 thunk_init(STRUCT_MAX);
5051 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
5052 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
5053 #include "syscall_types.h"
5054 #undef STRUCT
5055 #undef STRUCT_SPECIAL
5057 /* Build target_to_host_errno_table[] table from
5058 * host_to_target_errno_table[]. */
5059 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
5060 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
5063 /* we patch the ioctl size if necessary. We rely on the fact that
5064 no ioctl has all the bits at '1' in the size field */
5065 ie = ioctl_entries;
5066 while (ie->target_cmd != 0) {
5067 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
5068 TARGET_IOC_SIZEMASK) {
5069 arg_type = ie->arg_type;
5070 if (arg_type[0] != TYPE_PTR) {
5071 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
5072 ie->target_cmd);
5073 exit(1);
5075 arg_type++;
5076 size = thunk_type_size(arg_type, 0);
5077 ie->target_cmd = (ie->target_cmd &
5078 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
5079 (size << TARGET_IOC_SIZESHIFT);
5082 /* automatic consistency check if same arch */
5083 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
5084 (defined(__x86_64__) && defined(TARGET_X86_64))
5085 if (unlikely(ie->target_cmd != ie->host_cmd)) {
5086 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
5087 ie->name, ie->target_cmd, ie->host_cmd);
5089 #endif
5090 ie++;
5094 #if TARGET_ABI_BITS == 32
5095 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
5097 #ifdef TARGET_WORDS_BIGENDIAN
5098 return ((uint64_t)word0 << 32) | word1;
5099 #else
5100 return ((uint64_t)word1 << 32) | word0;
5101 #endif
5103 #else /* TARGET_ABI_BITS == 32 */
5104 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
5106 return word0;
5108 #endif /* TARGET_ABI_BITS != 32 */
5110 #ifdef TARGET_NR_truncate64
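/* truncate64/ftruncate64: on targets that pass 64-bit values in aligned
   register pairs, the offset starts one argument later
   (see regpairs_aligned). */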
5111 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
5112 abi_long arg2,
5113 abi_long arg3,
5114 abi_long arg4)
5116 if (regpairs_aligned(cpu_env)) {
5117 arg2 = arg3;
5118 arg3 = arg4;
5120 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
5122 #endif
5124 #ifdef TARGET_NR_ftruncate64
5125 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
5126 abi_long arg2,
5127 abi_long arg3,
5128 abi_long arg4)
5130 if (regpairs_aligned(cpu_env)) {
5131 arg2 = arg3;
5132 arg3 = arg4;
5134 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
5136 #endif
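/* Helpers converting timespec, itimerspec and sigevent structures between
   guest and host representations. */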
5138 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
5139 abi_ulong target_addr)
5141 struct target_timespec *target_ts;
5143 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
5144 return -TARGET_EFAULT;
5145 host_ts->tv_sec = tswapal(target_ts->tv_sec);
5146 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
5147 unlock_user_struct(target_ts, target_addr, 0);
5148 return 0;
5151 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
5152 struct timespec *host_ts)
5154 struct target_timespec *target_ts;
5156 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
5157 return -TARGET_EFAULT;
5158 target_ts->tv_sec = tswapal(host_ts->tv_sec);
5159 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
5160 unlock_user_struct(target_ts, target_addr, 1);
5161 return 0;
5164 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
5165 abi_ulong target_addr)
5167 struct target_itimerspec *target_itspec;
5169 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
5170 return -TARGET_EFAULT;
5173 host_itspec->it_interval.tv_sec =
5174 tswapal(target_itspec->it_interval.tv_sec);
5175 host_itspec->it_interval.tv_nsec =
5176 tswapal(target_itspec->it_interval.tv_nsec);
5177 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
5178 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
5180 unlock_user_struct(target_itspec, target_addr, 1);
5181 return 0;
5184 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
5185 struct itimerspec *host_its)
5187 struct target_itimerspec *target_itspec;
5189 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
5190 return -TARGET_EFAULT;
5193 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
5194 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
5196 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
5197 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
5199 unlock_user_struct(target_itspec, target_addr, 0);
5200 return 0;
5203 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
5204 abi_ulong target_addr)
5206 struct target_sigevent *target_sevp;
5208 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
5209 return -TARGET_EFAULT;
5212 /* This union is awkward on 64 bit systems because it has a 32 bit
5213 * integer and a pointer in it; we follow the conversion approach
5214 * used for handling sigval types in signal.c so the guest should get
5215 * the correct value back even if we did a 64 bit byteswap and it's
5216 * using the 32 bit integer.
5218 host_sevp->sigev_value.sival_ptr =
5219 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
5220 host_sevp->sigev_signo =
5221 target_to_host_signal(tswap32(target_sevp->sigev_signo));
5222 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
5223 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
5225 unlock_user_struct(target_sevp, target_addr, 1);
5226 return 0;
5229 #if defined(TARGET_NR_mlockall)
5230 static inline int target_to_host_mlockall_arg(int arg)
5232 int result = 0;
5234 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
5235 result |= MCL_CURRENT;
5237 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
5238 result |= MCL_FUTURE;
5240 return result;
5242 #endif
5244 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
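/* Copy a host struct stat into the target's stat64 layout; ARM EABI
   guests use their own target_eabi_stat64 variant. */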
5245 static inline abi_long host_to_target_stat64(void *cpu_env,
5246 abi_ulong target_addr,
5247 struct stat *host_st)
5249 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
5250 if (((CPUARMState *)cpu_env)->eabi) {
5251 struct target_eabi_stat64 *target_st;
5253 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5254 return -TARGET_EFAULT;
5255 memset(target_st, 0, sizeof(struct target_eabi_stat64));
5256 __put_user(host_st->st_dev, &target_st->st_dev);
5257 __put_user(host_st->st_ino, &target_st->st_ino);
5258 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5259 __put_user(host_st->st_ino, &target_st->__st_ino);
5260 #endif
5261 __put_user(host_st->st_mode, &target_st->st_mode);
5262 __put_user(host_st->st_nlink, &target_st->st_nlink);
5263 __put_user(host_st->st_uid, &target_st->st_uid);
5264 __put_user(host_st->st_gid, &target_st->st_gid);
5265 __put_user(host_st->st_rdev, &target_st->st_rdev);
5266 __put_user(host_st->st_size, &target_st->st_size);
5267 __put_user(host_st->st_blksize, &target_st->st_blksize);
5268 __put_user(host_st->st_blocks, &target_st->st_blocks);
5269 __put_user(host_st->st_atime, &target_st->target_st_atime);
5270 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5271 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5272 unlock_user_struct(target_st, target_addr, 1);
5273 } else
5274 #endif
5276 #if defined(TARGET_HAS_STRUCT_STAT64)
5277 struct target_stat64 *target_st;
5278 #else
5279 struct target_stat *target_st;
5280 #endif
5282 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
5283 return -TARGET_EFAULT;
5284 memset(target_st, 0, sizeof(*target_st));
5285 __put_user(host_st->st_dev, &target_st->st_dev);
5286 __put_user(host_st->st_ino, &target_st->st_ino);
5287 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
5288 __put_user(host_st->st_ino, &target_st->__st_ino);
5289 #endif
5290 __put_user(host_st->st_mode, &target_st->st_mode);
5291 __put_user(host_st->st_nlink, &target_st->st_nlink);
5292 __put_user(host_st->st_uid, &target_st->st_uid);
5293 __put_user(host_st->st_gid, &target_st->st_gid);
5294 __put_user(host_st->st_rdev, &target_st->st_rdev);
5295 /* XXX: better use of kernel struct */
5296 __put_user(host_st->st_size, &target_st->st_size);
5297 __put_user(host_st->st_blksize, &target_st->st_blksize);
5298 __put_user(host_st->st_blocks, &target_st->st_blocks);
5299 __put_user(host_st->st_atime, &target_st->target_st_atime);
5300 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
5301 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
5302 unlock_user_struct(target_st, target_addr, 1);
5305 return 0;
5307 #endif
5309 /* ??? Using host futex calls even when target atomic operations
5310 are not really atomic probably breaks things. However, implementing
5311 futexes locally would make futexes shared between multiple processes
5312 tricky. Then again, they are probably useless anyway because guest
5313 atomic operations won't work either. */
5314 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
5315 target_ulong uaddr2, int val3)
5317 struct timespec ts, *pts;
5318 int base_op;
5320 /* ??? We assume FUTEX_* constants are the same on both host
5321 and target. */
5322 #ifdef FUTEX_CMD_MASK
5323 base_op = op & FUTEX_CMD_MASK;
5324 #else
5325 base_op = op;
5326 #endif
5327 switch (base_op) {
5328 case FUTEX_WAIT:
5329 case FUTEX_WAIT_BITSET:
5330 if (timeout) {
5331 pts = &ts;
5332 target_to_host_timespec(pts, timeout);
5333 } else {
5334 pts = NULL;
5336 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
5337 pts, NULL, val3));
5338 case FUTEX_WAKE:
5339 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5340 case FUTEX_FD:
5341 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
5342 case FUTEX_REQUEUE:
5343 case FUTEX_CMP_REQUEUE:
5344 case FUTEX_WAKE_OP:
5345 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
5346 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
5347 But the prototype takes a `struct timespec *'; insert casts
5348 to satisfy the compiler. We do not need to tswap TIMEOUT
5349 since it's not compared to guest memory. */
5350 pts = (struct timespec *)(uintptr_t) timeout;
5351 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
5352 g2h(uaddr2),
5353 (base_op == FUTEX_CMP_REQUEUE
5354 ? tswap32(val3)
5355 : val3)));
5356 default:
5357 return -TARGET_ENOSYS;
5360 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
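/* name_to_handle_at: the variable-sized file_handle buffer is duplicated
   on the host side so the kernel can fill it in, then copied back to the
   guest with the header fields byte-swapped. */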
5361 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
5362 abi_long handle, abi_long mount_id,
5363 abi_long flags)
5365 struct file_handle *target_fh;
5366 struct file_handle *fh;
5367 int mid = 0;
5368 abi_long ret;
5369 char *name;
5370 unsigned int size, total_size;
5372 if (get_user_s32(size, handle)) {
5373 return -TARGET_EFAULT;
5376 name = lock_user_string(pathname);
5377 if (!name) {
5378 return -TARGET_EFAULT;
5381 total_size = sizeof(struct file_handle) + size;
5382 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
5383 if (!target_fh) {
5384 unlock_user(name, pathname, 0);
5385 return -TARGET_EFAULT;
5388 fh = g_malloc0(total_size);
5389 fh->handle_bytes = size;
5391 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
5392 unlock_user(name, pathname, 0);
5394 /* man name_to_handle_at(2):
5395 * Other than the use of the handle_bytes field, the caller should treat
5396 * the file_handle structure as an opaque data type
5399 memcpy(target_fh, fh, total_size);
5400 target_fh->handle_bytes = tswap32(fh->handle_bytes);
5401 target_fh->handle_type = tswap32(fh->handle_type);
5402 g_free(fh);
5403 unlock_user(target_fh, handle, total_size);
5405 if (put_user_s32(mid, mount_id)) {
5406 return -TARGET_EFAULT;
5409 return ret;
5412 #endif
5414 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5415 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
5416 abi_long flags)
5418 struct file_handle *target_fh;
5419 struct file_handle *fh;
5420 unsigned int size, total_size;
5421 abi_long ret;
5423 if (get_user_s32(size, handle)) {
5424 return -TARGET_EFAULT;
5427 total_size = sizeof(struct file_handle) + size;
5428 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
5429 if (!target_fh) {
5430 return -TARGET_EFAULT;
5433 fh = g_memdup(target_fh, total_size);
5434 fh->handle_bytes = size;
5435 fh->handle_type = tswap32(target_fh->handle_type);
5437 ret = get_errno(open_by_handle_at(mount_fd, fh,
5438 target_to_host_bitmask(flags, fcntl_flags_tbl)));
5440 g_free(fh);
5442 unlock_user(target_fh, handle, total_size);
5444 return ret;
5446 #endif
5448 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
5450 /* signalfd siginfo conversion */
5452 static void
5453 host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
5454 const struct signalfd_siginfo *info)
5456 int sig = host_to_target_signal(info->ssi_signo);
5458 /* linux/signalfd.h defines an ssi_addr_lsb field that is not present
5459 * in sys/signalfd.h but is used by some kernels
5462 #ifdef BUS_MCEERR_AO
5463 if (tinfo->ssi_signo == SIGBUS &&
5464 (tinfo->ssi_code == BUS_MCEERR_AR ||
5465 tinfo->ssi_code == BUS_MCEERR_AO)) {
5466 uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
5467 uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
5468 *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
5470 #endif
5472 tinfo->ssi_signo = tswap32(sig);
5473 tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
5474 tinfo->ssi_code = tswap32(info->ssi_code);
5475 tinfo->ssi_pid = tswap32(info->ssi_pid);
5476 tinfo->ssi_uid = tswap32(info->ssi_uid);
5477 tinfo->ssi_fd = tswap32(info->ssi_fd);
5478 tinfo->ssi_tid = tswap32(info->ssi_tid);
5479 tinfo->ssi_band = tswap32(info->ssi_band);
5480 tinfo->ssi_overrun = tswap32(info->ssi_overrun);
5481 tinfo->ssi_trapno = tswap32(info->ssi_trapno);
5482 tinfo->ssi_status = tswap32(info->ssi_status);
5483 tinfo->ssi_int = tswap32(info->ssi_int);
5484 tinfo->ssi_ptr = tswap64(info->ssi_ptr);
5485 tinfo->ssi_utime = tswap64(info->ssi_utime);
5486 tinfo->ssi_stime = tswap64(info->ssi_stime);
5487 tinfo->ssi_addr = tswap64(info->ssi_addr);
5490 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
5492 int i;
5494 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
5495 host_to_target_signalfd_siginfo(buf + i, buf + i);
5498 return len;
5501 static TargetFdTrans target_signalfd_trans = {
5502 .host_to_target_data = host_to_target_data_signalfd,
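/* signalfd/signalfd4: convert the target signal mask and flags, then
   register a translator so siginfo records read from the new descriptor
   are converted back to the target layout. */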
5505 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
5507 int host_flags;
5508 target_sigset_t *target_mask;
5509 sigset_t host_mask;
5510 abi_long ret;
5512 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
5513 return -TARGET_EINVAL;
5515 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
5516 return -TARGET_EFAULT;
5519 target_to_host_sigset(&host_mask, target_mask);
5521 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
5523 ret = get_errno(signalfd(fd, &host_mask, host_flags));
5524 if (ret >= 0) {
5525 fd_trans_register(ret, &target_signalfd_trans);
5528 unlock_user_struct(target_mask, mask, 0);
5530 return ret;
5532 #endif
5534 /* Map host to target signal numbers for the wait family of syscalls.
5535 Assume all other status bits are the same. */
5536 int host_to_target_waitstatus(int status)
5538 if (WIFSIGNALED(status)) {
5539 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
5541 if (WIFSTOPPED(status)) {
5542 return (host_to_target_signal(WSTOPSIG(status)) << 8)
5543 | (status & 0xff);
5545 return status;
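/* Synthetic /proc/self/cmdline: copy the real file but drop the first
   string, which is the path to the qemu binary rather than the guest's
   own argv[0]. */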
5548 static int open_self_cmdline(void *cpu_env, int fd)
5550 int fd_orig = -1;
5551 bool word_skipped = false;
5553 fd_orig = open("/proc/self/cmdline", O_RDONLY);
5554 if (fd_orig < 0) {
5555 return fd_orig;
5558 while (true) {
5559 ssize_t nb_read;
5560 char buf[128];
5561 char *cp_buf = buf;
5563 nb_read = read(fd_orig, buf, sizeof(buf));
5564 if (nb_read < 0) {
5565 fd_orig = close(fd_orig);
5566 return -1;
5567 } else if (nb_read == 0) {
5568 break;
5571 if (!word_skipped) {
5572 /* Skip the first string, which is the path to qemu-*-static
5573 instead of the actual command. */
5574 cp_buf = memchr(buf, 0, sizeof(buf));
5575 if (cp_buf) {
5576 /* Null byte found, skip one string */
5577 cp_buf++;
5578 nb_read -= cp_buf - buf;
5579 word_skipped = true;
5583 if (word_skipped) {
5584 if (write(fd, cp_buf, nb_read) != nb_read) {
5585 close(fd_orig);
5586 return -1;
5591 return close(fd_orig);
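/* Synthetic /proc/self/maps: translate host mappings into guest addresses
   and report only the ranges that are valid in the guest address space. */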
5594 static int open_self_maps(void *cpu_env, int fd)
5596 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5597 TaskState *ts = cpu->opaque;
5598 FILE *fp;
5599 char *line = NULL;
5600 size_t len = 0;
5601 ssize_t read;
5603 fp = fopen("/proc/self/maps", "r");
5604 if (fp == NULL) {
5605 return -EACCES;
5608 while ((read = getline(&line, &len, fp)) != -1) {
5609 int fields, dev_maj, dev_min, inode;
5610 uint64_t min, max, offset;
5611 char flag_r, flag_w, flag_x, flag_p;
5612 char path[512] = "";
5613 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5614 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5615 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5617 if ((fields < 10) || (fields > 11)) {
5618 continue;
5620 if (h2g_valid(min)) {
5621 int flags = page_get_flags(h2g(min));
5622 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
5623 if (page_check_range(h2g(min), max - min, flags) == -1) {
5624 continue;
5626 if (h2g(min) == ts->info->stack_limit) {
5627 pstrcpy(path, sizeof(path), " [stack]");
5629 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5630 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5631 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
5632 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5633 path[0] ? " " : "", path);
5637 free(line);
5638 fclose(fp);
5640 return 0;
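/* Synthetic /proc/self/stat: only the pid, command name and stack start
   fields carry real values; all other fields are written as zero. */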
5643 static int open_self_stat(void *cpu_env, int fd)
5645 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5646 TaskState *ts = cpu->opaque;
5647 abi_ulong start_stack = ts->info->start_stack;
5648 int i;
5650 for (i = 0; i < 44; i++) {
5651 char buf[128];
5652 int len;
5653 uint64_t val = 0;
5655 if (i == 0) {
5656 /* pid */
5657 val = getpid();
5658 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5659 } else if (i == 1) {
5660 /* app name */
5661 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5662 } else if (i == 27) {
5663 /* stack bottom */
5664 val = start_stack;
5665 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5666 } else {
5667 /* all remaining fields are reported as zero */
5668 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5671 len = strlen(buf);
5672 if (write(fd, buf, len) != len) {
5673 return -1;
5677 return 0;
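/* Synthetic /proc/self/auxv: dump the auxiliary vector that was saved on
   the guest stack when the binary was loaded. */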
5680 static int open_self_auxv(void *cpu_env, int fd)
5682 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5683 TaskState *ts = cpu->opaque;
5684 abi_ulong auxv = ts->info->saved_auxv;
5685 abi_ulong len = ts->info->auxv_len;
5686 char *ptr;
5689 * The auxiliary vector is stored on the target process stack.
5690 * Read the whole auxv vector and copy it to the file.
5692 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5693 if (ptr != NULL) {
5694 while (len > 0) {
5695 ssize_t r;
5696 r = write(fd, ptr, len);
5697 if (r <= 0) {
5698 break;
5700 len -= r;
5701 ptr += r;
5703 lseek(fd, 0, SEEK_SET);
5704 unlock_user(ptr, auxv, len);
5707 return 0;
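/* Return non-zero if filename names the given /proc entry for the current
   process, either via /proc/self/ or /proc/<pid>/ for our own pid. */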
5710 static int is_proc_myself(const char *filename, const char *entry)
5712 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5713 filename += strlen("/proc/");
5714 if (!strncmp(filename, "self/", strlen("self/"))) {
5715 filename += strlen("self/");
5716 } else if (*filename >= '1' && *filename <= '9') {
5717 char myself[80];
5718 snprintf(myself, sizeof(myself), "%d/", getpid());
5719 if (!strncmp(filename, myself, strlen(myself))) {
5720 filename += strlen(myself);
5721 } else {
5722 return 0;
5724 } else {
5725 return 0;
5727 if (!strcmp(filename, entry)) {
5728 return 1;
5731 return 0;
5734 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5735 static int is_proc(const char *filename, const char *entry)
5737 return strcmp(filename, entry) == 0;
5740 static int open_net_route(void *cpu_env, int fd)
5742 FILE *fp;
5743 char *line = NULL;
5744 size_t len = 0;
5745 ssize_t read;
5747 fp = fopen("/proc/net/route", "r");
5748 if (fp == NULL) {
5749 return -EACCES;
5752 /* read header */
5754 read = getline(&line, &len, fp);
5755 dprintf(fd, "%s", line);
5757 /* read routes */
5759 while ((read = getline(&line, &len, fp)) != -1) {
5760 char iface[16];
5761 uint32_t dest, gw, mask;
5762 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5763 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5764 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5765 &mask, &mtu, &window, &irtt);
5766 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5767 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5768 metric, tswap32(mask), mtu, window, irtt);
5771 free(line);
5772 fclose(fp);
5774 return 0;
5776 #endif
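/* openat emulation: a few /proc paths are intercepted and replaced by
   temporary files filled in by the open_self_*()/open_net_route() helpers
   above; everything else is forwarded to the host openat(). */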
5778 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
5780 struct fake_open {
5781 const char *filename;
5782 int (*fill)(void *cpu_env, int fd);
5783 int (*cmp)(const char *s1, const char *s2);
5785 const struct fake_open *fake_open;
5786 static const struct fake_open fakes[] = {
5787 { "maps", open_self_maps, is_proc_myself },
5788 { "stat", open_self_stat, is_proc_myself },
5789 { "auxv", open_self_auxv, is_proc_myself },
5790 { "cmdline", open_self_cmdline, is_proc_myself },
5791 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5792 { "/proc/net/route", open_net_route, is_proc },
5793 #endif
5794 { NULL, NULL, NULL }
5797 if (is_proc_myself(pathname, "exe")) {
5798 int execfd = qemu_getauxval(AT_EXECFD);
5799 return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
5802 for (fake_open = fakes; fake_open->filename; fake_open++) {
5803 if (fake_open->cmp(pathname, fake_open->filename)) {
5804 break;
5808 if (fake_open->filename) {
5809 const char *tmpdir;
5810 char filename[PATH_MAX];
5811 int fd, r;
5813 /* create a temporary file to hold the synthetic /proc contents */
5814 tmpdir = getenv("TMPDIR");
5815 if (!tmpdir)
5816 tmpdir = "/tmp";
5817 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5818 fd = mkstemp(filename);
5819 if (fd < 0) {
5820 return fd;
5822 unlink(filename);
5824 if ((r = fake_open->fill(cpu_env, fd))) {
5825 close(fd);
5826 return r;
5828 lseek(fd, 0, SEEK_SET);
5830 return fd;
5833 return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
5836 #define TIMER_MAGIC 0x0caf0000
5837 #define TIMER_MAGIC_MASK 0xffff0000
5839 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
5840 static target_timer_t get_timer_id(abi_long arg)
5842 target_timer_t timerid = arg;
5844 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
5845 return -TARGET_EINVAL;
5848 timerid &= 0xffff;
5850 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
5851 return -TARGET_EINVAL;
5854 return timerid;
5857 /* do_syscall() should always have a single exit point at the end so
5858 that actions, such as logging of syscall results, can be performed.
5859 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5860 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5861 abi_long arg2, abi_long arg3, abi_long arg4,
5862 abi_long arg5, abi_long arg6, abi_long arg7,
5863 abi_long arg8)
5865 CPUState *cpu = ENV_GET_CPU(cpu_env);
5866 abi_long ret;
5867 struct stat st;
5868 struct statfs stfs;
5869 void *p;
5871 #ifdef DEBUG
5872 gemu_log("syscall %d", num);
5873 #endif
5874 if(do_strace)
5875 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5877 switch(num) {
5878 case TARGET_NR_exit:
5879 /* In old applications this may be used to implement _exit(2).
5880 However, in threaded applications it is used for thread termination,
5881 and _exit_group is used for application termination.
5882 Do thread termination if we have more than one thread. */
5883 /* FIXME: This probably breaks if a signal arrives. We should probably
5884 be disabling signals. */
5885 if (CPU_NEXT(first_cpu)) {
5886 TaskState *ts;
5888 cpu_list_lock();
5889 /* Remove the CPU from the list. */
5890 QTAILQ_REMOVE(&cpus, cpu, node);
5891 cpu_list_unlock();
5892 ts = cpu->opaque;
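            /* CLONE_CHILD_CLEARTID semantics: clear the guest's child_tid
             * word and wake any futex waiters (e.g. a pthread_join) before
             * this thread disappears. */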
5893 if (ts->child_tidptr) {
5894 put_user_u32(0, ts->child_tidptr);
5895 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5896 NULL, NULL, 0);
5898 thread_cpu = NULL;
5899 object_unref(OBJECT(cpu));
5900 g_free(ts);
5901 rcu_unregister_thread();
5902 pthread_exit(NULL);
5904 #ifdef TARGET_GPROF
5905 _mcleanup();
5906 #endif
5907 gdb_exit(cpu_env, arg1);
5908 _exit(arg1);
5909 ret = 0; /* avoid warning */
5910 break;
5911 case TARGET_NR_read:
5912 if (arg3 == 0)
5913 ret = 0;
5914 else {
5915 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5916 goto efault;
5917 ret = get_errno(read(arg1, p, arg3));
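            /* If a host-to-target data translator is registered for this fd,
             * run it over the bytes just read so the guest sees data in its
             * own format. */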
5918 if (ret >= 0 &&
5919 fd_trans_host_to_target_data(arg1)) {
5920 ret = fd_trans_host_to_target_data(arg1)(p, ret);
5922 unlock_user(p, arg2, ret);
5924 break;
5925 case TARGET_NR_write:
5926 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5927 goto efault;
5928 ret = get_errno(write(arg1, p, arg3));
5929 unlock_user(p, arg2, 0);
5930 break;
5931 #ifdef TARGET_NR_open
5932 case TARGET_NR_open:
5933 if (!(p = lock_user_string(arg1)))
5934 goto efault;
5935 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
5936 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5937 arg3));
5938 fd_trans_unregister(ret);
5939 unlock_user(p, arg1, 0);
5940 break;
5941 #endif
5942 case TARGET_NR_openat:
5943 if (!(p = lock_user_string(arg2)))
5944 goto efault;
5945 ret = get_errno(do_openat(cpu_env, arg1, p,
5946 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5947 arg4));
5948 fd_trans_unregister(ret);
5949 unlock_user(p, arg2, 0);
5950 break;
5951 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5952 case TARGET_NR_name_to_handle_at:
5953 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
5954 break;
5955 #endif
5956 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
5957 case TARGET_NR_open_by_handle_at:
5958 ret = do_open_by_handle_at(arg1, arg2, arg3);
5959 fd_trans_unregister(ret);
5960 break;
5961 #endif
5962 case TARGET_NR_close:
5963 fd_trans_unregister(arg1);
5964 ret = get_errno(close(arg1));
5965 break;
5966 case TARGET_NR_brk:
5967 ret = do_brk(arg1);
5968 break;
5969 #ifdef TARGET_NR_fork
5970 case TARGET_NR_fork:
5971 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5972 break;
5973 #endif
5974 #ifdef TARGET_NR_waitpid
5975 case TARGET_NR_waitpid:
5977 int status;
5978 ret = get_errno(waitpid(arg1, &status, arg3));
5979 if (!is_error(ret) && arg2 && ret
5980 && put_user_s32(host_to_target_waitstatus(status), arg2))
5981 goto efault;
5983 break;
5984 #endif
5985 #ifdef TARGET_NR_waitid
5986 case TARGET_NR_waitid:
5988 siginfo_t info;
5989 info.si_pid = 0;
5990 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5991 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5992 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5993 goto efault;
5994 host_to_target_siginfo(p, &info);
5995 unlock_user(p, arg3, sizeof(target_siginfo_t));
5998 break;
5999 #endif
6000 #ifdef TARGET_NR_creat /* not on alpha */
6001 case TARGET_NR_creat:
6002 if (!(p = lock_user_string(arg1)))
6003 goto efault;
6004 ret = get_errno(creat(p, arg2));
6005 fd_trans_unregister(ret);
6006 unlock_user(p, arg1, 0);
6007 break;
6008 #endif
6009 #ifdef TARGET_NR_link
6010 case TARGET_NR_link:
6012 void * p2;
6013 p = lock_user_string(arg1);
6014 p2 = lock_user_string(arg2);
6015 if (!p || !p2)
6016 ret = -TARGET_EFAULT;
6017 else
6018 ret = get_errno(link(p, p2));
6019 unlock_user(p2, arg2, 0);
6020 unlock_user(p, arg1, 0);
6022 break;
6023 #endif
6024 #if defined(TARGET_NR_linkat)
6025 case TARGET_NR_linkat:
6027 void * p2 = NULL;
6028 if (!arg2 || !arg4)
6029 goto efault;
6030 p = lock_user_string(arg2);
6031 p2 = lock_user_string(arg4);
6032 if (!p || !p2)
6033 ret = -TARGET_EFAULT;
6034 else
6035 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
6036 unlock_user(p, arg2, 0);
6037 unlock_user(p2, arg4, 0);
6039 break;
6040 #endif
6041 #ifdef TARGET_NR_unlink
6042 case TARGET_NR_unlink:
6043 if (!(p = lock_user_string(arg1)))
6044 goto efault;
6045 ret = get_errno(unlink(p));
6046 unlock_user(p, arg1, 0);
6047 break;
6048 #endif
6049 #if defined(TARGET_NR_unlinkat)
6050 case TARGET_NR_unlinkat:
6051 if (!(p = lock_user_string(arg2)))
6052 goto efault;
6053 ret = get_errno(unlinkat(arg1, p, arg3));
6054 unlock_user(p, arg2, 0);
6055 break;
6056 #endif
6057 case TARGET_NR_execve:
6059 char **argp, **envp;
6060 int argc, envc;
6061 abi_ulong gp;
6062 abi_ulong guest_argp;
6063 abi_ulong guest_envp;
6064 abi_ulong addr;
6065 char **q;
6066 int total_size = 0;
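          /* First pass: count the guest argv and envp entries so host
           * pointer arrays of the right size can be allocated below. */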
6068 argc = 0;
6069 guest_argp = arg2;
6070 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
6071 if (get_user_ual(addr, gp))
6072 goto efault;
6073 if (!addr)
6074 break;
6075 argc++;
6077 envc = 0;
6078 guest_envp = arg3;
6079 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
6080 if (get_user_ual(addr, gp))
6081 goto efault;
6082 if (!addr)
6083 break;
6084 envc++;
6087 argp = alloca((argc + 1) * sizeof(void *));
6088 envp = alloca((envc + 1) * sizeof(void *));
6090 for (gp = guest_argp, q = argp; gp;
6091 gp += sizeof(abi_ulong), q++) {
6092 if (get_user_ual(addr, gp))
6093 goto execve_efault;
6094 if (!addr)
6095 break;
6096 if (!(*q = lock_user_string(addr)))
6097 goto execve_efault;
6098 total_size += strlen(*q) + 1;
6100 *q = NULL;
6102 for (gp = guest_envp, q = envp; gp;
6103 gp += sizeof(abi_ulong), q++) {
6104 if (get_user_ual(addr, gp))
6105 goto execve_efault;
6106 if (!addr)
6107 break;
6108 if (!(*q = lock_user_string(addr)))
6109 goto execve_efault;
6110 total_size += strlen(*q) + 1;
6112 *q = NULL;
6114 if (!(p = lock_user_string(arg1)))
6115 goto execve_efault;
6116 ret = get_errno(execve(p, argp, envp));
6117 unlock_user(p, arg1, 0);
6119 goto execve_end;
6121 execve_efault:
6122 ret = -TARGET_EFAULT;
6124 execve_end:
6125 for (gp = guest_argp, q = argp; *q;
6126 gp += sizeof(abi_ulong), q++) {
6127 if (get_user_ual(addr, gp)
6128 || !addr)
6129 break;
6130 unlock_user(*q, addr, 0);
6132 for (gp = guest_envp, q = envp; *q;
6133 gp += sizeof(abi_ulong), q++) {
6134 if (get_user_ual(addr, gp)
6135 || !addr)
6136 break;
6137 unlock_user(*q, addr, 0);
6140 break;
6141 case TARGET_NR_chdir:
6142 if (!(p = lock_user_string(arg1)))
6143 goto efault;
6144 ret = get_errno(chdir(p));
6145 unlock_user(p, arg1, 0);
6146 break;
6147 #ifdef TARGET_NR_time
6148 case TARGET_NR_time:
6150 time_t host_time;
6151 ret = get_errno(time(&host_time));
6152 if (!is_error(ret)
6153 && arg1
6154 && put_user_sal(host_time, arg1))
6155 goto efault;
6157 break;
6158 #endif
6159 #ifdef TARGET_NR_mknod
6160 case TARGET_NR_mknod:
6161 if (!(p = lock_user_string(arg1)))
6162 goto efault;
6163 ret = get_errno(mknod(p, arg2, arg3));
6164 unlock_user(p, arg1, 0);
6165 break;
6166 #endif
6167 #if defined(TARGET_NR_mknodat)
6168 case TARGET_NR_mknodat:
6169 if (!(p = lock_user_string(arg2)))
6170 goto efault;
6171 ret = get_errno(mknodat(arg1, p, arg3, arg4));
6172 unlock_user(p, arg2, 0);
6173 break;
6174 #endif
6175 #ifdef TARGET_NR_chmod
6176 case TARGET_NR_chmod:
6177 if (!(p = lock_user_string(arg1)))
6178 goto efault;
6179 ret = get_errno(chmod(p, arg2));
6180 unlock_user(p, arg1, 0);
6181 break;
6182 #endif
6183 #ifdef TARGET_NR_break
6184 case TARGET_NR_break:
6185 goto unimplemented;
6186 #endif
6187 #ifdef TARGET_NR_oldstat
6188 case TARGET_NR_oldstat:
6189 goto unimplemented;
6190 #endif
6191 case TARGET_NR_lseek:
6192 ret = get_errno(lseek(arg1, arg2, arg3));
6193 break;
6194 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
6195 /* Alpha specific */
6196 case TARGET_NR_getxpid:
6197 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
6198 ret = get_errno(getpid());
6199 break;
6200 #endif
6201 #ifdef TARGET_NR_getpid
6202 case TARGET_NR_getpid:
6203 ret = get_errno(getpid());
6204 break;
6205 #endif
6206 case TARGET_NR_mount:
6208 /* need to look at the data field */
6209 void *p2, *p3;
6211 if (arg1) {
6212 p = lock_user_string(arg1);
6213 if (!p) {
6214 goto efault;
6216 } else {
6217 p = NULL;
6220 p2 = lock_user_string(arg2);
6221 if (!p2) {
6222 if (arg1) {
6223 unlock_user(p, arg1, 0);
6225 goto efault;
6228 if (arg3) {
6229 p3 = lock_user_string(arg3);
6230 if (!p3) {
6231 if (arg1) {
6232 unlock_user(p, arg1, 0);
6234 unlock_user(p2, arg2, 0);
6235 goto efault;
6237 } else {
6238 p3 = NULL;
6241 /* FIXME - arg5 should be locked, but it isn't clear how to
6242 * do that since it's not guaranteed to be a NULL-terminated
6243 * string.
6245 if (!arg5) {
6246 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
6247 } else {
6248 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
6250 ret = get_errno(ret);
6252 if (arg1) {
6253 unlock_user(p, arg1, 0);
6255 unlock_user(p2, arg2, 0);
6256 if (arg3) {
6257 unlock_user(p3, arg3, 0);
6260 break;
6261 #ifdef TARGET_NR_umount
6262 case TARGET_NR_umount:
6263 if (!(p = lock_user_string(arg1)))
6264 goto efault;
6265 ret = get_errno(umount(p));
6266 unlock_user(p, arg1, 0);
6267 break;
6268 #endif
6269 #ifdef TARGET_NR_stime /* not on alpha */
6270 case TARGET_NR_stime:
6272 time_t host_time;
6273 if (get_user_sal(host_time, arg1))
6274 goto efault;
6275 ret = get_errno(stime(&host_time));
6277 break;
6278 #endif
6279 case TARGET_NR_ptrace:
6280 goto unimplemented;
6281 #ifdef TARGET_NR_alarm /* not on alpha */
6282 case TARGET_NR_alarm:
6283 ret = alarm(arg1);
6284 break;
6285 #endif
6286 #ifdef TARGET_NR_oldfstat
6287 case TARGET_NR_oldfstat:
6288 goto unimplemented;
6289 #endif
6290 #ifdef TARGET_NR_pause /* not on alpha */
6291 case TARGET_NR_pause:
6292 ret = get_errno(pause());
6293 break;
6294 #endif
6295 #ifdef TARGET_NR_utime
6296 case TARGET_NR_utime:
6298 struct utimbuf tbuf, *host_tbuf;
6299 struct target_utimbuf *target_tbuf;
6300 if (arg2) {
6301 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
6302 goto efault;
6303 tbuf.actime = tswapal(target_tbuf->actime);
6304 tbuf.modtime = tswapal(target_tbuf->modtime);
6305 unlock_user_struct(target_tbuf, arg2, 0);
6306 host_tbuf = &tbuf;
6307 } else {
6308 host_tbuf = NULL;
6310 if (!(p = lock_user_string(arg1)))
6311 goto efault;
6312 ret = get_errno(utime(p, host_tbuf));
6313 unlock_user(p, arg1, 0);
6315 break;
6316 #endif
6317 #ifdef TARGET_NR_utimes
6318 case TARGET_NR_utimes:
6320 struct timeval *tvp, tv[2];
6321 if (arg2) {
6322 if (copy_from_user_timeval(&tv[0], arg2)
6323 || copy_from_user_timeval(&tv[1],
6324 arg2 + sizeof(struct target_timeval)))
6325 goto efault;
6326 tvp = tv;
6327 } else {
6328 tvp = NULL;
6330 if (!(p = lock_user_string(arg1)))
6331 goto efault;
6332 ret = get_errno(utimes(p, tvp));
6333 unlock_user(p, arg1, 0);
6335 break;
6336 #endif
6337 #if defined(TARGET_NR_futimesat)
6338 case TARGET_NR_futimesat:
6340 struct timeval *tvp, tv[2];
6341 if (arg3) {
6342 if (copy_from_user_timeval(&tv[0], arg3)
6343 || copy_from_user_timeval(&tv[1],
6344 arg3 + sizeof(struct target_timeval)))
6345 goto efault;
6346 tvp = tv;
6347 } else {
6348 tvp = NULL;
6350 if (!(p = lock_user_string(arg2)))
6351 goto efault;
6352 ret = get_errno(futimesat(arg1, path(p), tvp));
6353 unlock_user(p, arg2, 0);
6355 break;
6356 #endif
6357 #ifdef TARGET_NR_stty
6358 case TARGET_NR_stty:
6359 goto unimplemented;
6360 #endif
6361 #ifdef TARGET_NR_gtty
6362 case TARGET_NR_gtty:
6363 goto unimplemented;
6364 #endif
6365 #ifdef TARGET_NR_access
6366 case TARGET_NR_access:
6367 if (!(p = lock_user_string(arg1)))
6368 goto efault;
6369 ret = get_errno(access(path(p), arg2));
6370 unlock_user(p, arg1, 0);
6371 break;
6372 #endif
6373 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
6374 case TARGET_NR_faccessat:
6375 if (!(p = lock_user_string(arg2)))
6376 goto efault;
6377 ret = get_errno(faccessat(arg1, p, arg3, 0));
6378 unlock_user(p, arg2, 0);
6379 break;
6380 #endif
6381 #ifdef TARGET_NR_nice /* not on alpha */
6382 case TARGET_NR_nice:
6383 ret = get_errno(nice(arg1));
6384 break;
6385 #endif
6386 #ifdef TARGET_NR_ftime
6387 case TARGET_NR_ftime:
6388 goto unimplemented;
6389 #endif
6390 case TARGET_NR_sync:
6391 sync();
6392 ret = 0;
6393 break;
6394 case TARGET_NR_kill:
6395 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
6396 break;
6397 #ifdef TARGET_NR_rename
6398 case TARGET_NR_rename:
6400 void *p2;
6401 p = lock_user_string(arg1);
6402 p2 = lock_user_string(arg2);
6403 if (!p || !p2)
6404 ret = -TARGET_EFAULT;
6405 else
6406 ret = get_errno(rename(p, p2));
6407 unlock_user(p2, arg2, 0);
6408 unlock_user(p, arg1, 0);
6410 break;
6411 #endif
6412 #if defined(TARGET_NR_renameat)
6413 case TARGET_NR_renameat:
6415 void *p2;
6416 p = lock_user_string(arg2);
6417 p2 = lock_user_string(arg4);
6418 if (!p || !p2)
6419 ret = -TARGET_EFAULT;
6420 else
6421 ret = get_errno(renameat(arg1, p, arg3, p2));
6422 unlock_user(p2, arg4, 0);
6423 unlock_user(p, arg2, 0);
6425 break;
6426 #endif
6427 #ifdef TARGET_NR_mkdir
6428 case TARGET_NR_mkdir:
6429 if (!(p = lock_user_string(arg1)))
6430 goto efault;
6431 ret = get_errno(mkdir(p, arg2));
6432 unlock_user(p, arg1, 0);
6433 break;
6434 #endif
6435 #if defined(TARGET_NR_mkdirat)
6436 case TARGET_NR_mkdirat:
6437 if (!(p = lock_user_string(arg2)))
6438 goto efault;
6439 ret = get_errno(mkdirat(arg1, p, arg3));
6440 unlock_user(p, arg2, 0);
6441 break;
6442 #endif
6443 #ifdef TARGET_NR_rmdir
6444 case TARGET_NR_rmdir:
6445 if (!(p = lock_user_string(arg1)))
6446 goto efault;
6447 ret = get_errno(rmdir(p));
6448 unlock_user(p, arg1, 0);
6449 break;
6450 #endif
6451 case TARGET_NR_dup:
6452 ret = get_errno(dup(arg1));
6453 if (ret >= 0) {
6454 fd_trans_dup(arg1, ret);
6456 break;
6457 #ifdef TARGET_NR_pipe
6458 case TARGET_NR_pipe:
6459 ret = do_pipe(cpu_env, arg1, 0, 0);
6460 break;
6461 #endif
6462 #ifdef TARGET_NR_pipe2
6463 case TARGET_NR_pipe2:
6464 ret = do_pipe(cpu_env, arg1,
6465 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
6466 break;
6467 #endif
6468 case TARGET_NR_times:
6470 struct target_tms *tmsp;
6471 struct tms tms;
6472 ret = get_errno(times(&tms));
6473 if (arg1) {
6474 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
6475 if (!tmsp)
6476 goto efault;
6477 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
6478 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
6479 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
6480 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
6482 if (!is_error(ret))
6483 ret = host_to_target_clock_t(ret);
6485 break;
6486 #ifdef TARGET_NR_prof
6487 case TARGET_NR_prof:
6488 goto unimplemented;
6489 #endif
6490 #ifdef TARGET_NR_signal
6491 case TARGET_NR_signal:
6492 goto unimplemented;
6493 #endif
6494 case TARGET_NR_acct:
6495 if (arg1 == 0) {
6496 ret = get_errno(acct(NULL));
6497 } else {
6498 if (!(p = lock_user_string(arg1)))
6499 goto efault;
6500 ret = get_errno(acct(path(p)));
6501 unlock_user(p, arg1, 0);
6503 break;
6504 #ifdef TARGET_NR_umount2
6505 case TARGET_NR_umount2:
6506 if (!(p = lock_user_string(arg1)))
6507 goto efault;
6508 ret = get_errno(umount2(p, arg2));
6509 unlock_user(p, arg1, 0);
6510 break;
6511 #endif
6512 #ifdef TARGET_NR_lock
6513 case TARGET_NR_lock:
6514 goto unimplemented;
6515 #endif
6516 case TARGET_NR_ioctl:
6517 ret = do_ioctl(arg1, arg2, arg3);
6518 break;
6519 case TARGET_NR_fcntl:
6520 ret = do_fcntl(arg1, arg2, arg3);
6521 break;
6522 #ifdef TARGET_NR_mpx
6523 case TARGET_NR_mpx:
6524 goto unimplemented;
6525 #endif
6526 case TARGET_NR_setpgid:
6527 ret = get_errno(setpgid(arg1, arg2));
6528 break;
6529 #ifdef TARGET_NR_ulimit
6530 case TARGET_NR_ulimit:
6531 goto unimplemented;
6532 #endif
6533 #ifdef TARGET_NR_oldolduname
6534 case TARGET_NR_oldolduname:
6535 goto unimplemented;
6536 #endif
6537 case TARGET_NR_umask:
6538 ret = get_errno(umask(arg1));
6539 break;
6540 case TARGET_NR_chroot:
6541 if (!(p = lock_user_string(arg1)))
6542 goto efault;
6543 ret = get_errno(chroot(p));
6544 unlock_user(p, arg1, 0);
6545 break;
6546 #ifdef TARGET_NR_ustat
6547 case TARGET_NR_ustat:
6548 goto unimplemented;
6549 #endif
6550 #ifdef TARGET_NR_dup2
6551 case TARGET_NR_dup2:
6552 ret = get_errno(dup2(arg1, arg2));
6553 if (ret >= 0) {
6554 fd_trans_dup(arg1, arg2);
6556 break;
6557 #endif
6558 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
6559 case TARGET_NR_dup3:
6560 ret = get_errno(dup3(arg1, arg2, arg3));
6561 if (ret >= 0) {
6562 fd_trans_dup(arg1, arg2);
6564 break;
6565 #endif
6566 #ifdef TARGET_NR_getppid /* not on alpha */
6567 case TARGET_NR_getppid:
6568 ret = get_errno(getppid());
6569 break;
6570 #endif
6571 #ifdef TARGET_NR_getpgrp
6572 case TARGET_NR_getpgrp:
6573 ret = get_errno(getpgrp());
6574 break;
6575 #endif
6576 case TARGET_NR_setsid:
6577 ret = get_errno(setsid());
6578 break;
6579 #ifdef TARGET_NR_sigaction
6580 case TARGET_NR_sigaction:
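        /* The old-style sigaction layout differs per target ABI, so each
         * branch below converts it to and from QEMU's target_sigaction
         * before calling the common do_sigaction() helper. */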
6582 #if defined(TARGET_ALPHA)
6583 struct target_sigaction act, oact, *pact = 0;
6584 struct target_old_sigaction *old_act;
6585 if (arg2) {
6586 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6587 goto efault;
6588 act._sa_handler = old_act->_sa_handler;
6589 target_siginitset(&act.sa_mask, old_act->sa_mask);
6590 act.sa_flags = old_act->sa_flags;
6591 act.sa_restorer = 0;
6592 unlock_user_struct(old_act, arg2, 0);
6593 pact = &act;
6595 ret = get_errno(do_sigaction(arg1, pact, &oact));
6596 if (!is_error(ret) && arg3) {
6597 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6598 goto efault;
6599 old_act->_sa_handler = oact._sa_handler;
6600 old_act->sa_mask = oact.sa_mask.sig[0];
6601 old_act->sa_flags = oact.sa_flags;
6602 unlock_user_struct(old_act, arg3, 1);
6604 #elif defined(TARGET_MIPS)
6605 struct target_sigaction act, oact, *pact, *old_act;
6607 if (arg2) {
6608 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6609 goto efault;
6610 act._sa_handler = old_act->_sa_handler;
6611 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
6612 act.sa_flags = old_act->sa_flags;
6613 unlock_user_struct(old_act, arg2, 0);
6614 pact = &act;
6615 } else {
6616 pact = NULL;
6619 ret = get_errno(do_sigaction(arg1, pact, &oact));
6621 if (!is_error(ret) && arg3) {
6622 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6623 goto efault;
6624 old_act->_sa_handler = oact._sa_handler;
6625 old_act->sa_flags = oact.sa_flags;
6626 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
6627 old_act->sa_mask.sig[1] = 0;
6628 old_act->sa_mask.sig[2] = 0;
6629 old_act->sa_mask.sig[3] = 0;
6630 unlock_user_struct(old_act, arg3, 1);
6632 #else
6633 struct target_old_sigaction *old_act;
6634 struct target_sigaction act, oact, *pact;
6635 if (arg2) {
6636 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
6637 goto efault;
6638 act._sa_handler = old_act->_sa_handler;
6639 target_siginitset(&act.sa_mask, old_act->sa_mask);
6640 act.sa_flags = old_act->sa_flags;
6641 act.sa_restorer = old_act->sa_restorer;
6642 unlock_user_struct(old_act, arg2, 0);
6643 pact = &act;
6644 } else {
6645 pact = NULL;
6647 ret = get_errno(do_sigaction(arg1, pact, &oact));
6648 if (!is_error(ret) && arg3) {
6649 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
6650 goto efault;
6651 old_act->_sa_handler = oact._sa_handler;
6652 old_act->sa_mask = oact.sa_mask.sig[0];
6653 old_act->sa_flags = oact.sa_flags;
6654 old_act->sa_restorer = oact.sa_restorer;
6655 unlock_user_struct(old_act, arg3, 1);
6657 #endif
6659 break;
6660 #endif
6661 case TARGET_NR_rt_sigaction:
6663 #if defined(TARGET_ALPHA)
6664 struct target_sigaction act, oact, *pact = 0;
6665 struct target_rt_sigaction *rt_act;
6666 /* ??? arg4 == sizeof(sigset_t). */
6667 if (arg2) {
6668 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
6669 goto efault;
6670 act._sa_handler = rt_act->_sa_handler;
6671 act.sa_mask = rt_act->sa_mask;
6672 act.sa_flags = rt_act->sa_flags;
6673 act.sa_restorer = arg5;
6674 unlock_user_struct(rt_act, arg2, 0);
6675 pact = &act;
6677 ret = get_errno(do_sigaction(arg1, pact, &oact));
6678 if (!is_error(ret) && arg3) {
6679 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6680 goto efault;
6681 rt_act->_sa_handler = oact._sa_handler;
6682 rt_act->sa_mask = oact.sa_mask;
6683 rt_act->sa_flags = oact.sa_flags;
6684 unlock_user_struct(rt_act, arg3, 1);
6686 #else
6687 struct target_sigaction *act;
6688 struct target_sigaction *oact;
6690 if (arg2) {
6691 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6692 goto efault;
6693 } else
6694 act = NULL;
6695 if (arg3) {
6696 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6697 ret = -TARGET_EFAULT;
6698 goto rt_sigaction_fail;
6700 } else
6701 oact = NULL;
6702 ret = get_errno(do_sigaction(arg1, act, oact));
6703 rt_sigaction_fail:
6704 if (act)
6705 unlock_user_struct(act, arg2, 0);
6706 if (oact)
6707 unlock_user_struct(oact, arg3, 1);
6708 #endif
6710 break;
6711 #ifdef TARGET_NR_sgetmask /* not on alpha */
6712 case TARGET_NR_sgetmask:
6714 sigset_t cur_set;
6715 abi_ulong target_set;
6716 do_sigprocmask(0, NULL, &cur_set);
6717 host_to_target_old_sigset(&target_set, &cur_set);
6718 ret = target_set;
6720 break;
6721 #endif
6722 #ifdef TARGET_NR_ssetmask /* not on alpha */
6723 case TARGET_NR_ssetmask:
6725 sigset_t set, oset, cur_set;
6726 abi_ulong target_set = arg1;
6727 do_sigprocmask(0, NULL, &cur_set);
6728 target_to_host_old_sigset(&set, &target_set);
6729 sigorset(&set, &set, &cur_set);
6730 do_sigprocmask(SIG_SETMASK, &set, &oset);
6731 host_to_target_old_sigset(&target_set, &oset);
6732 ret = target_set;
6734 break;
6735 #endif
6736 #ifdef TARGET_NR_sigprocmask
6737 case TARGET_NR_sigprocmask:
6739 #if defined(TARGET_ALPHA)
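        /* Alpha passes the new mask by value in arg2 and returns the
         * previous mask as the syscall result instead of using pointers. */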
6740 sigset_t set, oldset;
6741 abi_ulong mask;
6742 int how;
6744 switch (arg1) {
6745 case TARGET_SIG_BLOCK:
6746 how = SIG_BLOCK;
6747 break;
6748 case TARGET_SIG_UNBLOCK:
6749 how = SIG_UNBLOCK;
6750 break;
6751 case TARGET_SIG_SETMASK:
6752 how = SIG_SETMASK;
6753 break;
6754 default:
6755 ret = -TARGET_EINVAL;
6756 goto fail;
6758 mask = arg2;
6759 target_to_host_old_sigset(&set, &mask);
6761 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6762 if (!is_error(ret)) {
6763 host_to_target_old_sigset(&mask, &oldset);
6764 ret = mask;
6765 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6767 #else
6768 sigset_t set, oldset, *set_ptr;
6769 int how;
6771 if (arg2) {
6772 switch (arg1) {
6773 case TARGET_SIG_BLOCK:
6774 how = SIG_BLOCK;
6775 break;
6776 case TARGET_SIG_UNBLOCK:
6777 how = SIG_UNBLOCK;
6778 break;
6779 case TARGET_SIG_SETMASK:
6780 how = SIG_SETMASK;
6781 break;
6782 default:
6783 ret = -TARGET_EINVAL;
6784 goto fail;
6786 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6787 goto efault;
6788 target_to_host_old_sigset(&set, p);
6789 unlock_user(p, arg2, 0);
6790 set_ptr = &set;
6791 } else {
6792 how = 0;
6793 set_ptr = NULL;
6795 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6796 if (!is_error(ret) && arg3) {
6797 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6798 goto efault;
6799 host_to_target_old_sigset(p, &oldset);
6800 unlock_user(p, arg3, sizeof(target_sigset_t));
6802 #endif
6804 break;
6805 #endif
6806 case TARGET_NR_rt_sigprocmask:
6808 int how = arg1;
6809 sigset_t set, oldset, *set_ptr;
6811 if (arg2) {
6812 switch(how) {
6813 case TARGET_SIG_BLOCK:
6814 how = SIG_BLOCK;
6815 break;
6816 case TARGET_SIG_UNBLOCK:
6817 how = SIG_UNBLOCK;
6818 break;
6819 case TARGET_SIG_SETMASK:
6820 how = SIG_SETMASK;
6821 break;
6822 default:
6823 ret = -TARGET_EINVAL;
6824 goto fail;
6826 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6827 goto efault;
6828 target_to_host_sigset(&set, p);
6829 unlock_user(p, arg2, 0);
6830 set_ptr = &set;
6831 } else {
6832 how = 0;
6833 set_ptr = NULL;
6835 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6836 if (!is_error(ret) && arg3) {
6837 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6838 goto efault;
6839 host_to_target_sigset(p, &oldset);
6840 unlock_user(p, arg3, sizeof(target_sigset_t));
6843 break;
6844 #ifdef TARGET_NR_sigpending
6845 case TARGET_NR_sigpending:
6847 sigset_t set;
6848 ret = get_errno(sigpending(&set));
6849 if (!is_error(ret)) {
6850 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6851 goto efault;
6852 host_to_target_old_sigset(p, &set);
6853 unlock_user(p, arg1, sizeof(target_sigset_t));
6856 break;
6857 #endif
6858 case TARGET_NR_rt_sigpending:
6860 sigset_t set;
6861 ret = get_errno(sigpending(&set));
6862 if (!is_error(ret)) {
6863 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6864 goto efault;
6865 host_to_target_sigset(p, &set);
6866 unlock_user(p, arg1, sizeof(target_sigset_t));
6869 break;
6870 #ifdef TARGET_NR_sigsuspend
6871 case TARGET_NR_sigsuspend:
6873 sigset_t set;
6874 #if defined(TARGET_ALPHA)
6875 abi_ulong mask = arg1;
6876 target_to_host_old_sigset(&set, &mask);
6877 #else
6878 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6879 goto efault;
6880 target_to_host_old_sigset(&set, p);
6881 unlock_user(p, arg1, 0);
6882 #endif
6883 ret = get_errno(sigsuspend(&set));
6885 break;
6886 #endif
6887 case TARGET_NR_rt_sigsuspend:
6889 sigset_t set;
6890 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6891 goto efault;
6892 target_to_host_sigset(&set, p);
6893 unlock_user(p, arg1, 0);
6894 ret = get_errno(sigsuspend(&set));
6896 break;
6897 case TARGET_NR_rt_sigtimedwait:
6899 sigset_t set;
6900 struct timespec uts, *puts;
6901 siginfo_t uinfo;
6903 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6904 goto efault;
6905 target_to_host_sigset(&set, p);
6906 unlock_user(p, arg1, 0);
6907 if (arg3) {
6908 puts = &uts;
6909 target_to_host_timespec(puts, arg3);
6910 } else {
6911 puts = NULL;
6913 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6914 if (!is_error(ret)) {
6915 if (arg2) {
6916 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6918 if (!p) {
6919 goto efault;
6921 host_to_target_siginfo(p, &uinfo);
6922 unlock_user(p, arg2, sizeof(target_siginfo_t));
6924 ret = host_to_target_signal(ret);
6927 break;
6928 case TARGET_NR_rt_sigqueueinfo:
6930 siginfo_t uinfo;
6931 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6932 goto efault;
6933 target_to_host_siginfo(&uinfo, p);
6934 unlock_user(p, arg1, 0);
6935 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6937 break;
6938 #ifdef TARGET_NR_sigreturn
6939 case TARGET_NR_sigreturn:
6940 /* NOTE: ret is eax, so no transcoding needs to be done */
6941 ret = do_sigreturn(cpu_env);
6942 break;
6943 #endif
6944 case TARGET_NR_rt_sigreturn:
6945 /* NOTE: ret is eax, so no transcoding needs to be done */
6946 ret = do_rt_sigreturn(cpu_env);
6947 break;
6948 case TARGET_NR_sethostname:
6949 if (!(p = lock_user_string(arg1)))
6950 goto efault;
6951 ret = get_errno(sethostname(p, arg2));
6952 unlock_user(p, arg1, 0);
6953 break;
6954 case TARGET_NR_setrlimit:
6956 int resource = target_to_host_resource(arg1);
6957 struct target_rlimit *target_rlim;
6958 struct rlimit rlim;
6959 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6960 goto efault;
6961 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6962 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6963 unlock_user_struct(target_rlim, arg2, 0);
6964 ret = get_errno(setrlimit(resource, &rlim));
6966 break;
6967 case TARGET_NR_getrlimit:
6969 int resource = target_to_host_resource(arg1);
6970 struct target_rlimit *target_rlim;
6971 struct rlimit rlim;
6973 ret = get_errno(getrlimit(resource, &rlim));
6974 if (!is_error(ret)) {
6975 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6976 goto efault;
6977 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6978 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6979 unlock_user_struct(target_rlim, arg2, 1);
6982 break;
6983 case TARGET_NR_getrusage:
6985 struct rusage rusage;
6986 ret = get_errno(getrusage(arg1, &rusage));
6987 if (!is_error(ret)) {
6988 ret = host_to_target_rusage(arg2, &rusage);
6991 break;
6992 case TARGET_NR_gettimeofday:
6994 struct timeval tv;
6995 ret = get_errno(gettimeofday(&tv, NULL));
6996 if (!is_error(ret)) {
6997 if (copy_to_user_timeval(arg1, &tv))
6998 goto efault;
7001 break;
7002 case TARGET_NR_settimeofday:
7004 struct timeval tv, *ptv = NULL;
7005 struct timezone tz, *ptz = NULL;
7007 if (arg1) {
7008 if (copy_from_user_timeval(&tv, arg1)) {
7009 goto efault;
7011 ptv = &tv;
7014 if (arg2) {
7015 if (copy_from_user_timezone(&tz, arg2)) {
7016 goto efault;
7018 ptz = &tz;
7021 ret = get_errno(settimeofday(ptv, ptz));
7023 break;
7024 #if defined(TARGET_NR_select)
7025 case TARGET_NR_select:
7026 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
7027 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7028 #else
7030 struct target_sel_arg_struct *sel;
7031 abi_ulong inp, outp, exp, tvp;
7032 long nsel;
7034 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
7035 goto efault;
7036 nsel = tswapal(sel->n);
7037 inp = tswapal(sel->inp);
7038 outp = tswapal(sel->outp);
7039 exp = tswapal(sel->exp);
7040 tvp = tswapal(sel->tvp);
7041 unlock_user_struct(sel, arg1, 0);
7042 ret = do_select(nsel, inp, outp, exp, tvp);
7044 #endif
7045 break;
7046 #endif
7047 #ifdef TARGET_NR_pselect6
7048 case TARGET_NR_pselect6:
7050 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
7051 fd_set rfds, wfds, efds;
7052 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
7053 struct timespec ts, *ts_ptr;
7056 * The 6th arg is actually two args smashed together,
7057 * so we cannot use the C library.
7059 sigset_t set;
7060 struct {
7061 sigset_t *set;
7062 size_t size;
7063 } sig, *sig_ptr;
7065 abi_ulong arg_sigset, arg_sigsize, *arg7;
7066 target_sigset_t *target_sigset;
7068 n = arg1;
7069 rfd_addr = arg2;
7070 wfd_addr = arg3;
7071 efd_addr = arg4;
7072 ts_addr = arg5;
7074 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
7075 if (ret) {
7076 goto fail;
7078 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
7079 if (ret) {
7080 goto fail;
7082 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
7083 if (ret) {
7084 goto fail;
7088 * This takes a timespec, and not a timeval, so we cannot
7089 * use the do_select() helper ...
7091 if (ts_addr) {
7092 if (target_to_host_timespec(&ts, ts_addr)) {
7093 goto efault;
7095 ts_ptr = &ts;
7096 } else {
7097 ts_ptr = NULL;
7100 /* Extract the two packed args for the sigset */
7101 if (arg6) {
7102 sig_ptr = &sig;
7103 sig.size = _NSIG / 8;
7105 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
7106 if (!arg7) {
7107 goto efault;
7109 arg_sigset = tswapal(arg7[0]);
7110 arg_sigsize = tswapal(arg7[1]);
7111 unlock_user(arg7, arg6, 0);
7113 if (arg_sigset) {
7114 sig.set = &set;
7115 if (arg_sigsize != sizeof(*target_sigset)) {
7116 /* Like the kernel, we enforce correct size sigsets */
7117 ret = -TARGET_EINVAL;
7118 goto fail;
7120 target_sigset = lock_user(VERIFY_READ, arg_sigset,
7121 sizeof(*target_sigset), 1);
7122 if (!target_sigset) {
7123 goto efault;
7125 target_to_host_sigset(&set, target_sigset);
7126 unlock_user(target_sigset, arg_sigset, 0);
7127 } else {
7128 sig.set = NULL;
7130 } else {
7131 sig_ptr = NULL;
7134 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
7135 ts_ptr, sig_ptr));
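          /* On success, copy the possibly-modified fd sets back to the
           * guest, along with the timeout, which the raw pselect6 syscall
           * may have updated with the time remaining. */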
7137 if (!is_error(ret)) {
7138 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
7139 goto efault;
7140 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
7141 goto efault;
7142 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
7143 goto efault;
7145 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
7146 goto efault;
7149 break;
7150 #endif
7151 #ifdef TARGET_NR_symlink
7152 case TARGET_NR_symlink:
7154 void *p2;
7155 p = lock_user_string(arg1);
7156 p2 = lock_user_string(arg2);
7157 if (!p || !p2)
7158 ret = -TARGET_EFAULT;
7159 else
7160 ret = get_errno(symlink(p, p2));
7161 unlock_user(p2, arg2, 0);
7162 unlock_user(p, arg1, 0);
7164 break;
7165 #endif
7166 #if defined(TARGET_NR_symlinkat)
7167 case TARGET_NR_symlinkat:
7169 void *p2;
7170 p = lock_user_string(arg1);
7171 p2 = lock_user_string(arg3);
7172 if (!p || !p2)
7173 ret = -TARGET_EFAULT;
7174 else
7175 ret = get_errno(symlinkat(p, arg2, p2));
7176 unlock_user(p2, arg3, 0);
7177 unlock_user(p, arg1, 0);
7179 break;
7180 #endif
7181 #ifdef TARGET_NR_oldlstat
7182 case TARGET_NR_oldlstat:
7183 goto unimplemented;
7184 #endif
7185 #ifdef TARGET_NR_readlink
7186 case TARGET_NR_readlink:
7188 void *p2;
7189 p = lock_user_string(arg1);
7190 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
7191 if (!p || !p2) {
7192 ret = -TARGET_EFAULT;
7193 } else if (!arg3) {
7194 /* Short circuit this for the magic exe check. */
7195 ret = -TARGET_EINVAL;
7196 } else if (is_proc_myself((const char *)p, "exe")) {
7197 char real[PATH_MAX], *temp;
7198 temp = realpath(exec_path, real);
7199 /* Return value is # of bytes that we wrote to the buffer. */
7200 if (temp == NULL) {
7201 ret = get_errno(-1);
7202 } else {
7203 /* Don't worry about sign mismatch as earlier mapping
7204 * logic would have thrown a bad address error. */
7205 ret = MIN(strlen(real), arg3);
7206 /* We cannot NUL terminate the string. */
7207 memcpy(p2, real, ret);
7209 } else {
7210 ret = get_errno(readlink(path(p), p2, arg3));
7212 unlock_user(p2, arg2, ret);
7213 unlock_user(p, arg1, 0);
7215 break;
7216 #endif
7217 #if defined(TARGET_NR_readlinkat)
7218 case TARGET_NR_readlinkat:
7220 void *p2;
7221 p = lock_user_string(arg2);
7222 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7223 if (!p || !p2) {
7224 ret = -TARGET_EFAULT;
7225 } else if (is_proc_myself((const char *)p, "exe")) {
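                /* As in the readlink case above, a readlink of /proc/self/exe
                 * reports the real path of the binary being emulated. */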
7226 char real[PATH_MAX], *temp;
7227 temp = realpath(exec_path, real);
7228 ret = temp == NULL ? get_errno(-1) : strlen(real);
7229 snprintf((char *)p2, arg4, "%s", real);
7230 } else {
7231 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
7233 unlock_user(p2, arg3, ret);
7234 unlock_user(p, arg2, 0);
7236 break;
7237 #endif
7238 #ifdef TARGET_NR_uselib
7239 case TARGET_NR_uselib:
7240 goto unimplemented;
7241 #endif
7242 #ifdef TARGET_NR_swapon
7243 case TARGET_NR_swapon:
7244 if (!(p = lock_user_string(arg1)))
7245 goto efault;
7246 ret = get_errno(swapon(p, arg2));
7247 unlock_user(p, arg1, 0);
7248 break;
7249 #endif
7250 case TARGET_NR_reboot:
7251 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
7252 /* arg4 must be ignored in all other cases */
7253 p = lock_user_string(arg4);
7254 if (!p) {
7255 goto efault;
7257 ret = get_errno(reboot(arg1, arg2, arg3, p));
7258 unlock_user(p, arg4, 0);
7259 } else {
7260 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
7262 break;
7263 #ifdef TARGET_NR_readdir
7264 case TARGET_NR_readdir:
7265 goto unimplemented;
7266 #endif
7267 #ifdef TARGET_NR_mmap
7268 case TARGET_NR_mmap:
7269 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7270 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
7271 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
7272 || defined(TARGET_S390X)
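        /* On these targets the legacy mmap syscall passes a pointer to a
         * block of six arguments in guest memory instead of passing them
         * in registers. */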
7274 abi_ulong *v;
7275 abi_ulong v1, v2, v3, v4, v5, v6;
7276 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
7277 goto efault;
7278 v1 = tswapal(v[0]);
7279 v2 = tswapal(v[1]);
7280 v3 = tswapal(v[2]);
7281 v4 = tswapal(v[3]);
7282 v5 = tswapal(v[4]);
7283 v6 = tswapal(v[5]);
7284 unlock_user(v, arg1, 0);
7285 ret = get_errno(target_mmap(v1, v2, v3,
7286 target_to_host_bitmask(v4, mmap_flags_tbl),
7287 v5, v6));
7289 #else
7290 ret = get_errno(target_mmap(arg1, arg2, arg3,
7291 target_to_host_bitmask(arg4, mmap_flags_tbl),
7292 arg5,
7293 arg6));
7294 #endif
7295 break;
7296 #endif
7297 #ifdef TARGET_NR_mmap2
7298 case TARGET_NR_mmap2:
7299 #ifndef MMAP_SHIFT
7300 #define MMAP_SHIFT 12
7301 #endif
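        /* mmap2 supplies the file offset in units of 1 << MMAP_SHIFT bytes
         * (4096 here), so scale it before handing it to target_mmap(). */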
7302 ret = get_errno(target_mmap(arg1, arg2, arg3,
7303 target_to_host_bitmask(arg4, mmap_flags_tbl),
7304 arg5,
7305 arg6 << MMAP_SHIFT));
7306 break;
7307 #endif
7308 case TARGET_NR_munmap:
7309 ret = get_errno(target_munmap(arg1, arg2));
7310 break;
7311 case TARGET_NR_mprotect:
7313 TaskState *ts = cpu->opaque;
7314 /* Special hack to detect libc making the stack executable. */
7315 if ((arg3 & PROT_GROWSDOWN)
7316 && arg1 >= ts->info->stack_limit
7317 && arg1 <= ts->info->start_stack) {
7318 arg3 &= ~PROT_GROWSDOWN;
7319 arg2 = arg2 + arg1 - ts->info->stack_limit;
7320 arg1 = ts->info->stack_limit;
7323 ret = get_errno(target_mprotect(arg1, arg2, arg3));
7324 break;
7325 #ifdef TARGET_NR_mremap
7326 case TARGET_NR_mremap:
7327 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
7328 break;
7329 #endif
7330 /* ??? msync/mlock/munlock are broken for softmmu. */
7331 #ifdef TARGET_NR_msync
7332 case TARGET_NR_msync:
7333 ret = get_errno(msync(g2h(arg1), arg2, arg3));
7334 break;
7335 #endif
7336 #ifdef TARGET_NR_mlock
7337 case TARGET_NR_mlock:
7338 ret = get_errno(mlock(g2h(arg1), arg2));
7339 break;
7340 #endif
7341 #ifdef TARGET_NR_munlock
7342 case TARGET_NR_munlock:
7343 ret = get_errno(munlock(g2h(arg1), arg2));
7344 break;
7345 #endif
7346 #ifdef TARGET_NR_mlockall
7347 case TARGET_NR_mlockall:
7348 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
7349 break;
7350 #endif
7351 #ifdef TARGET_NR_munlockall
7352 case TARGET_NR_munlockall:
7353 ret = get_errno(munlockall());
7354 break;
7355 #endif
7356 case TARGET_NR_truncate:
7357 if (!(p = lock_user_string(arg1)))
7358 goto efault;
7359 ret = get_errno(truncate(p, arg2));
7360 unlock_user(p, arg1, 0);
7361 break;
7362 case TARGET_NR_ftruncate:
7363 ret = get_errno(ftruncate(arg1, arg2));
7364 break;
7365 case TARGET_NR_fchmod:
7366 ret = get_errno(fchmod(arg1, arg2));
7367 break;
7368 #if defined(TARGET_NR_fchmodat)
7369 case TARGET_NR_fchmodat:
7370 if (!(p = lock_user_string(arg2)))
7371 goto efault;
7372 ret = get_errno(fchmodat(arg1, p, arg3, 0));
7373 unlock_user(p, arg2, 0);
7374 break;
7375 #endif
7376 case TARGET_NR_getpriority:
7377 /* Note that negative values are valid for getpriority, so we must
7378 differentiate based on errno settings. */
7379 errno = 0;
7380 ret = getpriority(arg1, arg2);
7381 if (ret == -1 && errno != 0) {
7382 ret = -host_to_target_errno(errno);
7383 break;
7385 #ifdef TARGET_ALPHA
7386 /* Return value is the unbiased priority. Signal no error. */
7387 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
7388 #else
7389 /* Return value is a biased priority to avoid negative numbers. */
7390 ret = 20 - ret;
7391 #endif
7392 break;
7393 case TARGET_NR_setpriority:
7394 ret = get_errno(setpriority(arg1, arg2, arg3));
7395 break;
7396 #ifdef TARGET_NR_profil
7397 case TARGET_NR_profil:
7398 goto unimplemented;
7399 #endif
7400 case TARGET_NR_statfs:
7401 if (!(p = lock_user_string(arg1)))
7402 goto efault;
7403 ret = get_errno(statfs(path(p), &stfs));
7404 unlock_user(p, arg1, 0);
7405 convert_statfs:
7406 if (!is_error(ret)) {
7407 struct target_statfs *target_stfs;
7409 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
7410 goto efault;
7411 __put_user(stfs.f_type, &target_stfs->f_type);
7412 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7413 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7414 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7415 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7416 __put_user(stfs.f_files, &target_stfs->f_files);
7417 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7418 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7419 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7420 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7421 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7422 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7423 unlock_user_struct(target_stfs, arg2, 1);
7425 break;
7426 case TARGET_NR_fstatfs:
7427 ret = get_errno(fstatfs(arg1, &stfs));
7428 goto convert_statfs;
7429 #ifdef TARGET_NR_statfs64
7430 case TARGET_NR_statfs64:
7431 if (!(p = lock_user_string(arg1)))
7432 goto efault;
7433 ret = get_errno(statfs(path(p), &stfs));
7434 unlock_user(p, arg1, 0);
7435 convert_statfs64:
7436 if (!is_error(ret)) {
7437 struct target_statfs64 *target_stfs;
7439 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
7440 goto efault;
7441 __put_user(stfs.f_type, &target_stfs->f_type);
7442 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
7443 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
7444 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
7445 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
7446 __put_user(stfs.f_files, &target_stfs->f_files);
7447 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
7448 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
7449 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
7450 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
7451 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
7452 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
7453 unlock_user_struct(target_stfs, arg3, 1);
7455 break;
7456 case TARGET_NR_fstatfs64:
7457 ret = get_errno(fstatfs(arg1, &stfs));
7458 goto convert_statfs64;
7459 #endif
7460 #ifdef TARGET_NR_ioperm
7461 case TARGET_NR_ioperm:
7462 goto unimplemented;
7463 #endif
7464 #ifdef TARGET_NR_socketcall
7465 case TARGET_NR_socketcall:
7466 ret = do_socketcall(arg1, arg2);
7467 break;
7468 #endif
7469 #ifdef TARGET_NR_accept
7470 case TARGET_NR_accept:
7471 ret = do_accept4(arg1, arg2, arg3, 0);
7472 break;
7473 #endif
7474 #ifdef TARGET_NR_accept4
7475 case TARGET_NR_accept4:
7476 #ifdef CONFIG_ACCEPT4
7477 ret = do_accept4(arg1, arg2, arg3, arg4);
7478 #else
7479 goto unimplemented;
7480 #endif
7481 break;
7482 #endif
7483 #ifdef TARGET_NR_bind
7484 case TARGET_NR_bind:
7485 ret = do_bind(arg1, arg2, arg3);
7486 break;
7487 #endif
7488 #ifdef TARGET_NR_connect
7489 case TARGET_NR_connect:
7490 ret = do_connect(arg1, arg2, arg3);
7491 break;
7492 #endif
7493 #ifdef TARGET_NR_getpeername
7494 case TARGET_NR_getpeername:
7495 ret = do_getpeername(arg1, arg2, arg3);
7496 break;
7497 #endif
7498 #ifdef TARGET_NR_getsockname
7499 case TARGET_NR_getsockname:
7500 ret = do_getsockname(arg1, arg2, arg3);
7501 break;
7502 #endif
7503 #ifdef TARGET_NR_getsockopt
7504 case TARGET_NR_getsockopt:
7505 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
7506 break;
7507 #endif
7508 #ifdef TARGET_NR_listen
7509 case TARGET_NR_listen:
7510 ret = get_errno(listen(arg1, arg2));
7511 break;
7512 #endif
7513 #ifdef TARGET_NR_recv
7514 case TARGET_NR_recv:
7515 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
7516 break;
7517 #endif
7518 #ifdef TARGET_NR_recvfrom
7519 case TARGET_NR_recvfrom:
7520 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
7521 break;
7522 #endif
7523 #ifdef TARGET_NR_recvmsg
7524 case TARGET_NR_recvmsg:
7525 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
7526 break;
7527 #endif
7528 #ifdef TARGET_NR_send
7529 case TARGET_NR_send:
7530 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
7531 break;
7532 #endif
7533 #ifdef TARGET_NR_sendmsg
7534 case TARGET_NR_sendmsg:
7535 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
7536 break;
7537 #endif
7538 #ifdef TARGET_NR_sendmmsg
7539 case TARGET_NR_sendmmsg:
7540 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
7541 break;
7542 case TARGET_NR_recvmmsg:
7543 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
7544 break;
7545 #endif
7546 #ifdef TARGET_NR_sendto
7547 case TARGET_NR_sendto:
7548 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
7549 break;
7550 #endif
7551 #ifdef TARGET_NR_shutdown
7552 case TARGET_NR_shutdown:
7553 ret = get_errno(shutdown(arg1, arg2));
7554 break;
7555 #endif
7556 #ifdef TARGET_NR_socket
7557 case TARGET_NR_socket:
7558 ret = do_socket(arg1, arg2, arg3);
7559 fd_trans_unregister(ret);
7560 break;
7561 #endif
7562 #ifdef TARGET_NR_socketpair
7563 case TARGET_NR_socketpair:
7564 ret = do_socketpair(arg1, arg2, arg3, arg4);
7565 break;
7566 #endif
7567 #ifdef TARGET_NR_setsockopt
7568 case TARGET_NR_setsockopt:
7569 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
7570 break;
7571 #endif
7573 case TARGET_NR_syslog:
7574 if (!(p = lock_user_string(arg2)))
7575 goto efault;
7576 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
7577 unlock_user(p, arg2, 0);
7578 break;
7580 case TARGET_NR_setitimer:
7582 struct itimerval value, ovalue, *pvalue;
7584 if (arg2) {
7585 pvalue = &value;
7586 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
7587 || copy_from_user_timeval(&pvalue->it_value,
7588 arg2 + sizeof(struct target_timeval)))
7589 goto efault;
7590 } else {
7591 pvalue = NULL;
7593 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
7594 if (!is_error(ret) && arg3) {
7595 if (copy_to_user_timeval(arg3,
7596 &ovalue.it_interval)
7597 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
7598 &ovalue.it_value))
7599 goto efault;
7602 break;
7603 case TARGET_NR_getitimer:
7605 struct itimerval value;
7607 ret = get_errno(getitimer(arg1, &value));
7608 if (!is_error(ret) && arg2) {
7609 if (copy_to_user_timeval(arg2,
7610 &value.it_interval)
7611 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
7612 &value.it_value))
7613 goto efault;
7616 break;
7617 #ifdef TARGET_NR_stat
7618 case TARGET_NR_stat:
7619 if (!(p = lock_user_string(arg1)))
7620 goto efault;
7621 ret = get_errno(stat(path(p), &st));
7622 unlock_user(p, arg1, 0);
7623 goto do_stat;
7624 #endif
7625 #ifdef TARGET_NR_lstat
7626 case TARGET_NR_lstat:
7627 if (!(p = lock_user_string(arg1)))
7628 goto efault;
7629 ret = get_errno(lstat(path(p), &st));
7630 unlock_user(p, arg1, 0);
7631 goto do_stat;
7632 #endif
7633 case TARGET_NR_fstat:
7635 ret = get_errno(fstat(arg1, &st));
7636 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
7637 do_stat:
7638 #endif
7639 if (!is_error(ret)) {
7640 struct target_stat *target_st;
7642 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
7643 goto efault;
7644 memset(target_st, 0, sizeof(*target_st));
7645 __put_user(st.st_dev, &target_st->st_dev);
7646 __put_user(st.st_ino, &target_st->st_ino);
7647 __put_user(st.st_mode, &target_st->st_mode);
7648 __put_user(st.st_uid, &target_st->st_uid);
7649 __put_user(st.st_gid, &target_st->st_gid);
7650 __put_user(st.st_nlink, &target_st->st_nlink);
7651 __put_user(st.st_rdev, &target_st->st_rdev);
7652 __put_user(st.st_size, &target_st->st_size);
7653 __put_user(st.st_blksize, &target_st->st_blksize);
7654 __put_user(st.st_blocks, &target_st->st_blocks);
7655 __put_user(st.st_atime, &target_st->target_st_atime);
7656 __put_user(st.st_mtime, &target_st->target_st_mtime);
7657 __put_user(st.st_ctime, &target_st->target_st_ctime);
7658 unlock_user_struct(target_st, arg2, 1);
7661 break;
7662 #ifdef TARGET_NR_olduname
7663 case TARGET_NR_olduname:
7664 goto unimplemented;
7665 #endif
7666 #ifdef TARGET_NR_iopl
7667 case TARGET_NR_iopl:
7668 goto unimplemented;
7669 #endif
7670 case TARGET_NR_vhangup:
7671 ret = get_errno(vhangup());
7672 break;
7673 #ifdef TARGET_NR_idle
7674 case TARGET_NR_idle:
7675 goto unimplemented;
7676 #endif
7677 #ifdef TARGET_NR_syscall
7678 case TARGET_NR_syscall:
7679 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
7680 arg6, arg7, arg8, 0);
7681 break;
7682 #endif
7683 case TARGET_NR_wait4:
7685 int status;
7686 abi_long status_ptr = arg2;
7687 struct rusage rusage, *rusage_ptr;
7688 abi_ulong target_rusage = arg4;
7689 abi_long rusage_err;
7690 if (target_rusage)
7691 rusage_ptr = &rusage;
7692 else
7693 rusage_ptr = NULL;
7694 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
7695 if (!is_error(ret)) {
7696 if (status_ptr && ret) {
7697 status = host_to_target_waitstatus(status);
7698 if (put_user_s32(status, status_ptr))
7699 goto efault;
7701 if (target_rusage) {
7702 rusage_err = host_to_target_rusage(target_rusage, &rusage);
7703 if (rusage_err) {
7704 ret = rusage_err;
7709 break;
7710 #ifdef TARGET_NR_swapoff
7711 case TARGET_NR_swapoff:
7712 if (!(p = lock_user_string(arg1)))
7713 goto efault;
7714 ret = get_errno(swapoff(p));
7715 unlock_user(p, arg1, 0);
7716 break;
7717 #endif
7718 case TARGET_NR_sysinfo:
7720 struct target_sysinfo *target_value;
7721 struct sysinfo value;
7722 ret = get_errno(sysinfo(&value));
7723 if (!is_error(ret) && arg1)
7725 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7726 goto efault;
7727 __put_user(value.uptime, &target_value->uptime);
7728 __put_user(value.loads[0], &target_value->loads[0]);
7729 __put_user(value.loads[1], &target_value->loads[1]);
7730 __put_user(value.loads[2], &target_value->loads[2]);
7731 __put_user(value.totalram, &target_value->totalram);
7732 __put_user(value.freeram, &target_value->freeram);
7733 __put_user(value.sharedram, &target_value->sharedram);
7734 __put_user(value.bufferram, &target_value->bufferram);
7735 __put_user(value.totalswap, &target_value->totalswap);
7736 __put_user(value.freeswap, &target_value->freeswap);
7737 __put_user(value.procs, &target_value->procs);
7738 __put_user(value.totalhigh, &target_value->totalhigh);
7739 __put_user(value.freehigh, &target_value->freehigh);
7740 __put_user(value.mem_unit, &target_value->mem_unit);
7741 unlock_user_struct(target_value, arg1, 1);
7744 break;
7745 #ifdef TARGET_NR_ipc
7746 case TARGET_NR_ipc:
7747 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7748 break;
7749 #endif
7750 #ifdef TARGET_NR_semget
7751 case TARGET_NR_semget:
7752 ret = get_errno(semget(arg1, arg2, arg3));
7753 break;
7754 #endif
7755 #ifdef TARGET_NR_semop
7756 case TARGET_NR_semop:
7757 ret = do_semop(arg1, arg2, arg3);
7758 break;
7759 #endif
7760 #ifdef TARGET_NR_semctl
7761 case TARGET_NR_semctl:
7762 ret = do_semctl(arg1, arg2, arg3, arg4);
7763 break;
7764 #endif
7765 #ifdef TARGET_NR_msgctl
7766 case TARGET_NR_msgctl:
7767 ret = do_msgctl(arg1, arg2, arg3);
7768 break;
7769 #endif
7770 #ifdef TARGET_NR_msgget
7771 case TARGET_NR_msgget:
7772 ret = get_errno(msgget(arg1, arg2));
7773 break;
7774 #endif
7775 #ifdef TARGET_NR_msgrcv
7776 case TARGET_NR_msgrcv:
7777 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7778 break;
7779 #endif
7780 #ifdef TARGET_NR_msgsnd
7781 case TARGET_NR_msgsnd:
7782 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7783 break;
7784 #endif
7785 #ifdef TARGET_NR_shmget
7786 case TARGET_NR_shmget:
7787 ret = get_errno(shmget(arg1, arg2, arg3));
7788 break;
7789 #endif
7790 #ifdef TARGET_NR_shmctl
7791 case TARGET_NR_shmctl:
7792 ret = do_shmctl(arg1, arg2, arg3);
7793 break;
7794 #endif
7795 #ifdef TARGET_NR_shmat
7796 case TARGET_NR_shmat:
7797 ret = do_shmat(arg1, arg2, arg3);
7798 break;
7799 #endif
7800 #ifdef TARGET_NR_shmdt
7801 case TARGET_NR_shmdt:
7802 ret = do_shmdt(arg1);
7803 break;
7804 #endif
7805 case TARGET_NR_fsync:
7806 ret = get_errno(fsync(arg1));
7807 break;
7808 case TARGET_NR_clone:
7809 /* Linux manages to have three different orderings for its
7810 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7811 * match the kernel's CONFIG_CLONE_* settings.
7812 * Microblaze is further special in that it uses a sixth
7813 * implicit argument to clone for the TLS pointer.
7815 #if defined(TARGET_MICROBLAZE)
7816 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7817 #elif defined(TARGET_CLONE_BACKWARDS)
7818 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7819 #elif defined(TARGET_CLONE_BACKWARDS2)
7820 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7821 #else
7822 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7823 #endif
7824 break;
7825 #ifdef __NR_exit_group
7826 /* new thread calls */
7827 case TARGET_NR_exit_group:
7828 #ifdef TARGET_GPROF
7829 _mcleanup();
7830 #endif
7831 gdb_exit(cpu_env, arg1);
7832 ret = get_errno(exit_group(arg1));
7833 break;
7834 #endif
7835 case TARGET_NR_setdomainname:
7836 if (!(p = lock_user_string(arg1)))
7837 goto efault;
7838 ret = get_errno(setdomainname(p, arg2));
7839 unlock_user(p, arg1, 0);
7840 break;
7841 case TARGET_NR_uname:
7842 /* no need to transcode because we use the linux syscall */
7844 struct new_utsname * buf;
7846 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7847 goto efault;
7848 ret = get_errno(sys_uname(buf));
7849 if (!is_error(ret)) {
7850 /* Overwrite the native machine name with whatever is being
7851 emulated. */
7852 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7853 /* Allow the user to override the reported release. */
7854 if (qemu_uname_release && *qemu_uname_release)
7855 strcpy (buf->release, qemu_uname_release);
7857 unlock_user_struct(buf, arg1, 1);
7859 break;
7860 #ifdef TARGET_I386
7861 case TARGET_NR_modify_ldt:
7862 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7863 break;
7864 #if !defined(TARGET_X86_64)
7865 case TARGET_NR_vm86old:
7866 goto unimplemented;
7867 case TARGET_NR_vm86:
7868 ret = do_vm86(cpu_env, arg1, arg2);
7869 break;
7870 #endif
7871 #endif
7872 case TARGET_NR_adjtimex:
7873 goto unimplemented;
7874 #ifdef TARGET_NR_create_module
7875 case TARGET_NR_create_module:
7876 #endif
7877 case TARGET_NR_init_module:
7878 case TARGET_NR_delete_module:
7879 #ifdef TARGET_NR_get_kernel_syms
7880 case TARGET_NR_get_kernel_syms:
7881 #endif
7882 goto unimplemented;
7883 case TARGET_NR_quotactl:
7884 goto unimplemented;
7885 case TARGET_NR_getpgid:
7886 ret = get_errno(getpgid(arg1));
7887 break;
7888 case TARGET_NR_fchdir:
7889 ret = get_errno(fchdir(arg1));
7890 break;
7891 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7892 case TARGET_NR_bdflush:
7893 goto unimplemented;
7894 #endif
7895 #ifdef TARGET_NR_sysfs
7896 case TARGET_NR_sysfs:
7897 goto unimplemented;
7898 #endif
7899 case TARGET_NR_personality:
7900 ret = get_errno(personality(arg1));
7901 break;
7902 #ifdef TARGET_NR_afs_syscall
7903 case TARGET_NR_afs_syscall:
7904 goto unimplemented;
7905 #endif
7906 #ifdef TARGET_NR__llseek /* Not on alpha */
7907 case TARGET_NR__llseek:
7909 int64_t res;
7910 #if !defined(__NR_llseek)
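            /* Without a native _llseek (64-bit hosts), reassemble the 64-bit
             * offset from its two halves and use a plain lseek instead. */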
7911 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7912 if (res == -1) {
7913 ret = get_errno(res);
7914 } else {
7915 ret = 0;
7917 #else
7918 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7919 #endif
7920 if ((ret == 0) && put_user_s64(res, arg4)) {
7921 goto efault;
7924 break;
7925 #endif
7926 #ifdef TARGET_NR_getdents
7927 case TARGET_NR_getdents:
7928 #ifdef __NR_getdents
7929 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
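        /* 32-bit target on a 64-bit host: the host linux_dirent carries
         * 64-bit d_ino/d_off fields, so read into a bounce buffer and
         * repack every record into the 32-bit target_dirent layout. */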
7931 struct target_dirent *target_dirp;
7932 struct linux_dirent *dirp;
7933 abi_long count = arg3;
7935 dirp = g_try_malloc(count);
7936 if (!dirp) {
7937 ret = -TARGET_ENOMEM;
7938 goto fail;
7941 ret = get_errno(sys_getdents(arg1, dirp, count));
7942 if (!is_error(ret)) {
7943 struct linux_dirent *de;
7944 struct target_dirent *tde;
7945 int len = ret;
7946 int reclen, treclen;
7947 int count1, tnamelen;
7949 count1 = 0;
7950 de = dirp;
7951 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7952 goto efault;
7953 tde = target_dirp;
7954 while (len > 0) {
7955 reclen = de->d_reclen;
7956 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7957 assert(tnamelen >= 0);
7958 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7959 assert(count1 + treclen <= count);
7960 tde->d_reclen = tswap16(treclen);
7961 tde->d_ino = tswapal(de->d_ino);
7962 tde->d_off = tswapal(de->d_off);
7963 memcpy(tde->d_name, de->d_name, tnamelen);
7964 de = (struct linux_dirent *)((char *)de + reclen);
7965 len -= reclen;
7966 tde = (struct target_dirent *)((char *)tde + treclen);
7967 count1 += treclen;
7969 ret = count1;
7970 unlock_user(target_dirp, arg2, ret);
7972 g_free(dirp);
7974 #else
7976 struct linux_dirent *dirp;
7977 abi_long count = arg3;
7979 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7980 goto efault;
7981 ret = get_errno(sys_getdents(arg1, dirp, count));
7982 if (!is_error(ret)) {
7983 struct linux_dirent *de;
7984 int len = ret;
7985 int reclen;
7986 de = dirp;
7987 while (len > 0) {
7988 reclen = de->d_reclen;
7989 if (reclen > len)
7990 break;
7991 de->d_reclen = tswap16(reclen);
7992 tswapls(&de->d_ino);
7993 tswapls(&de->d_off);
7994 de = (struct linux_dirent *)((char *)de + reclen);
7995 len -= reclen;
7998 unlock_user(dirp, arg2, ret);
8000 #endif
8001 #else
8002 /* Implement getdents in terms of getdents64 */
8004 struct linux_dirent64 *dirp;
8005 abi_long count = arg3;
8007 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8008 if (!dirp) {
8009 goto efault;
8011 ret = get_errno(sys_getdents64(arg1, dirp, count));
8012 if (!is_error(ret)) {
8013 /* Convert the dirent64 structs to target dirent. We do this
8014 * in-place, since we can guarantee that a target_dirent is no
8015 * larger than a dirent64; however this means we have to be
8016 * careful to read everything before writing in the new format.
8018 struct linux_dirent64 *de;
8019 struct target_dirent *tde;
8020 int len = ret;
8021 int tlen = 0;
8023 de = dirp;
8024 tde = (struct target_dirent *)dirp;
8025 while (len > 0) {
8026 int namelen, treclen;
8027 int reclen = de->d_reclen;
8028 uint64_t ino = de->d_ino;
8029 int64_t off = de->d_off;
8030 uint8_t type = de->d_type;
8032 namelen = strlen(de->d_name);
8033 treclen = offsetof(struct target_dirent, d_name)
8034 + namelen + 2;
8035 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
8037 memmove(tde->d_name, de->d_name, namelen + 1);
8038 tde->d_ino = tswapal(ino);
8039 tde->d_off = tswapal(off);
8040 tde->d_reclen = tswap16(treclen);
8041 /* The target_dirent type is in what was formerly a padding
8042 * byte at the end of the structure:
8044 *(((char *)tde) + treclen - 1) = type;
8046 de = (struct linux_dirent64 *)((char *)de + reclen);
8047 tde = (struct target_dirent *)((char *)tde + treclen);
8048 len -= reclen;
8049 tlen += treclen;
8051 ret = tlen;
8053 unlock_user(dirp, arg2, ret);
8055 #endif
8056 break;
8057 #endif /* TARGET_NR_getdents */
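    /* Illustrative sketch (not from the original source): the conversion above
     * shrinks each host record in place.  Assuming a 32-bit target_dirent with
     * 4-byte d_ino/d_off and a 2-byte d_reclen, an entry named "foo" needs
     * offsetof(d_name) + strlen("foo") + 2 = 10 + 3 + 2 = 15 bytes, rounded up
     * to 16 so the next record stays abi_long aligned; the d_type byte is then
     * stored in the final byte of the record, matching the legacy getdents ABI.
     */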
8058 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8059 case TARGET_NR_getdents64:
8061 struct linux_dirent64 *dirp;
8062 abi_long count = arg3;
8063 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
8064 goto efault;
8065 ret = get_errno(sys_getdents64(arg1, dirp, count));
8066 if (!is_error(ret)) {
8067 struct linux_dirent64 *de;
8068 int len = ret;
8069 int reclen;
8070 de = dirp;
8071 while (len > 0) {
8072 reclen = de->d_reclen;
8073 if (reclen > len)
8074 break;
8075 de->d_reclen = tswap16(reclen);
8076 tswap64s((uint64_t *)&de->d_ino);
8077 tswap64s((uint64_t *)&de->d_off);
8078 de = (struct linux_dirent64 *)((char *)de + reclen);
8079 len -= reclen;
8082 unlock_user(dirp, arg2, ret);
8084 break;
8085 #endif /* TARGET_NR_getdents64 */
8086 #if defined(TARGET_NR__newselect)
8087 case TARGET_NR__newselect:
8088 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8089 break;
8090 #endif
8091 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
8092 # ifdef TARGET_NR_poll
8093 case TARGET_NR_poll:
8094 # endif
8095 # ifdef TARGET_NR_ppoll
8096 case TARGET_NR_ppoll:
8097 # endif
8099 struct target_pollfd *target_pfd;
8100 unsigned int nfds = arg2;
8101 int timeout = arg3;
8102 struct pollfd *pfd;
8103 unsigned int i;
8105 pfd = NULL;
8106 target_pfd = NULL;
8107 if (nfds) {
8108 target_pfd = lock_user(VERIFY_WRITE, arg1,
8109 sizeof(struct target_pollfd) * nfds, 1);
8110 if (!target_pfd) {
8111 goto efault;
8114 pfd = alloca(sizeof(struct pollfd) * nfds);
8115 for (i = 0; i < nfds; i++) {
8116 pfd[i].fd = tswap32(target_pfd[i].fd);
8117 pfd[i].events = tswap16(target_pfd[i].events);
8121 # ifdef TARGET_NR_ppoll
8122 if (num == TARGET_NR_ppoll) {
8123 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
8124 target_sigset_t *target_set;
8125 sigset_t _set, *set = &_set;
8127 if (arg3) {
8128 if (target_to_host_timespec(timeout_ts, arg3)) {
8129 unlock_user(target_pfd, arg1, 0);
8130 goto efault;
8132 } else {
8133 timeout_ts = NULL;
8136 if (arg4) {
8137 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
8138 if (!target_set) {
8139 unlock_user(target_pfd, arg1, 0);
8140 goto efault;
8142 target_to_host_sigset(set, target_set);
8143 } else {
8144 set = NULL;
8147 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
8149 if (!is_error(ret) && arg3) {
8150 host_to_target_timespec(arg3, timeout_ts);
8152 if (arg4) {
8153 unlock_user(target_set, arg4, 0);
8155 } else
8156 # endif
8157 ret = get_errno(poll(pfd, nfds, timeout));
8159 if (!is_error(ret)) {
8160 for(i = 0; i < nfds; i++) {
8161 target_pfd[i].revents = tswap16(pfd[i].revents);
8164 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
8166 break;
8167 #endif
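    /* Illustrative note (assuming a cross-endian guest/host pair): the pollfd
     * array is copied into host byte order before the call and only revents is
     * copied back.  A big-endian guest polling fd 3 for POLLIN stores events
     * in memory that the little-endian host would read as 0x0100; tswap16()
     * turns that back into 0x0001 for the host poll()/ppoll(), and the
     * returned revents is swapped the same way.  For ppoll the timeout and
     * signal mask are converted as well.
     */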
8168 case TARGET_NR_flock:
8169 /* NOTE: the flock constant seems to be the same for every
8170 Linux platform */
8171 ret = get_errno(flock(arg1, arg2));
8172 break;
8173 case TARGET_NR_readv:
8175 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
8176 if (vec != NULL) {
8177 ret = get_errno(readv(arg1, vec, arg3));
8178 unlock_iovec(vec, arg2, arg3, 1);
8179 } else {
8180 ret = -host_to_target_errno(errno);
8183 break;
8184 case TARGET_NR_writev:
8186 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
8187 if (vec != NULL) {
8188 ret = get_errno(writev(arg1, vec, arg3));
8189 unlock_iovec(vec, arg2, arg3, 0);
8190 } else {
8191 ret = -host_to_target_errno(errno);
8194 break;
8195 case TARGET_NR_getsid:
8196 ret = get_errno(getsid(arg1));
8197 break;
8198 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
8199 case TARGET_NR_fdatasync:
8200 ret = get_errno(fdatasync(arg1));
8201 break;
8202 #endif
8203 #ifdef TARGET_NR__sysctl
8204 case TARGET_NR__sysctl:
8205 /* We don't implement this, but ENOTDIR is always a safe
8206 return value. */
8207 ret = -TARGET_ENOTDIR;
8208 break;
8209 #endif
8210 case TARGET_NR_sched_getaffinity:
8212 unsigned int mask_size;
8213 unsigned long *mask;
8216 * sched_getaffinity needs multiples of ulong, so need to take
8217 * care of mismatches between target ulong and host ulong sizes.
8219 if (arg2 & (sizeof(abi_ulong) - 1)) {
8220 ret = -TARGET_EINVAL;
8221 break;
8223 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8225 mask = alloca(mask_size);
8226 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
8228 if (!is_error(ret)) {
8229 if (ret > arg2) {
8230 /* More data returned than the caller's buffer will fit.
8231 * This only happens if sizeof(abi_long) < sizeof(long)
8232 * and the caller passed us a buffer holding an odd number
8233 * of abi_longs. If the host kernel is actually using the
8234 * extra 4 bytes then fail EINVAL; otherwise we can just
8235 * ignore them and only copy the interesting part.
8237 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
8238 if (numcpus > arg2 * 8) {
8239 ret = -TARGET_EINVAL;
8240 break;
8242 ret = arg2;
8245 if (copy_to_user(arg3, mask, ret)) {
8246 goto efault;
8250 break;
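    /* Worked example (not part of the original file): with a 32-bit guest on a
     * 64-bit host, a guest buffer of arg2 = 4 bytes passes the alignment check
     * (4 is a multiple of sizeof(abi_ulong)) but mask_size is rounded up to 8
     * for the host syscall.  If the kernel then reports 8 bytes of affinity
     * data, the code above only fails with -TARGET_EINVAL when the machine
     * actually has more CPUs than the guest's smaller buffer can describe;
     * otherwise the extra host bytes are silently dropped.
     */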
8251 case TARGET_NR_sched_setaffinity:
8253 unsigned int mask_size;
8254 unsigned long *mask;
8257 * sched_setaffinity needs multiples of ulong, so need to take
8258 * care of mismatches between target ulong and host ulong sizes.
8260 if (arg2 & (sizeof(abi_ulong) - 1)) {
8261 ret = -TARGET_EINVAL;
8262 break;
8264 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
8266 mask = alloca(mask_size);
8267 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
8268 goto efault;
8270 memcpy(mask, p, arg2);
8271 unlock_user_struct(p, arg3, 0);
8273 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
8275 break;
8276 case TARGET_NR_sched_setparam:
8278 struct sched_param *target_schp;
8279 struct sched_param schp;
8281 if (arg2 == 0) {
8282 return -TARGET_EINVAL;
8284 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
8285 goto efault;
8286 schp.sched_priority = tswap32(target_schp->sched_priority);
8287 unlock_user_struct(target_schp, arg2, 0);
8288 ret = get_errno(sched_setparam(arg1, &schp));
8290 break;
8291 case TARGET_NR_sched_getparam:
8293 struct sched_param *target_schp;
8294 struct sched_param schp;
8296 if (arg2 == 0) {
8297 return -TARGET_EINVAL;
8299 ret = get_errno(sched_getparam(arg1, &schp));
8300 if (!is_error(ret)) {
8301 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
8302 goto efault;
8303 target_schp->sched_priority = tswap32(schp.sched_priority);
8304 unlock_user_struct(target_schp, arg2, 1);
8307 break;
8308 case TARGET_NR_sched_setscheduler:
8310 struct sched_param *target_schp;
8311 struct sched_param schp;
8312 if (arg3 == 0) {
8313 return -TARGET_EINVAL;
8315 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
8316 goto efault;
8317 schp.sched_priority = tswap32(target_schp->sched_priority);
8318 unlock_user_struct(target_schp, arg3, 0);
8319 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
8321 break;
8322 case TARGET_NR_sched_getscheduler:
8323 ret = get_errno(sched_getscheduler(arg1));
8324 break;
8325 case TARGET_NR_sched_yield:
8326 ret = get_errno(sched_yield());
8327 break;
8328 case TARGET_NR_sched_get_priority_max:
8329 ret = get_errno(sched_get_priority_max(arg1));
8330 break;
8331 case TARGET_NR_sched_get_priority_min:
8332 ret = get_errno(sched_get_priority_min(arg1));
8333 break;
8334 case TARGET_NR_sched_rr_get_interval:
8336 struct timespec ts;
8337 ret = get_errno(sched_rr_get_interval(arg1, &ts));
8338 if (!is_error(ret)) {
8339 ret = host_to_target_timespec(arg2, &ts);
8342 break;
8343 case TARGET_NR_nanosleep:
8345 struct timespec req, rem;
8346 target_to_host_timespec(&req, arg1);
8347 ret = get_errno(nanosleep(&req, &rem));
8348 if (is_error(ret) && arg2) {
8349 host_to_target_timespec(arg2, &rem);
8352 break;
8353 #ifdef TARGET_NR_query_module
8354 case TARGET_NR_query_module:
8355 goto unimplemented;
8356 #endif
8357 #ifdef TARGET_NR_nfsservctl
8358 case TARGET_NR_nfsservctl:
8359 goto unimplemented;
8360 #endif
8361 case TARGET_NR_prctl:
8362 switch (arg1) {
8363 case PR_GET_PDEATHSIG:
8365 int deathsig;
8366 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
8367 if (!is_error(ret) && arg2
8368 && put_user_ual(deathsig, arg2)) {
8369 goto efault;
8371 break;
8373 #ifdef PR_GET_NAME
8374 case PR_GET_NAME:
8376 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
8377 if (!name) {
8378 goto efault;
8380 ret = get_errno(prctl(arg1, (unsigned long)name,
8381 arg3, arg4, arg5));
8382 unlock_user(name, arg2, 16);
8383 break;
8385 case PR_SET_NAME:
8387 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
8388 if (!name) {
8389 goto efault;
8391 ret = get_errno(prctl(arg1, (unsigned long)name,
8392 arg3, arg4, arg5));
8393 unlock_user(name, arg2, 0);
8394 break;
8396 #endif
8397 default:
8398 /* Most prctl options have no pointer arguments */
8399 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
8400 break;
8402 break;
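    /* Illustrative note: only the prctl options that take a pointer need help
     * here.  PR_GET_NAME/PR_SET_NAME exchange a fixed 16-byte thread-name
     * buffer (the kernel's TASK_COMM_LEN), and PR_GET_PDEATHSIG writes the
     * signal number back through the guest pointer; everything else is passed
     * straight through, e.g. a guest prctl(PR_SET_PDEATHSIG, SIGHUP) reaches
     * the host kernel with its arguments unchanged.
     */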
8403 #ifdef TARGET_NR_arch_prctl
8404 case TARGET_NR_arch_prctl:
8405 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
8406 ret = do_arch_prctl(cpu_env, arg1, arg2);
8407 break;
8408 #else
8409 goto unimplemented;
8410 #endif
8411 #endif
8412 #ifdef TARGET_NR_pread64
8413 case TARGET_NR_pread64:
8414 if (regpairs_aligned(cpu_env)) {
8415 arg4 = arg5;
8416 arg5 = arg6;
8418 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8419 goto efault;
8420 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
8421 unlock_user(p, arg2, ret);
8422 break;
8423 case TARGET_NR_pwrite64:
8424 if (regpairs_aligned(cpu_env)) {
8425 arg4 = arg5;
8426 arg5 = arg6;
8428 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8429 goto efault;
8430 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
8431 unlock_user(p, arg2, 0);
8432 break;
8433 #endif
8434 case TARGET_NR_getcwd:
8435 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
8436 goto efault;
8437 ret = get_errno(sys_getcwd1(p, arg2));
8438 unlock_user(p, arg1, ret);
8439 break;
8440 case TARGET_NR_capget:
8441 case TARGET_NR_capset:
8443 struct target_user_cap_header *target_header;
8444 struct target_user_cap_data *target_data = NULL;
8445 struct __user_cap_header_struct header;
8446 struct __user_cap_data_struct data[2];
8447 struct __user_cap_data_struct *dataptr = NULL;
8448 int i, target_datalen;
8449 int data_items = 1;
8451 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
8452 goto efault;
8454 header.version = tswap32(target_header->version);
8455 header.pid = tswap32(target_header->pid);
8457 if (header.version != _LINUX_CAPABILITY_VERSION) {
8458 /* Version 2 and up takes pointer to two user_data structs */
8459 data_items = 2;
8462 target_datalen = sizeof(*target_data) * data_items;
8464 if (arg2) {
8465 if (num == TARGET_NR_capget) {
8466 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
8467 } else {
8468 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
8470 if (!target_data) {
8471 unlock_user_struct(target_header, arg1, 0);
8472 goto efault;
8475 if (num == TARGET_NR_capset) {
8476 for (i = 0; i < data_items; i++) {
8477 data[i].effective = tswap32(target_data[i].effective);
8478 data[i].permitted = tswap32(target_data[i].permitted);
8479 data[i].inheritable = tswap32(target_data[i].inheritable);
8483 dataptr = data;
8486 if (num == TARGET_NR_capget) {
8487 ret = get_errno(capget(&header, dataptr));
8488 } else {
8489 ret = get_errno(capset(&header, dataptr));
8492 /* The kernel always updates version for both capget and capset */
8493 target_header->version = tswap32(header.version);
8494 unlock_user_struct(target_header, arg1, 1);
8496 if (arg2) {
8497 if (num == TARGET_NR_capget) {
8498 for (i = 0; i < data_items; i++) {
8499 target_data[i].effective = tswap32(data[i].effective);
8500 target_data[i].permitted = tswap32(data[i].permitted);
8501 target_data[i].inheritable = tswap32(data[i].inheritable);
8503 unlock_user(target_data, arg2, target_datalen);
8504 } else {
8505 unlock_user(target_data, arg2, 0);
8508 break;
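    /* Illustrative note: the capability ABI is versioned.  With the original
     * _LINUX_CAPABILITY_VERSION the guest supplies one __user_cap_data_struct;
     * any newer version means two, so 64-bit capability sets are split into a
     * low word in data[0] and a high word in data[1].  The kernel rewrites
     * header.version on every call, which is why it is copied back to the
     * guest even when the data pointer is NULL (a probe for the preferred
     * version).
     */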
8510 case TARGET_NR_sigaltstack:
8511 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
8512 break;
8514 #ifdef CONFIG_SENDFILE
8515 case TARGET_NR_sendfile:
8517 off_t *offp = NULL;
8518 off_t off;
8519 if (arg3) {
8520 ret = get_user_sal(off, arg3);
8521 if (is_error(ret)) {
8522 break;
8524 offp = &off;
8526 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8527 if (!is_error(ret) && arg3) {
8528 abi_long ret2 = put_user_sal(off, arg3);
8529 if (is_error(ret2)) {
8530 ret = ret2;
8533 break;
8535 #ifdef TARGET_NR_sendfile64
8536 case TARGET_NR_sendfile64:
8538 off_t *offp = NULL;
8539 off_t off;
8540 if (arg3) {
8541 ret = get_user_s64(off, arg3);
8542 if (is_error(ret)) {
8543 break;
8545 offp = &off;
8547 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
8548 if (!is_error(ret) && arg3) {
8549 abi_long ret2 = put_user_s64(off, arg3);
8550 if (is_error(ret2)) {
8551 ret = ret2;
8554 break;
8556 #endif
8557 #else
8558 case TARGET_NR_sendfile:
8559 #ifdef TARGET_NR_sendfile64
8560 case TARGET_NR_sendfile64:
8561 #endif
8562 goto unimplemented;
8563 #endif
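    /* Illustrative note: sendfile and sendfile64 differ only in how wide the
     * guest's offset variable is.  A sketch of the sendfile64 path: the 64-bit
     * offset is read with get_user_s64(), the host sendfile() advances it, and
     * put_user_s64() writes the new offset back, so a guest loop such as
     *     while (sendfile64(out, in, &off, 1 << 20) > 0) ;
     * sees off move exactly as it would on a native kernel.
     */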
8565 #ifdef TARGET_NR_getpmsg
8566 case TARGET_NR_getpmsg:
8567 goto unimplemented;
8568 #endif
8569 #ifdef TARGET_NR_putpmsg
8570 case TARGET_NR_putpmsg:
8571 goto unimplemented;
8572 #endif
8573 #ifdef TARGET_NR_vfork
8574 case TARGET_NR_vfork:
8575 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
8576 0, 0, 0, 0));
8577 break;
8578 #endif
8579 #ifdef TARGET_NR_ugetrlimit
8580 case TARGET_NR_ugetrlimit:
8582 struct rlimit rlim;
8583 int resource = target_to_host_resource(arg1);
8584 ret = get_errno(getrlimit(resource, &rlim));
8585 if (!is_error(ret)) {
8586 struct target_rlimit *target_rlim;
8587 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8588 goto efault;
8589 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8590 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8591 unlock_user_struct(target_rlim, arg2, 1);
8593 break;
8595 #endif
8596 #ifdef TARGET_NR_truncate64
8597 case TARGET_NR_truncate64:
8598 if (!(p = lock_user_string(arg1)))
8599 goto efault;
8600 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
8601 unlock_user(p, arg1, 0);
8602 break;
8603 #endif
8604 #ifdef TARGET_NR_ftruncate64
8605 case TARGET_NR_ftruncate64:
8606 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
8607 break;
8608 #endif
8609 #ifdef TARGET_NR_stat64
8610 case TARGET_NR_stat64:
8611 if (!(p = lock_user_string(arg1)))
8612 goto efault;
8613 ret = get_errno(stat(path(p), &st));
8614 unlock_user(p, arg1, 0);
8615 if (!is_error(ret))
8616 ret = host_to_target_stat64(cpu_env, arg2, &st);
8617 break;
8618 #endif
8619 #ifdef TARGET_NR_lstat64
8620 case TARGET_NR_lstat64:
8621 if (!(p = lock_user_string(arg1)))
8622 goto efault;
8623 ret = get_errno(lstat(path(p), &st));
8624 unlock_user(p, arg1, 0);
8625 if (!is_error(ret))
8626 ret = host_to_target_stat64(cpu_env, arg2, &st);
8627 break;
8628 #endif
8629 #ifdef TARGET_NR_fstat64
8630 case TARGET_NR_fstat64:
8631 ret = get_errno(fstat(arg1, &st));
8632 if (!is_error(ret))
8633 ret = host_to_target_stat64(cpu_env, arg2, &st);
8634 break;
8635 #endif
8636 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
8637 #ifdef TARGET_NR_fstatat64
8638 case TARGET_NR_fstatat64:
8639 #endif
8640 #ifdef TARGET_NR_newfstatat
8641 case TARGET_NR_newfstatat:
8642 #endif
8643 if (!(p = lock_user_string(arg2)))
8644 goto efault;
8645 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
8646 if (!is_error(ret))
8647 ret = host_to_target_stat64(cpu_env, arg3, &st);
8648 break;
8649 #endif
8650 #ifdef TARGET_NR_lchown
8651 case TARGET_NR_lchown:
8652 if (!(p = lock_user_string(arg1)))
8653 goto efault;
8654 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
8655 unlock_user(p, arg1, 0);
8656 break;
8657 #endif
8658 #ifdef TARGET_NR_getuid
8659 case TARGET_NR_getuid:
8660 ret = get_errno(high2lowuid(getuid()));
8661 break;
8662 #endif
8663 #ifdef TARGET_NR_getgid
8664 case TARGET_NR_getgid:
8665 ret = get_errno(high2lowgid(getgid()));
8666 break;
8667 #endif
8668 #ifdef TARGET_NR_geteuid
8669 case TARGET_NR_geteuid:
8670 ret = get_errno(high2lowuid(geteuid()));
8671 break;
8672 #endif
8673 #ifdef TARGET_NR_getegid
8674 case TARGET_NR_getegid:
8675 ret = get_errno(high2lowgid(getegid()));
8676 break;
8677 #endif
8678 case TARGET_NR_setreuid:
8679 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
8680 break;
8681 case TARGET_NR_setregid:
8682 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
8683 break;
8684 case TARGET_NR_getgroups:
8686 int gidsetsize = arg1;
8687 target_id *target_grouplist;
8688 gid_t *grouplist;
8689 int i;
8691 grouplist = alloca(gidsetsize * sizeof(gid_t));
8692 ret = get_errno(getgroups(gidsetsize, grouplist));
8693 if (gidsetsize == 0)
8694 break;
8695 if (!is_error(ret)) {
8696 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
8697 if (!target_grouplist)
8698 goto efault;
8699 for(i = 0;i < ret; i++)
8700 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
8701 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
8704 break;
8705 case TARGET_NR_setgroups:
8707 int gidsetsize = arg1;
8708 target_id *target_grouplist;
8709 gid_t *grouplist = NULL;
8710 int i;
8711 if (gidsetsize) {
8712 grouplist = alloca(gidsetsize * sizeof(gid_t));
8713 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
8714 if (!target_grouplist) {
8715 ret = -TARGET_EFAULT;
8716 goto fail;
8718 for (i = 0; i < gidsetsize; i++) {
8719 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
8721 unlock_user(target_grouplist, arg2, 0);
8723 ret = get_errno(setgroups(gidsetsize, grouplist));
8725 break;
8726 case TARGET_NR_fchown:
8727 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
8728 break;
8729 #if defined(TARGET_NR_fchownat)
8730 case TARGET_NR_fchownat:
8731 if (!(p = lock_user_string(arg2)))
8732 goto efault;
8733 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
8734 low2highgid(arg4), arg5));
8735 unlock_user(p, arg2, 0);
8736 break;
8737 #endif
8738 #ifdef TARGET_NR_setresuid
8739 case TARGET_NR_setresuid:
8740 ret = get_errno(setresuid(low2highuid(arg1),
8741 low2highuid(arg2),
8742 low2highuid(arg3)));
8743 break;
8744 #endif
8745 #ifdef TARGET_NR_getresuid
8746 case TARGET_NR_getresuid:
8748 uid_t ruid, euid, suid;
8749 ret = get_errno(getresuid(&ruid, &euid, &suid));
8750 if (!is_error(ret)) {
8751 if (put_user_id(high2lowuid(ruid), arg1)
8752 || put_user_id(high2lowuid(euid), arg2)
8753 || put_user_id(high2lowuid(suid), arg3))
8754 goto efault;
8757 break;
8758 #endif
8759 #ifdef TARGET_NR_getresgid
8760 case TARGET_NR_setresgid:
8761 ret = get_errno(setresgid(low2highgid(arg1),
8762 low2highgid(arg2),
8763 low2highgid(arg3)));
8764 break;
8765 #endif
8766 #ifdef TARGET_NR_getresgid
8767 case TARGET_NR_getresgid:
8769 gid_t rgid, egid, sgid;
8770 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8771 if (!is_error(ret)) {
8772 if (put_user_id(high2lowgid(rgid), arg1)
8773 || put_user_id(high2lowgid(egid), arg2)
8774 || put_user_id(high2lowgid(sgid), arg3))
8775 goto efault;
8778 break;
8779 #endif
8780 #ifdef TARGET_NR_chown
8781 case TARGET_NR_chown:
8782 if (!(p = lock_user_string(arg1)))
8783 goto efault;
8784 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8785 unlock_user(p, arg1, 0);
8786 break;
8787 #endif
8788 case TARGET_NR_setuid:
8789 ret = get_errno(setuid(low2highuid(arg1)));
8790 break;
8791 case TARGET_NR_setgid:
8792 ret = get_errno(setgid(low2highgid(arg1)));
8793 break;
8794 case TARGET_NR_setfsuid:
8795 ret = get_errno(setfsuid(arg1));
8796 break;
8797 case TARGET_NR_setfsgid:
8798 ret = get_errno(setfsgid(arg1));
8799 break;
8801 #ifdef TARGET_NR_lchown32
8802 case TARGET_NR_lchown32:
8803 if (!(p = lock_user_string(arg1)))
8804 goto efault;
8805 ret = get_errno(lchown(p, arg2, arg3));
8806 unlock_user(p, arg1, 0);
8807 break;
8808 #endif
8809 #ifdef TARGET_NR_getuid32
8810 case TARGET_NR_getuid32:
8811 ret = get_errno(getuid());
8812 break;
8813 #endif
8815 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8816 /* Alpha specific */
8817 case TARGET_NR_getxuid:
8819 uid_t euid;
8820 euid=geteuid();
8821 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
8823 ret = get_errno(getuid());
8824 break;
8825 #endif
8826 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8827 /* Alpha specific */
8828 case TARGET_NR_getxgid:
8830 uid_t egid;
8831 egid=getegid();
8832 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
8834 ret = get_errno(getgid());
8835 break;
8836 #endif
8837 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8838 /* Alpha specific */
8839 case TARGET_NR_osf_getsysinfo:
8840 ret = -TARGET_EOPNOTSUPP;
8841 switch (arg1) {
8842 case TARGET_GSI_IEEE_FP_CONTROL:
8844 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8846 /* Copied from linux ieee_fpcr_to_swcr. */
8847 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8848 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8849 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8850 | SWCR_TRAP_ENABLE_DZE
8851 | SWCR_TRAP_ENABLE_OVF);
8852 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8853 | SWCR_TRAP_ENABLE_INE);
8854 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8855 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8857 if (put_user_u64 (swcr, arg2))
8858 goto efault;
8859 ret = 0;
8861 break;
8863 /* case GSI_IEEE_STATE_AT_SIGNAL:
8864 -- Not implemented in linux kernel.
8865 case GSI_UACPROC:
8866 -- Retrieves current unaligned access state; not much used.
8867 case GSI_PROC_TYPE:
8868 -- Retrieves implver information; surely not used.
8869 case GSI_GET_HWRPB:
8870 -- Grabs a copy of the HWRPB; surely not used.
8873 break;
8874 #endif
8875 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8876 /* Alpha specific */
8877 case TARGET_NR_osf_setsysinfo:
8878 ret = -TARGET_EOPNOTSUPP;
8879 switch (arg1) {
8880 case TARGET_SSI_IEEE_FP_CONTROL:
8882 uint64_t swcr, fpcr, orig_fpcr;
8884 if (get_user_u64 (swcr, arg2)) {
8885 goto efault;
8887 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8888 fpcr = orig_fpcr & FPCR_DYN_MASK;
8890 /* Copied from linux ieee_swcr_to_fpcr. */
8891 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8892 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8893 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8894 | SWCR_TRAP_ENABLE_DZE
8895 | SWCR_TRAP_ENABLE_OVF)) << 48;
8896 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8897 | SWCR_TRAP_ENABLE_INE)) << 57;
8898 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8899 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8901 cpu_alpha_store_fpcr(cpu_env, fpcr);
8902 ret = 0;
8904 break;
8906 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8908 uint64_t exc, fpcr, orig_fpcr;
8909 int si_code;
8911 if (get_user_u64(exc, arg2)) {
8912 goto efault;
8915 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8917 /* We only add to the exception status here. */
8918 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8920 cpu_alpha_store_fpcr(cpu_env, fpcr);
8921 ret = 0;
8923 /* Old exceptions are not signaled. */
8924 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8926 /* If any exceptions set by this call,
8927 and are unmasked, send a signal. */
8928 si_code = 0;
8929 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8930 si_code = TARGET_FPE_FLTRES;
8932 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8933 si_code = TARGET_FPE_FLTUND;
8935 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8936 si_code = TARGET_FPE_FLTOVF;
8938 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8939 si_code = TARGET_FPE_FLTDIV;
8941 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8942 si_code = TARGET_FPE_FLTINV;
8944 if (si_code != 0) {
8945 target_siginfo_t info;
8946 info.si_signo = SIGFPE;
8947 info.si_errno = 0;
8948 info.si_code = si_code;
8949 info._sifields._sigfault._addr
8950 = ((CPUArchState *)cpu_env)->pc;
8951 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8954 break;
8956 /* case SSI_NVPAIRS:
8957 -- Used with SSIN_UACPROC to enable unaligned accesses.
8958 case SSI_IEEE_STATE_AT_SIGNAL:
8959 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8960 -- Not implemented in linux kernel
8963 break;
8964 #endif
8965 #ifdef TARGET_NR_osf_sigprocmask
8966 /* Alpha specific. */
8967 case TARGET_NR_osf_sigprocmask:
8969 abi_ulong mask;
8970 int how;
8971 sigset_t set, oldset;
8973 switch(arg1) {
8974 case TARGET_SIG_BLOCK:
8975 how = SIG_BLOCK;
8976 break;
8977 case TARGET_SIG_UNBLOCK:
8978 how = SIG_UNBLOCK;
8979 break;
8980 case TARGET_SIG_SETMASK:
8981 how = SIG_SETMASK;
8982 break;
8983 default:
8984 ret = -TARGET_EINVAL;
8985 goto fail;
8987 mask = arg2;
8988 target_to_host_old_sigset(&set, &mask);
8989 do_sigprocmask(how, &set, &oldset);
8990 host_to_target_old_sigset(&mask, &oldset);
8991 ret = mask;
8993 break;
8994 #endif
8996 #ifdef TARGET_NR_getgid32
8997 case TARGET_NR_getgid32:
8998 ret = get_errno(getgid());
8999 break;
9000 #endif
9001 #ifdef TARGET_NR_geteuid32
9002 case TARGET_NR_geteuid32:
9003 ret = get_errno(geteuid());
9004 break;
9005 #endif
9006 #ifdef TARGET_NR_getegid32
9007 case TARGET_NR_getegid32:
9008 ret = get_errno(getegid());
9009 break;
9010 #endif
9011 #ifdef TARGET_NR_setreuid32
9012 case TARGET_NR_setreuid32:
9013 ret = get_errno(setreuid(arg1, arg2));
9014 break;
9015 #endif
9016 #ifdef TARGET_NR_setregid32
9017 case TARGET_NR_setregid32:
9018 ret = get_errno(setregid(arg1, arg2));
9019 break;
9020 #endif
9021 #ifdef TARGET_NR_getgroups32
9022 case TARGET_NR_getgroups32:
9024 int gidsetsize = arg1;
9025 uint32_t *target_grouplist;
9026 gid_t *grouplist;
9027 int i;
9029 grouplist = alloca(gidsetsize * sizeof(gid_t));
9030 ret = get_errno(getgroups(gidsetsize, grouplist));
9031 if (gidsetsize == 0)
9032 break;
9033 if (!is_error(ret)) {
9034 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
9035 if (!target_grouplist) {
9036 ret = -TARGET_EFAULT;
9037 goto fail;
9039 for(i = 0;i < ret; i++)
9040 target_grouplist[i] = tswap32(grouplist[i]);
9041 unlock_user(target_grouplist, arg2, gidsetsize * 4);
9044 break;
9045 #endif
9046 #ifdef TARGET_NR_setgroups32
9047 case TARGET_NR_setgroups32:
9049 int gidsetsize = arg1;
9050 uint32_t *target_grouplist;
9051 gid_t *grouplist;
9052 int i;
9054 grouplist = alloca(gidsetsize * sizeof(gid_t));
9055 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
9056 if (!target_grouplist) {
9057 ret = -TARGET_EFAULT;
9058 goto fail;
9060 for(i = 0;i < gidsetsize; i++)
9061 grouplist[i] = tswap32(target_grouplist[i]);
9062 unlock_user(target_grouplist, arg2, 0);
9063 ret = get_errno(setgroups(gidsetsize, grouplist));
9065 break;
9066 #endif
9067 #ifdef TARGET_NR_fchown32
9068 case TARGET_NR_fchown32:
9069 ret = get_errno(fchown(arg1, arg2, arg3));
9070 break;
9071 #endif
9072 #ifdef TARGET_NR_setresuid32
9073 case TARGET_NR_setresuid32:
9074 ret = get_errno(setresuid(arg1, arg2, arg3));
9075 break;
9076 #endif
9077 #ifdef TARGET_NR_getresuid32
9078 case TARGET_NR_getresuid32:
9080 uid_t ruid, euid, suid;
9081 ret = get_errno(getresuid(&ruid, &euid, &suid));
9082 if (!is_error(ret)) {
9083 if (put_user_u32(ruid, arg1)
9084 || put_user_u32(euid, arg2)
9085 || put_user_u32(suid, arg3))
9086 goto efault;
9089 break;
9090 #endif
9091 #ifdef TARGET_NR_setresgid32
9092 case TARGET_NR_setresgid32:
9093 ret = get_errno(setresgid(arg1, arg2, arg3));
9094 break;
9095 #endif
9096 #ifdef TARGET_NR_getresgid32
9097 case TARGET_NR_getresgid32:
9099 gid_t rgid, egid, sgid;
9100 ret = get_errno(getresgid(&rgid, &egid, &sgid));
9101 if (!is_error(ret)) {
9102 if (put_user_u32(rgid, arg1)
9103 || put_user_u32(egid, arg2)
9104 || put_user_u32(sgid, arg3))
9105 goto efault;
9108 break;
9109 #endif
9110 #ifdef TARGET_NR_chown32
9111 case TARGET_NR_chown32:
9112 if (!(p = lock_user_string(arg1)))
9113 goto efault;
9114 ret = get_errno(chown(p, arg2, arg3));
9115 unlock_user(p, arg1, 0);
9116 break;
9117 #endif
9118 #ifdef TARGET_NR_setuid32
9119 case TARGET_NR_setuid32:
9120 ret = get_errno(setuid(arg1));
9121 break;
9122 #endif
9123 #ifdef TARGET_NR_setgid32
9124 case TARGET_NR_setgid32:
9125 ret = get_errno(setgid(arg1));
9126 break;
9127 #endif
9128 #ifdef TARGET_NR_setfsuid32
9129 case TARGET_NR_setfsuid32:
9130 ret = get_errno(setfsuid(arg1));
9131 break;
9132 #endif
9133 #ifdef TARGET_NR_setfsgid32
9134 case TARGET_NR_setfsgid32:
9135 ret = get_errno(setfsgid(arg1));
9136 break;
9137 #endif
9139 case TARGET_NR_pivot_root:
9140 goto unimplemented;
9141 #ifdef TARGET_NR_mincore
9142 case TARGET_NR_mincore:
9144 void *a;
9145 ret = -TARGET_EFAULT;
9146 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
9147 goto efault;
9148 if (!(p = lock_user_string(arg3)))
9149 goto mincore_fail;
9150 ret = get_errno(mincore(a, arg2, p));
9151 unlock_user(p, arg3, ret);
9152 mincore_fail:
9153 unlock_user(a, arg1, 0);
9155 break;
9156 #endif
9157 #ifdef TARGET_NR_arm_fadvise64_64
9158 case TARGET_NR_arm_fadvise64_64:
9161 * arm_fadvise64_64 looks like fadvise64_64 but
9162 * with different argument order
9164 abi_long temp;
9165 temp = arg3;
9166 arg3 = arg4;
9167 arg4 = temp;
9169 #endif
9170 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
9171 #ifdef TARGET_NR_fadvise64_64
9172 case TARGET_NR_fadvise64_64:
9173 #endif
9174 #ifdef TARGET_NR_fadvise64
9175 case TARGET_NR_fadvise64:
9176 #endif
9177 #ifdef TARGET_S390X
9178 switch (arg4) {
9179 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
9180 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
9181 case 6: arg4 = POSIX_FADV_DONTNEED; break;
9182 case 7: arg4 = POSIX_FADV_NOREUSE; break;
9183 default: break;
9185 #endif
9186 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
9187 break;
9188 #endif
9189 #ifdef TARGET_NR_madvise
9190 case TARGET_NR_madvise:
9191 /* A straight passthrough may not be safe because qemu sometimes
9192 turns private file-backed mappings into anonymous mappings.
9193 This will break MADV_DONTNEED.
9194 This is a hint, so ignoring and returning success is ok. */
9195 ret = get_errno(0);
9196 break;
9197 #endif
9198 #if TARGET_ABI_BITS == 32
9199 case TARGET_NR_fcntl64:
9201 int cmd;
9202 struct flock64 fl;
9203 struct target_flock64 *target_fl;
9204 #ifdef TARGET_ARM
9205 struct target_eabi_flock64 *target_efl;
9206 #endif
9208 cmd = target_to_host_fcntl_cmd(arg2);
9209 if (cmd == -TARGET_EINVAL) {
9210 ret = cmd;
9211 break;
9214 switch(arg2) {
9215 case TARGET_F_GETLK64:
9216 #ifdef TARGET_ARM
9217 if (((CPUARMState *)cpu_env)->eabi) {
9218 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9219 goto efault;
9220 fl.l_type = tswap16(target_efl->l_type);
9221 fl.l_whence = tswap16(target_efl->l_whence);
9222 fl.l_start = tswap64(target_efl->l_start);
9223 fl.l_len = tswap64(target_efl->l_len);
9224 fl.l_pid = tswap32(target_efl->l_pid);
9225 unlock_user_struct(target_efl, arg3, 0);
9226 } else
9227 #endif
9229 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9230 goto efault;
9231 fl.l_type = tswap16(target_fl->l_type);
9232 fl.l_whence = tswap16(target_fl->l_whence);
9233 fl.l_start = tswap64(target_fl->l_start);
9234 fl.l_len = tswap64(target_fl->l_len);
9235 fl.l_pid = tswap32(target_fl->l_pid);
9236 unlock_user_struct(target_fl, arg3, 0);
9238 ret = get_errno(fcntl(arg1, cmd, &fl));
9239 if (ret == 0) {
9240 #ifdef TARGET_ARM
9241 if (((CPUARMState *)cpu_env)->eabi) {
9242 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
9243 goto efault;
9244 target_efl->l_type = tswap16(fl.l_type);
9245 target_efl->l_whence = tswap16(fl.l_whence);
9246 target_efl->l_start = tswap64(fl.l_start);
9247 target_efl->l_len = tswap64(fl.l_len);
9248 target_efl->l_pid = tswap32(fl.l_pid);
9249 unlock_user_struct(target_efl, arg3, 1);
9250 } else
9251 #endif
9253 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
9254 goto efault;
9255 target_fl->l_type = tswap16(fl.l_type);
9256 target_fl->l_whence = tswap16(fl.l_whence);
9257 target_fl->l_start = tswap64(fl.l_start);
9258 target_fl->l_len = tswap64(fl.l_len);
9259 target_fl->l_pid = tswap32(fl.l_pid);
9260 unlock_user_struct(target_fl, arg3, 1);
9263 break;
9265 case TARGET_F_SETLK64:
9266 case TARGET_F_SETLKW64:
9267 #ifdef TARGET_ARM
9268 if (((CPUARMState *)cpu_env)->eabi) {
9269 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
9270 goto efault;
9271 fl.l_type = tswap16(target_efl->l_type);
9272 fl.l_whence = tswap16(target_efl->l_whence);
9273 fl.l_start = tswap64(target_efl->l_start);
9274 fl.l_len = tswap64(target_efl->l_len);
9275 fl.l_pid = tswap32(target_efl->l_pid);
9276 unlock_user_struct(target_efl, arg3, 0);
9277 } else
9278 #endif
9280 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
9281 goto efault;
9282 fl.l_type = tswap16(target_fl->l_type);
9283 fl.l_whence = tswap16(target_fl->l_whence);
9284 fl.l_start = tswap64(target_fl->l_start);
9285 fl.l_len = tswap64(target_fl->l_len);
9286 fl.l_pid = tswap32(target_fl->l_pid);
9287 unlock_user_struct(target_fl, arg3, 0);
9289 ret = get_errno(fcntl(arg1, cmd, &fl));
9290 break;
9291 default:
9292 ret = do_fcntl(arg1, arg2, arg3);
9293 break;
9295 break;
9297 #endif
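    /* Illustrative note: fcntl64 only needs special handling for the 64-bit
     * file-locking commands; everything else falls through to the generic
     * do_fcntl() path.  The flock64 structure is converted field by field, and
     * ARM gets its own branch because EABI guests lay out the 64-bit
     * l_start/l_len fields with different alignment than the old ABI, hence
     * the separate target_eabi_flock64 conversion above.
     */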
9298 #ifdef TARGET_NR_cacheflush
9299 case TARGET_NR_cacheflush:
9300 /* self-modifying code is handled automatically, so nothing needed */
9301 ret = 0;
9302 break;
9303 #endif
9304 #ifdef TARGET_NR_security
9305 case TARGET_NR_security:
9306 goto unimplemented;
9307 #endif
9308 #ifdef TARGET_NR_getpagesize
9309 case TARGET_NR_getpagesize:
9310 ret = TARGET_PAGE_SIZE;
9311 break;
9312 #endif
9313 case TARGET_NR_gettid:
9314 ret = get_errno(gettid());
9315 break;
9316 #ifdef TARGET_NR_readahead
9317 case TARGET_NR_readahead:
9318 #if TARGET_ABI_BITS == 32
9319 if (regpairs_aligned(cpu_env)) {
9320 arg2 = arg3;
9321 arg3 = arg4;
9322 arg4 = arg5;
9324 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
9325 #else
9326 ret = get_errno(readahead(arg1, arg2, arg3));
9327 #endif
9328 break;
9329 #endif
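    /* Worked example (assuming a 32-bit guest ABI): the 64-bit readahead
     * offset arrives as a register pair, shifted down by one slot when
     * regpairs_aligned() says the ABI starts 64-bit pairs on an even register.
     * The code reassembles it as ((off64_t)arg3 << 32) | arg2, so an offset of
     * 0x100000000 corresponds to arg2 = 0 and arg3 = 1.
     */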
9330 #ifdef CONFIG_ATTR
9331 #ifdef TARGET_NR_setxattr
9332 case TARGET_NR_listxattr:
9333 case TARGET_NR_llistxattr:
9335 void *p, *b = 0;
9336 if (arg2) {
9337 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9338 if (!b) {
9339 ret = -TARGET_EFAULT;
9340 break;
9343 p = lock_user_string(arg1);
9344 if (p) {
9345 if (num == TARGET_NR_listxattr) {
9346 ret = get_errno(listxattr(p, b, arg3));
9347 } else {
9348 ret = get_errno(llistxattr(p, b, arg3));
9350 } else {
9351 ret = -TARGET_EFAULT;
9353 unlock_user(p, arg1, 0);
9354 unlock_user(b, arg2, arg3);
9355 break;
9357 case TARGET_NR_flistxattr:
9359 void *b = 0;
9360 if (arg2) {
9361 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9362 if (!b) {
9363 ret = -TARGET_EFAULT;
9364 break;
9367 ret = get_errno(flistxattr(arg1, b, arg3));
9368 unlock_user(b, arg2, arg3);
9369 break;
9371 case TARGET_NR_setxattr:
9372 case TARGET_NR_lsetxattr:
9374 void *p, *n, *v = 0;
9375 if (arg3) {
9376 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9377 if (!v) {
9378 ret = -TARGET_EFAULT;
9379 break;
9382 p = lock_user_string(arg1);
9383 n = lock_user_string(arg2);
9384 if (p && n) {
9385 if (num == TARGET_NR_setxattr) {
9386 ret = get_errno(setxattr(p, n, v, arg4, arg5));
9387 } else {
9388 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
9390 } else {
9391 ret = -TARGET_EFAULT;
9393 unlock_user(p, arg1, 0);
9394 unlock_user(n, arg2, 0);
9395 unlock_user(v, arg3, 0);
9397 break;
9398 case TARGET_NR_fsetxattr:
9400 void *n, *v = 0;
9401 if (arg3) {
9402 v = lock_user(VERIFY_READ, arg3, arg4, 1);
9403 if (!v) {
9404 ret = -TARGET_EFAULT;
9405 break;
9408 n = lock_user_string(arg2);
9409 if (n) {
9410 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
9411 } else {
9412 ret = -TARGET_EFAULT;
9414 unlock_user(n, arg2, 0);
9415 unlock_user(v, arg3, 0);
9417 break;
9418 case TARGET_NR_getxattr:
9419 case TARGET_NR_lgetxattr:
9421 void *p, *n, *v = 0;
9422 if (arg3) {
9423 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9424 if (!v) {
9425 ret = -TARGET_EFAULT;
9426 break;
9429 p = lock_user_string(arg1);
9430 n = lock_user_string(arg2);
9431 if (p && n) {
9432 if (num == TARGET_NR_getxattr) {
9433 ret = get_errno(getxattr(p, n, v, arg4));
9434 } else {
9435 ret = get_errno(lgetxattr(p, n, v, arg4));
9437 } else {
9438 ret = -TARGET_EFAULT;
9440 unlock_user(p, arg1, 0);
9441 unlock_user(n, arg2, 0);
9442 unlock_user(v, arg3, arg4);
9444 break;
9445 case TARGET_NR_fgetxattr:
9447 void *n, *v = 0;
9448 if (arg3) {
9449 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9450 if (!v) {
9451 ret = -TARGET_EFAULT;
9452 break;
9455 n = lock_user_string(arg2);
9456 if (n) {
9457 ret = get_errno(fgetxattr(arg1, n, v, arg4));
9458 } else {
9459 ret = -TARGET_EFAULT;
9461 unlock_user(n, arg2, 0);
9462 unlock_user(v, arg3, arg4);
9464 break;
9465 case TARGET_NR_removexattr:
9466 case TARGET_NR_lremovexattr:
9468 void *p, *n;
9469 p = lock_user_string(arg1);
9470 n = lock_user_string(arg2);
9471 if (p && n) {
9472 if (num == TARGET_NR_removexattr) {
9473 ret = get_errno(removexattr(p, n));
9474 } else {
9475 ret = get_errno(lremovexattr(p, n));
9477 } else {
9478 ret = -TARGET_EFAULT;
9480 unlock_user(p, arg1, 0);
9481 unlock_user(n, arg2, 0);
9483 break;
9484 case TARGET_NR_fremovexattr:
9486 void *n;
9487 n = lock_user_string(arg2);
9488 if (n) {
9489 ret = get_errno(fremovexattr(arg1, n));
9490 } else {
9491 ret = -TARGET_EFAULT;
9493 unlock_user(n, arg2, 0);
9495 break;
9496 #endif
9497 #endif /* CONFIG_ATTR */
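    /* Illustrative note: the *xattr wrappers all follow the same pattern --
     * lock the guest name/value buffers, call the matching host function, and
     * unlock with the number of bytes to copy back.  A NULL value buffer stays
     * meaningful: a guest listxattr(path, NULL, 0) ends up as
     * listxattr(p, NULL, 0) on the host and returns the buffer size the guest
     * needs to allocate, just as it would natively.
     */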
9498 #ifdef TARGET_NR_set_thread_area
9499 case TARGET_NR_set_thread_area:
9500 #if defined(TARGET_MIPS)
9501 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
9502 ret = 0;
9503 break;
9504 #elif defined(TARGET_CRIS)
9505 if (arg1 & 0xff)
9506 ret = -TARGET_EINVAL;
9507 else {
9508 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
9509 ret = 0;
9511 break;
9512 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
9513 ret = do_set_thread_area(cpu_env, arg1);
9514 break;
9515 #elif defined(TARGET_M68K)
9517 TaskState *ts = cpu->opaque;
9518 ts->tp_value = arg1;
9519 ret = 0;
9520 break;
9522 #else
9523 goto unimplemented_nowarn;
9524 #endif
9525 #endif
9526 #ifdef TARGET_NR_get_thread_area
9527 case TARGET_NR_get_thread_area:
9528 #if defined(TARGET_I386) && defined(TARGET_ABI32)
9529 ret = do_get_thread_area(cpu_env, arg1);
9530 break;
9531 #elif defined(TARGET_M68K)
9533 TaskState *ts = cpu->opaque;
9534 ret = ts->tp_value;
9535 break;
9537 #else
9538 goto unimplemented_nowarn;
9539 #endif
9540 #endif
9541 #ifdef TARGET_NR_getdomainname
9542 case TARGET_NR_getdomainname:
9543 goto unimplemented_nowarn;
9544 #endif
9546 #ifdef TARGET_NR_clock_gettime
9547 case TARGET_NR_clock_gettime:
9549 struct timespec ts;
9550 ret = get_errno(clock_gettime(arg1, &ts));
9551 if (!is_error(ret)) {
9552 host_to_target_timespec(arg2, &ts);
9554 break;
9556 #endif
9557 #ifdef TARGET_NR_clock_getres
9558 case TARGET_NR_clock_getres:
9560 struct timespec ts;
9561 ret = get_errno(clock_getres(arg1, &ts));
9562 if (!is_error(ret)) {
9563 host_to_target_timespec(arg2, &ts);
9565 break;
9567 #endif
9568 #ifdef TARGET_NR_clock_nanosleep
9569 case TARGET_NR_clock_nanosleep:
9571 struct timespec ts;
9572 target_to_host_timespec(&ts, arg3);
9573 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
9574 if (arg4)
9575 host_to_target_timespec(arg4, &ts);
9577 #if defined(TARGET_PPC)
9578 /* clock_nanosleep is odd in that it returns positive errno values.
9579 * On PPC, CR0 bit 3 should be set in such a situation. */
9580 if (ret) {
9581 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
9583 #endif
9584 break;
9586 #endif
9588 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
9589 case TARGET_NR_set_tid_address:
9590 ret = get_errno(set_tid_address((int *)g2h(arg1)));
9591 break;
9592 #endif
9594 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
9595 case TARGET_NR_tkill:
9596 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
9597 break;
9598 #endif
9600 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
9601 case TARGET_NR_tgkill:
9602 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
9603 target_to_host_signal(arg3)));
9604 break;
9605 #endif
9607 #ifdef TARGET_NR_set_robust_list
9608 case TARGET_NR_set_robust_list:
9609 case TARGET_NR_get_robust_list:
9610 /* The ABI for supporting robust futexes has userspace pass
9611 * the kernel a pointer to a linked list which is updated by
9612 * userspace after the syscall; the list is walked by the kernel
9613 * when the thread exits. Since the linked list in QEMU guest
9614 * memory isn't a valid linked list for the host and we have
9615 * no way to reliably intercept the thread-death event, we can't
9616 * support these. Silently return ENOSYS so that guest userspace
9617 * falls back to a non-robust futex implementation (which should
9618 * be OK except in the corner case of the guest crashing while
9619 * holding a mutex that is shared with another process via
9620 * shared memory).
9622 goto unimplemented_nowarn;
9623 #endif
9625 #if defined(TARGET_NR_utimensat)
9626 case TARGET_NR_utimensat:
9628 struct timespec *tsp, ts[2];
9629 if (!arg3) {
9630 tsp = NULL;
9631 } else {
9632 target_to_host_timespec(ts, arg3);
9633 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
9634 tsp = ts;
9636 if (!arg2)
9637 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
9638 else {
9639 if (!(p = lock_user_string(arg2))) {
9640 ret = -TARGET_EFAULT;
9641 goto fail;
9643 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
9644 unlock_user(p, arg2, 0);
9647 break;
9648 #endif
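    /* Illustrative note: utimensat takes an array of two timespecs,
     * [0] = atime and [1] = mtime, so both elements are converted
     * back-to-back from guest memory.  Special nanosecond values such as
     * UTIME_NOW and UTIME_OMIT pass through target_to_host_timespec()
     * unchanged, and a NULL pathname (arg2 == 0) turns the call into an
     * operation on the file descriptor itself, like futimens().
     */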
9649 case TARGET_NR_futex:
9650 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
9651 break;
9652 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
9653 case TARGET_NR_inotify_init:
9654 ret = get_errno(sys_inotify_init());
9655 break;
9656 #endif
9657 #ifdef CONFIG_INOTIFY1
9658 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
9659 case TARGET_NR_inotify_init1:
9660 ret = get_errno(sys_inotify_init1(arg1));
9661 break;
9662 #endif
9663 #endif
9664 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
9665 case TARGET_NR_inotify_add_watch:
9666 p = lock_user_string(arg2);
9667 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
9668 unlock_user(p, arg2, 0);
9669 break;
9670 #endif
9671 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
9672 case TARGET_NR_inotify_rm_watch:
9673 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
9674 break;
9675 #endif
9677 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
9678 case TARGET_NR_mq_open:
9680 struct mq_attr posix_mq_attr, *attrp;
9682 p = lock_user_string(arg1 - 1);
9683 if (arg4 != 0) {
9684 copy_from_user_mq_attr (&posix_mq_attr, arg4);
9685 attrp = &posix_mq_attr;
9686 } else {
9687 attrp = 0;
9689 ret = get_errno(mq_open(p, arg2, arg3, attrp));
9690 unlock_user (p, arg1, 0);
9692 break;
9694 case TARGET_NR_mq_unlink:
9695 p = lock_user_string(arg1 - 1);
9696 ret = get_errno(mq_unlink(p));
9697 unlock_user (p, arg1, 0);
9698 break;
9700 case TARGET_NR_mq_timedsend:
9702 struct timespec ts;
9704 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9705 if (arg5 != 0) {
9706 target_to_host_timespec(&ts, arg5);
9707 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
9708 host_to_target_timespec(arg5, &ts);
9710 else
9711 ret = get_errno(mq_send(arg1, p, arg3, arg4));
9712 unlock_user (p, arg2, arg3);
9714 break;
9716 case TARGET_NR_mq_timedreceive:
9718 struct timespec ts;
9719 unsigned int prio;
9721 p = lock_user (VERIFY_READ, arg2, arg3, 1);
9722 if (arg5 != 0) {
9723 target_to_host_timespec(&ts, arg5);
9724 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
9725 host_to_target_timespec(arg5, &ts);
9727 else
9728 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
9729 unlock_user (p, arg2, arg3);
9730 if (arg4 != 0)
9731 put_user_u32(prio, arg4);
9733 break;
9735 /* Not implemented for now... */
9736 /* case TARGET_NR_mq_notify: */
9737 /* break; */
9739 case TARGET_NR_mq_getsetattr:
9741 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
9742 ret = 0;
9743 if (arg3 != 0) {
9744 ret = mq_getattr(arg1, &posix_mq_attr_out);
9745 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
9747 if (arg2 != 0) {
9748 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
9749 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
9753 break;
9754 #endif
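    /* Illustrative note: the POSIX message-queue calls pick the timed or
     * untimed host variant depending on whether the guest supplied a timespec.
     * For example a guest mq_timedreceive(mqd, buf, len, &prio, NULL) maps to
     * plain mq_receive() on the host, while a non-NULL timeout is converted
     * with target_to_host_timespec() first; the received priority is copied
     * back with put_user_u32() when the guest asked for it.
     */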
9756 #ifdef CONFIG_SPLICE
9757 #ifdef TARGET_NR_tee
9758 case TARGET_NR_tee:
9760 ret = get_errno(tee(arg1,arg2,arg3,arg4));
9762 break;
9763 #endif
9764 #ifdef TARGET_NR_splice
9765 case TARGET_NR_splice:
9767 loff_t loff_in, loff_out;
9768 loff_t *ploff_in = NULL, *ploff_out = NULL;
9769 if (arg2) {
9770 if (get_user_u64(loff_in, arg2)) {
9771 goto efault;
9773 ploff_in = &loff_in;
9775 if (arg4) {
9776 if (get_user_u64(loff_out, arg4)) {
9777 goto efault;
9779 ploff_out = &loff_out;
9781 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9782 if (arg2) {
9783 if (put_user_u64(loff_in, arg2)) {
9784 goto efault;
9787 if (arg4) {
9788 if (put_user_u64(loff_out, arg4)) {
9789 goto efault;
9793 break;
9794 #endif
9795 #ifdef TARGET_NR_vmsplice
9796 case TARGET_NR_vmsplice:
9798 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9799 if (vec != NULL) {
9800 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9801 unlock_iovec(vec, arg2, arg3, 0);
9802 } else {
9803 ret = -host_to_target_errno(errno);
9806 break;
9807 #endif
9808 #endif /* CONFIG_SPLICE */
9809 #ifdef CONFIG_EVENTFD
9810 #if defined(TARGET_NR_eventfd)
9811 case TARGET_NR_eventfd:
9812 ret = get_errno(eventfd(arg1, 0));
9813 fd_trans_unregister(ret);
9814 break;
9815 #endif
9816 #if defined(TARGET_NR_eventfd2)
9817 case TARGET_NR_eventfd2:
9819 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9820 if (arg2 & TARGET_O_NONBLOCK) {
9821 host_flags |= O_NONBLOCK;
9823 if (arg2 & TARGET_O_CLOEXEC) {
9824 host_flags |= O_CLOEXEC;
9826 ret = get_errno(eventfd(arg1, host_flags));
9827 fd_trans_unregister(ret);
9828 break;
9830 #endif
9831 #endif /* CONFIG_EVENTFD */
9832 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9833 case TARGET_NR_fallocate:
9834 #if TARGET_ABI_BITS == 32
9835 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9836 target_offset64(arg5, arg6)));
9837 #else
9838 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9839 #endif
9840 break;
9841 #endif
9842 #if defined(CONFIG_SYNC_FILE_RANGE)
9843 #if defined(TARGET_NR_sync_file_range)
9844 case TARGET_NR_sync_file_range:
9845 #if TARGET_ABI_BITS == 32
9846 #if defined(TARGET_MIPS)
9847 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9848 target_offset64(arg5, arg6), arg7));
9849 #else
9850 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9851 target_offset64(arg4, arg5), arg6));
9852 #endif /* !TARGET_MIPS */
9853 #else
9854 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9855 #endif
9856 break;
9857 #endif
9858 #if defined(TARGET_NR_sync_file_range2)
9859 case TARGET_NR_sync_file_range2:
9860 /* This is like sync_file_range but the arguments are reordered */
9861 #if TARGET_ABI_BITS == 32
9862 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9863 target_offset64(arg5, arg6), arg2));
9864 #else
9865 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9866 #endif
9867 break;
9868 #endif
9869 #endif
9870 #if defined(TARGET_NR_signalfd4)
9871 case TARGET_NR_signalfd4:
9872 ret = do_signalfd4(arg1, arg2, arg4);
9873 break;
9874 #endif
9875 #if defined(TARGET_NR_signalfd)
9876 case TARGET_NR_signalfd:
9877 ret = do_signalfd4(arg1, arg2, 0);
9878 break;
9879 #endif
9880 #if defined(CONFIG_EPOLL)
9881 #if defined(TARGET_NR_epoll_create)
9882 case TARGET_NR_epoll_create:
9883 ret = get_errno(epoll_create(arg1));
9884 break;
9885 #endif
9886 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9887 case TARGET_NR_epoll_create1:
9888 ret = get_errno(epoll_create1(arg1));
9889 break;
9890 #endif
9891 #if defined(TARGET_NR_epoll_ctl)
9892 case TARGET_NR_epoll_ctl:
9894 struct epoll_event ep;
9895 struct epoll_event *epp = 0;
9896 if (arg4) {
9897 struct target_epoll_event *target_ep;
9898 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9899 goto efault;
9901 ep.events = tswap32(target_ep->events);
9902 /* The epoll_data_t union is just opaque data to the kernel,
9903 * so we transfer all 64 bits across and need not worry what
9904 * actual data type it is.
9906 ep.data.u64 = tswap64(target_ep->data.u64);
9907 unlock_user_struct(target_ep, arg4, 0);
9908 epp = &ep;
9910 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9911 break;
9913 #endif
9915 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9916 #define IMPLEMENT_EPOLL_PWAIT
9917 #endif
9918 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9919 #if defined(TARGET_NR_epoll_wait)
9920 case TARGET_NR_epoll_wait:
9921 #endif
9922 #if defined(IMPLEMENT_EPOLL_PWAIT)
9923 case TARGET_NR_epoll_pwait:
9924 #endif
9926 struct target_epoll_event *target_ep;
9927 struct epoll_event *ep;
9928 int epfd = arg1;
9929 int maxevents = arg3;
9930 int timeout = arg4;
9932 target_ep = lock_user(VERIFY_WRITE, arg2,
9933 maxevents * sizeof(struct target_epoll_event), 1);
9934 if (!target_ep) {
9935 goto efault;
9938 ep = alloca(maxevents * sizeof(struct epoll_event));
9940 switch (num) {
9941 #if defined(IMPLEMENT_EPOLL_PWAIT)
9942 case TARGET_NR_epoll_pwait:
9944 target_sigset_t *target_set;
9945 sigset_t _set, *set = &_set;
9947 if (arg5) {
9948 target_set = lock_user(VERIFY_READ, arg5,
9949 sizeof(target_sigset_t), 1);
9950 if (!target_set) {
9951 unlock_user(target_ep, arg2, 0);
9952 goto efault;
9954 target_to_host_sigset(set, target_set);
9955 unlock_user(target_set, arg5, 0);
9956 } else {
9957 set = NULL;
9960 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9961 break;
9963 #endif
9964 #if defined(TARGET_NR_epoll_wait)
9965 case TARGET_NR_epoll_wait:
9966 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9967 break;
9968 #endif
9969 default:
9970 ret = -TARGET_ENOSYS;
9972 if (!is_error(ret)) {
9973 int i;
9974 for (i = 0; i < ret; i++) {
9975 target_ep[i].events = tswap32(ep[i].events);
9976 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9979 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9980 break;
9982 #endif
9983 #endif
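    /* Illustrative note: epoll_data_t is opaque to the kernel, so the events
     * bitmask is converted with tswap32() and the data payload is byte-swapped
     * as a whole with tswap64() rather than interpreted.  A guest that
     * registers data.u64 = (uintptr_t)&my_state via epoll_ctl() therefore gets
     * exactly that value back from epoll_wait()/epoll_pwait().
     */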
9984 #ifdef TARGET_NR_prlimit64
9985 case TARGET_NR_prlimit64:
9987 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
9988 struct target_rlimit64 *target_rnew, *target_rold;
9989 struct host_rlimit64 rnew, rold, *rnewp = 0;
9990 int resource = target_to_host_resource(arg2);
9991 if (arg3) {
9992 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9993 goto efault;
9995 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9996 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9997 unlock_user_struct(target_rnew, arg3, 0);
9998 rnewp = &rnew;
10001 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
10002 if (!is_error(ret) && arg4) {
10003 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
10004 goto efault;
10006 target_rold->rlim_cur = tswap64(rold.rlim_cur);
10007 target_rold->rlim_max = tswap64(rold.rlim_max);
10008 unlock_user_struct(target_rold, arg4, 1);
10010 break;
10012 #endif
10013 #ifdef TARGET_NR_gethostname
10014 case TARGET_NR_gethostname:
10016 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10017 if (name) {
10018 ret = get_errno(gethostname(name, arg2));
10019 unlock_user(name, arg1, arg2);
10020 } else {
10021 ret = -TARGET_EFAULT;
10023 break;
10025 #endif
10026 #ifdef TARGET_NR_atomic_cmpxchg_32
10027 case TARGET_NR_atomic_cmpxchg_32:
10029 /* should use start_exclusive from main.c */
10030 abi_ulong mem_value;
10031 if (get_user_u32(mem_value, arg6)) {
10032 target_siginfo_t info;
10033 info.si_signo = SIGSEGV;
10034 info.si_errno = 0;
10035 info.si_code = TARGET_SEGV_MAPERR;
10036 info._sifields._sigfault._addr = arg6;
10037 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
10038 ret = 0xdeadbeef;
10041 if (mem_value == arg2)
10042 put_user_u32(arg1, arg6);
10043 ret = mem_value;
10044 break;
10046 #endif
10047 #ifdef TARGET_NR_atomic_barrier
10048 case TARGET_NR_atomic_barrier:
10050 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
10051 ret = 0;
10052 break;
10054 #endif
10056 #ifdef TARGET_NR_timer_create
10057 case TARGET_NR_timer_create:
10059 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
10061 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
10063 int clkid = arg1;
10064 int timer_index = next_free_host_timer();
10066 if (timer_index < 0) {
10067 ret = -TARGET_EAGAIN;
10068 } else {
10069 timer_t *phtimer = g_posix_timers + timer_index;
10071 if (arg2) {
10072 phost_sevp = &host_sevp;
10073 ret = target_to_host_sigevent(phost_sevp, arg2);
10074 if (ret != 0) {
10075 break;
10079 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
10080 if (ret) {
10081 phtimer = NULL;
10082 } else {
10083 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
10084 goto efault;
10088 break;
10090 #endif
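    /* Illustrative note: guest timer ids are synthetic handles.  timer_create
     * stores the host timer_t in the g_posix_timers[] slot it picked and hands
     * the guest TIMER_MAGIC | timer_index; the later timer_* cases use
     * get_timer_id() to strip the magic and recover the slot, so the first
     * timer a guest creates is reported back to it as TIMER_MAGIC | 0.
     */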
10092 #ifdef TARGET_NR_timer_settime
10093 case TARGET_NR_timer_settime:
10095 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
10096 * struct itimerspec * old_value */
10097 target_timer_t timerid = get_timer_id(arg1);
10099 if (timerid < 0) {
10100 ret = timerid;
10101 } else if (arg3 == 0) {
10102 ret = -TARGET_EINVAL;
10103 } else {
10104 timer_t htimer = g_posix_timers[timerid];
10105 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
10107 target_to_host_itimerspec(&hspec_new, arg3);
10108 ret = get_errno(
10109 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
10110 if (arg4) { host_to_target_itimerspec(arg4, &hspec_old); }
10112 break;
10114 #endif
10116 #ifdef TARGET_NR_timer_gettime
10117 case TARGET_NR_timer_gettime:
10119 /* args: timer_t timerid, struct itimerspec *curr_value */
10120 target_timer_t timerid = get_timer_id(arg1);
10122 if (timerid < 0) {
10123 ret = timerid;
10124 } else if (!arg2) {
10125 ret = -TARGET_EFAULT;
10126 } else {
10127 timer_t htimer = g_posix_timers[timerid];
10128 struct itimerspec hspec;
10129 ret = get_errno(timer_gettime(htimer, &hspec));
10131 if (host_to_target_itimerspec(arg2, &hspec)) {
10132 ret = -TARGET_EFAULT;
10135 break;
10137 #endif
10139 #ifdef TARGET_NR_timer_getoverrun
10140 case TARGET_NR_timer_getoverrun:
10142 /* args: timer_t timerid */
10143 target_timer_t timerid = get_timer_id(arg1);
10145 if (timerid < 0) {
10146 ret = timerid;
10147 } else {
10148 timer_t htimer = g_posix_timers[timerid];
10149 ret = get_errno(timer_getoverrun(htimer));
10151 fd_trans_unregister(ret);
10152 break;
10154 #endif
10156 #ifdef TARGET_NR_timer_delete
10157 case TARGET_NR_timer_delete:
10159 /* args: timer_t timerid */
10160 target_timer_t timerid = get_timer_id(arg1);
10162 if (timerid < 0) {
10163 ret = timerid;
10164 } else {
10165 timer_t htimer = g_posix_timers[timerid];
10166 ret = get_errno(timer_delete(htimer));
10167 g_posix_timers[timerid] = 0;
10169 break;
10171 #endif
10173 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
10174 case TARGET_NR_timerfd_create:
10175 ret = get_errno(timerfd_create(arg1,
10176 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
10177 break;
10178 #endif
10180 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
10181 case TARGET_NR_timerfd_gettime:
10183 struct itimerspec its_curr;
10185 ret = get_errno(timerfd_gettime(arg1, &its_curr));
10187 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
10188 goto efault;
10191 break;
10192 #endif
10194 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
10195 case TARGET_NR_timerfd_settime:
10197 struct itimerspec its_new, its_old, *p_new;
10199 if (arg3) {
10200 if (target_to_host_itimerspec(&its_new, arg3)) {
10201 goto efault;
10203 p_new = &its_new;
10204 } else {
10205 p_new = NULL;
10208 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
10210 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
10211 goto efault;
10214 break;
10215 #endif
10217 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
10218 case TARGET_NR_ioprio_get:
10219 ret = get_errno(ioprio_get(arg1, arg2));
10220 break;
10221 #endif
10223 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
10224 case TARGET_NR_ioprio_set:
10225 ret = get_errno(ioprio_set(arg1, arg2, arg3));
10226 break;
10227 #endif
10229 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
10230 case TARGET_NR_setns:
10231 ret = get_errno(setns(arg1, arg2));
10232 break;
10233 #endif
10234 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
10235 case TARGET_NR_unshare:
10236 ret = get_errno(unshare(arg1));
10237 break;
10238 #endif
10240 default:
10241 unimplemented:
10242 gemu_log("qemu: Unsupported syscall: %d\n", num);
10243 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
10244 unimplemented_nowarn:
10245 #endif
10246 ret = -TARGET_ENOSYS;
10247 break;
10249 fail:
10250 #ifdef DEBUG
10251 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
10252 #endif
10253 if(do_strace)
10254 print_syscall_ret(num, ret);
10255 return ret;
10256 efault:
10257 ret = -TARGET_EFAULT;
10258 goto fail;