tests: Add virtio-9p qtest
[qemu/ar7.git] / linux-user / syscall.c
blob 2eac6d5aa9731c9f4e0cb1bcc5bb2e28e892a66b
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <grp.h>
32 #include <sys/types.h>
33 #include <sys/ipc.h>
34 #include <sys/msg.h>
35 #include <sys/wait.h>
36 #include <sys/time.h>
37 #include <sys/stat.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/mman.h>
45 #include <sys/swap.h>
46 #include <linux/capability.h>
47 #include <signal.h>
48 #include <sched.h>
49 #ifdef __ia64__
50 int __clone2(int (*fn)(void *), void *child_stack_base,
51 size_t stack_size, int flags, void *arg, ...);
52 #endif
53 #include <sys/socket.h>
54 #include <sys/un.h>
55 #include <sys/uio.h>
56 #include <sys/poll.h>
57 #include <sys/times.h>
58 #include <sys/shm.h>
59 #include <sys/sem.h>
60 #include <sys/statfs.h>
61 #include <utime.h>
62 #include <sys/sysinfo.h>
63 #include <sys/utsname.h>
64 //#include <sys/user.h>
65 #include <netinet/ip.h>
66 #include <netinet/tcp.h>
67 #include <linux/wireless.h>
68 #include <linux/icmp.h>
69 #include "qemu-common.h"
70 #ifdef TARGET_GPROF
71 #include <sys/gmon.h>
72 #endif
73 #ifdef CONFIG_EVENTFD
74 #include <sys/eventfd.h>
75 #endif
76 #ifdef CONFIG_EPOLL
77 #include <sys/epoll.h>
78 #endif
79 #ifdef CONFIG_ATTR
80 #include "qemu/xattr.h"
81 #endif
82 #ifdef CONFIG_SENDFILE
83 #include <sys/sendfile.h>
84 #endif
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/utsname.h>
96 #include <linux/cdrom.h>
97 #include <linux/hdreg.h>
98 #include <linux/soundcard.h>
99 #include <linux/kd.h>
100 #include <linux/mtio.h>
101 #include <linux/fs.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #include <linux/vt.h>
107 #include <linux/dm-ioctl.h>
108 #include <linux/reboot.h>
109 #include <linux/route.h>
110 #include <linux/filter.h>
111 #include <linux/blkpg.h>
112 #include "linux_loop.h"
113 #include "cpu-uname.h"
115 #include "qemu.h"
117 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
118 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
120 //#define DEBUG
122 //#include <linux/msdos_fs.h>
123 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
124 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127 #undef _syscall0
128 #undef _syscall1
129 #undef _syscall2
130 #undef _syscall3
131 #undef _syscall4
132 #undef _syscall5
133 #undef _syscall6
135 #define _syscall0(type,name) \
136 static type name (void) \
138 return syscall(__NR_##name); \
141 #define _syscall1(type,name,type1,arg1) \
142 static type name (type1 arg1) \
144 return syscall(__NR_##name, arg1); \
147 #define _syscall2(type,name,type1,arg1,type2,arg2) \
148 static type name (type1 arg1,type2 arg2) \
150 return syscall(__NR_##name, arg1, arg2); \
153 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
154 static type name (type1 arg1,type2 arg2,type3 arg3) \
156 return syscall(__NR_##name, arg1, arg2, arg3); \
159 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
160 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
162 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
165 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
166 type5,arg5) \
167 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
169 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
173 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
174 type5,arg5,type6,arg6) \
175 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
176 type6 arg6) \
178 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
182 #define __NR_sys_uname __NR_uname
183 #define __NR_sys_getcwd1 __NR_getcwd
184 #define __NR_sys_getdents __NR_getdents
185 #define __NR_sys_getdents64 __NR_getdents64
186 #define __NR_sys_getpriority __NR_getpriority
187 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
188 #define __NR_sys_syslog __NR_syslog
189 #define __NR_sys_tgkill __NR_tgkill
190 #define __NR_sys_tkill __NR_tkill
191 #define __NR_sys_futex __NR_futex
192 #define __NR_sys_inotify_init __NR_inotify_init
193 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
194 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
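/*
 * Illustration of how the host wrappers below are generated: the _syscallN
 * macros above, together with the __NR_sys_* aliases, produce small
 * functions that invoke the host syscall directly, bypassing the C library.
 * For example,
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * expands (in effect) to
 *
 *     static int sys_getdents(uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * with __NR_sys_getdents being just an alias for the host's __NR_getdents.
 */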
196 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 defined(__s390x__)
198 #define __NR__llseek __NR_lseek
199 #endif
201 #ifdef __NR_gettid
202 _syscall0(int, gettid)
203 #else
204 /* This is a replacement for the host gettid() and must return a host
205 errno. */
206 static int gettid(void) {
207 return -ENOSYS;
209 #endif
210 #ifdef __NR_getdents
211 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
212 #endif
213 #if !defined(__NR_getdents) || \
214 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
215 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
216 #endif
217 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
218 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
219 loff_t *, res, uint, wh);
220 #endif
221 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
222 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
223 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
224 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
225 #endif
226 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
227 _syscall2(int,sys_tkill,int,tid,int,sig)
228 #endif
229 #ifdef __NR_exit_group
230 _syscall1(int,exit_group,int,error_code)
231 #endif
232 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
233 _syscall1(int,set_tid_address,int *,tidptr)
234 #endif
235 #if defined(TARGET_NR_futex) && defined(__NR_futex)
236 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
237 const struct timespec *,timeout,int *,uaddr2,int,val3)
238 #endif
239 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
240 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
241 unsigned long *, user_mask_ptr);
242 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
243 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
244 unsigned long *, user_mask_ptr);
245 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
246 void *, arg);
247 _syscall2(int, capget, struct __user_cap_header_struct *, header,
248 struct __user_cap_data_struct *, data);
249 _syscall2(int, capset, struct __user_cap_header_struct *, header,
250 struct __user_cap_data_struct *, data);
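/*
 * The table below maps open(2) flag bits between the target ABI and the
 * host.  Each row reads as { target_mask, target_bits, host_mask, host_bits }
 * (names are descriptive here, not the actual bitmask_transtbl field names):
 * when the target flags ANDed with target_mask equal target_bits, host_bits
 * are set in the translated value, and symmetrically for the reverse
 * direction.
 */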
252 static bitmask_transtbl fcntl_flags_tbl[] = {
253 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
254 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
255 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
256 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
257 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
258 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
259 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
260 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
261 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
262 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
263 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
264 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
265 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
266 #if defined(O_DIRECT)
267 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
268 #endif
269 #if defined(O_NOATIME)
270 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
271 #endif
272 #if defined(O_CLOEXEC)
273 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
274 #endif
275 #if defined(O_PATH)
276 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
277 #endif
278 /* Don't terminate the list prematurely on 64-bit host+guest. */
279 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
280 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
281 #endif
282 { 0, 0, 0, 0 }
285 #define COPY_UTSNAME_FIELD(dest, src) \
286 do { \
287 /* __NEW_UTS_LEN doesn't include terminating null */ \
288 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
289 (dest)[__NEW_UTS_LEN] = '\0'; \
290 } while (0)
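/*
 * Note: strncpy() does not NUL-terminate the destination when the source is
 * __NEW_UTS_LEN characters or longer, so the explicit '\0' store in the
 * macro above is what guarantees that every field written by sys_uname()
 * below is a terminated string.
 */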
292 static int sys_uname(struct new_utsname *buf)
294 struct utsname uts_buf;
296 if (uname(&uts_buf) < 0)
297 return (-1);
300 * Just in case these have some differences, we
301 * translate utsname to new_utsname (which is the
302 * struct the Linux kernel uses).
305 memset(buf, 0, sizeof(*buf));
306 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
307 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
308 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
309 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
310 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
311 #ifdef _GNU_SOURCE
312 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
313 #endif
314 return (0);
316 #undef COPY_UTSNAME_FIELD
319 static int sys_getcwd1(char *buf, size_t size)
321 if (getcwd(buf, size) == NULL) {
322 /* getcwd() sets errno */
323 return (-1);
325 return strlen(buf)+1;
328 #ifdef TARGET_NR_openat
329 static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
332 * open(2) has an extra parameter 'mode' when called with
333 * flag O_CREAT.
335 if ((flags & O_CREAT) != 0) {
336 return (openat(dirfd, pathname, flags, mode));
338 return (openat(dirfd, pathname, flags));
340 #endif
342 #ifdef TARGET_NR_utimensat
343 #ifdef CONFIG_UTIMENSAT
344 static int sys_utimensat(int dirfd, const char *pathname,
345 const struct timespec times[2], int flags)
347 if (pathname == NULL)
348 return futimens(dirfd, times);
349 else
350 return utimensat(dirfd, pathname, times, flags);
352 #elif defined(__NR_utimensat)
353 #define __NR_sys_utimensat __NR_utimensat
354 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
355 const struct timespec *,tsp,int,flags)
356 #else
357 static int sys_utimensat(int dirfd, const char *pathname,
358 const struct timespec times[2], int flags)
360 errno = ENOSYS;
361 return -1;
363 #endif
364 #endif /* TARGET_NR_utimensat */
366 #ifdef CONFIG_INOTIFY
367 #include <sys/inotify.h>
369 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
370 static int sys_inotify_init(void)
372 return (inotify_init());
374 #endif
375 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
376 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
378 return (inotify_add_watch(fd, pathname, mask));
380 #endif
381 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
382 static int sys_inotify_rm_watch(int fd, int32_t wd)
384 return (inotify_rm_watch(fd, wd));
386 #endif
387 #ifdef CONFIG_INOTIFY1
388 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
389 static int sys_inotify_init1(int flags)
391 return (inotify_init1(flags));
393 #endif
394 #endif
395 #else
396 /* Userspace can usually survive runtime without inotify */
397 #undef TARGET_NR_inotify_init
398 #undef TARGET_NR_inotify_init1
399 #undef TARGET_NR_inotify_add_watch
400 #undef TARGET_NR_inotify_rm_watch
401 #endif /* CONFIG_INOTIFY */
403 #if defined(TARGET_NR_ppoll)
404 #ifndef __NR_ppoll
405 # define __NR_ppoll -1
406 #endif
407 #define __NR_sys_ppoll __NR_ppoll
408 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
409 struct timespec *, timeout, const __sigset_t *, sigmask,
410 size_t, sigsetsize)
411 #endif
413 #if defined(TARGET_NR_pselect6)
414 #ifndef __NR_pselect6
415 # define __NR_pselect6 -1
416 #endif
417 #define __NR_sys_pselect6 __NR_pselect6
418 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
419 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
420 #endif
422 #if defined(TARGET_NR_prlimit64)
423 #ifndef __NR_prlimit64
424 # define __NR_prlimit64 -1
425 #endif
426 #define __NR_sys_prlimit64 __NR_prlimit64
427 /* The glibc rlimit structure may not be the one used by the underlying syscall */
428 struct host_rlimit64 {
429 uint64_t rlim_cur;
430 uint64_t rlim_max;
432 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
433 const struct host_rlimit64 *, new_limit,
434 struct host_rlimit64 *, old_limit)
435 #endif
438 #if defined(TARGET_NR_timer_create)
439 /* Maximum of 32 active POSIX timers allowed at any one time. */
440 static timer_t g_posix_timers[32] = { 0, } ;
442 static inline int next_free_host_timer(void)
444 int k ;
445 /* FIXME: Does finding the next free slot require a lock? */
446 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
447 if (g_posix_timers[k] == 0) {
448 g_posix_timers[k] = (timer_t) 1;
449 return k;
452 return -1;
454 #endif
456 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
457 #ifdef TARGET_ARM
458 static inline int regpairs_aligned(void *cpu_env) {
459 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
461 #elif defined(TARGET_MIPS)
462 static inline int regpairs_aligned(void *cpu_env) { return 1; }
463 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
464 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
465 * of registers which translates to the same as ARM/MIPS, because we start with
466 * r3 as arg1 */
467 static inline int regpairs_aligned(void *cpu_env) { return 1; }
468 #else
469 static inline int regpairs_aligned(void *cpu_env) { return 0; }
470 #endif
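/*
 * Illustration: on the "aligned" targets above, a 64-bit syscall argument
 * occupies an even/odd register pair, so a 32-bit padding slot may precede
 * it.  On ARM EABI, for instance, a target ftruncate64(fd, len64) passes
 * len64 in (r2, r3) rather than (r1, r2), leaving r1 unused; the 64-bit
 * argument unpacking code later in this file calls regpairs_aligned() to
 * decide whether such a padding slot has to be skipped.
 */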
472 #define ERRNO_TABLE_SIZE 1200
474 /* target_to_host_errno_table[] is initialized from
475 * host_to_target_errno_table[] in syscall_init(). */
476 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
480 * This list is the union of errno values overridden in asm-<arch>/errno.h
481 * minus the errnos that are not actually generic to all archs.
483 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
484 [EIDRM] = TARGET_EIDRM,
485 [ECHRNG] = TARGET_ECHRNG,
486 [EL2NSYNC] = TARGET_EL2NSYNC,
487 [EL3HLT] = TARGET_EL3HLT,
488 [EL3RST] = TARGET_EL3RST,
489 [ELNRNG] = TARGET_ELNRNG,
490 [EUNATCH] = TARGET_EUNATCH,
491 [ENOCSI] = TARGET_ENOCSI,
492 [EL2HLT] = TARGET_EL2HLT,
493 [EDEADLK] = TARGET_EDEADLK,
494 [ENOLCK] = TARGET_ENOLCK,
495 [EBADE] = TARGET_EBADE,
496 [EBADR] = TARGET_EBADR,
497 [EXFULL] = TARGET_EXFULL,
498 [ENOANO] = TARGET_ENOANO,
499 [EBADRQC] = TARGET_EBADRQC,
500 [EBADSLT] = TARGET_EBADSLT,
501 [EBFONT] = TARGET_EBFONT,
502 [ENOSTR] = TARGET_ENOSTR,
503 [ENODATA] = TARGET_ENODATA,
504 [ETIME] = TARGET_ETIME,
505 [ENOSR] = TARGET_ENOSR,
506 [ENONET] = TARGET_ENONET,
507 [ENOPKG] = TARGET_ENOPKG,
508 [EREMOTE] = TARGET_EREMOTE,
509 [ENOLINK] = TARGET_ENOLINK,
510 [EADV] = TARGET_EADV,
511 [ESRMNT] = TARGET_ESRMNT,
512 [ECOMM] = TARGET_ECOMM,
513 [EPROTO] = TARGET_EPROTO,
514 [EDOTDOT] = TARGET_EDOTDOT,
515 [EMULTIHOP] = TARGET_EMULTIHOP,
516 [EBADMSG] = TARGET_EBADMSG,
517 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
518 [EOVERFLOW] = TARGET_EOVERFLOW,
519 [ENOTUNIQ] = TARGET_ENOTUNIQ,
520 [EBADFD] = TARGET_EBADFD,
521 [EREMCHG] = TARGET_EREMCHG,
522 [ELIBACC] = TARGET_ELIBACC,
523 [ELIBBAD] = TARGET_ELIBBAD,
524 [ELIBSCN] = TARGET_ELIBSCN,
525 [ELIBMAX] = TARGET_ELIBMAX,
526 [ELIBEXEC] = TARGET_ELIBEXEC,
527 [EILSEQ] = TARGET_EILSEQ,
528 [ENOSYS] = TARGET_ENOSYS,
529 [ELOOP] = TARGET_ELOOP,
530 [ERESTART] = TARGET_ERESTART,
531 [ESTRPIPE] = TARGET_ESTRPIPE,
532 [ENOTEMPTY] = TARGET_ENOTEMPTY,
533 [EUSERS] = TARGET_EUSERS,
534 [ENOTSOCK] = TARGET_ENOTSOCK,
535 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
536 [EMSGSIZE] = TARGET_EMSGSIZE,
537 [EPROTOTYPE] = TARGET_EPROTOTYPE,
538 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
539 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
540 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
541 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
542 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
543 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
544 [EADDRINUSE] = TARGET_EADDRINUSE,
545 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
546 [ENETDOWN] = TARGET_ENETDOWN,
547 [ENETUNREACH] = TARGET_ENETUNREACH,
548 [ENETRESET] = TARGET_ENETRESET,
549 [ECONNABORTED] = TARGET_ECONNABORTED,
550 [ECONNRESET] = TARGET_ECONNRESET,
551 [ENOBUFS] = TARGET_ENOBUFS,
552 [EISCONN] = TARGET_EISCONN,
553 [ENOTCONN] = TARGET_ENOTCONN,
554 [EUCLEAN] = TARGET_EUCLEAN,
555 [ENOTNAM] = TARGET_ENOTNAM,
556 [ENAVAIL] = TARGET_ENAVAIL,
557 [EISNAM] = TARGET_EISNAM,
558 [EREMOTEIO] = TARGET_EREMOTEIO,
559 [ESHUTDOWN] = TARGET_ESHUTDOWN,
560 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
561 [ETIMEDOUT] = TARGET_ETIMEDOUT,
562 [ECONNREFUSED] = TARGET_ECONNREFUSED,
563 [EHOSTDOWN] = TARGET_EHOSTDOWN,
564 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
565 [EALREADY] = TARGET_EALREADY,
566 [EINPROGRESS] = TARGET_EINPROGRESS,
567 [ESTALE] = TARGET_ESTALE,
568 [ECANCELED] = TARGET_ECANCELED,
569 [ENOMEDIUM] = TARGET_ENOMEDIUM,
570 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
571 #ifdef ENOKEY
572 [ENOKEY] = TARGET_ENOKEY,
573 #endif
574 #ifdef EKEYEXPIRED
575 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
576 #endif
577 #ifdef EKEYREVOKED
578 [EKEYREVOKED] = TARGET_EKEYREVOKED,
579 #endif
580 #ifdef EKEYREJECTED
581 [EKEYREJECTED] = TARGET_EKEYREJECTED,
582 #endif
583 #ifdef EOWNERDEAD
584 [EOWNERDEAD] = TARGET_EOWNERDEAD,
585 #endif
586 #ifdef ENOTRECOVERABLE
587 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
588 #endif
591 static inline int host_to_target_errno(int err)
593 if(host_to_target_errno_table[err])
594 return host_to_target_errno_table[err];
595 return err;
598 static inline int target_to_host_errno(int err)
600 if (target_to_host_errno_table[err])
601 return target_to_host_errno_table[err];
602 return err;
605 static inline abi_long get_errno(abi_long ret)
607 if (ret == -1)
608 return -host_to_target_errno(errno);
609 else
610 return ret;
613 static inline int is_error(abi_long ret)
615 return (abi_ulong)ret >= (abi_ulong)(-4096);
618 char *target_strerror(int err)
620 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
621 return NULL;
623 return strerror(target_to_host_errno(err));
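/*
 * Convention used throughout this file: host syscall results are filtered
 * through get_errno(), so a failure comes back as a negative *target* errno
 * (e.g. -TARGET_ENOENT), and is_error() mirrors the kernel convention of
 * treating the last 4096 values of the unsigned range as error codes.
 */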
626 static abi_ulong target_brk;
627 static abi_ulong target_original_brk;
628 static abi_ulong brk_page;
630 void target_set_brk(abi_ulong new_brk)
632 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
633 brk_page = HOST_PAGE_ALIGN(target_brk);
636 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
637 #define DEBUGF_BRK(message, args...)
639 /* do_brk() must return target values and target errnos. */
640 abi_long do_brk(abi_ulong new_brk)
642 abi_long mapped_addr;
643 int new_alloc_size;
645 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
647 if (!new_brk) {
648 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
649 return target_brk;
651 if (new_brk < target_original_brk) {
652 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
653 target_brk);
654 return target_brk;
657 /* If the new brk is less than the highest page reserved to the
658 * target heap allocation, set it and we're almost done... */
659 if (new_brk <= brk_page) {
660 /* Heap contents are initialized to zero, as for anonymous
661 * mapped pages. */
662 if (new_brk > target_brk) {
663 memset(g2h(target_brk), 0, new_brk - target_brk);
665 target_brk = new_brk;
666 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
667 return target_brk;
670 /* We need to allocate more memory after the brk... Note that
671 * we don't use MAP_FIXED because that will map over the top of
672 * any existing mapping (like the one with the host libc or qemu
673 * itself); instead we treat "mapped but at wrong address" as
674 * a failure and unmap again.
676 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
677 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
678 PROT_READ|PROT_WRITE,
679 MAP_ANON|MAP_PRIVATE, 0, 0));
681 if (mapped_addr == brk_page) {
682 /* Heap contents are initialized to zero, as for anonymous
683 * mapped pages. Technically the new pages are already
684 * initialized to zero since they *are* anonymous mapped
685 * pages, however we have to take care with the contents that
686 * come from the remaining part of the previous page: it may
687 * contain garbage data due to a previous heap usage (grown
688 * then shrunken). */
689 memset(g2h(target_brk), 0, brk_page - target_brk);
691 target_brk = new_brk;
692 brk_page = HOST_PAGE_ALIGN(target_brk);
693 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
694 target_brk);
695 return target_brk;
696 } else if (mapped_addr != -1) {
697 /* Mapped but at wrong address, meaning there wasn't actually
698 * enough space for this brk.
700 target_munmap(mapped_addr, new_alloc_size);
701 mapped_addr = -1;
702 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
704 else {
705 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
708 #if defined(TARGET_ALPHA)
709 /* We (partially) emulate OSF/1 on Alpha, which requires we
710 return a proper errno, not an unchanged brk value. */
711 return -TARGET_ENOMEM;
712 #endif
713 /* For everything else, return the previous break. */
714 return target_brk;
717 static inline abi_long copy_from_user_fdset(fd_set *fds,
718 abi_ulong target_fds_addr,
719 int n)
721 int i, nw, j, k;
722 abi_ulong b, *target_fds;
724 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
725 if (!(target_fds = lock_user(VERIFY_READ,
726 target_fds_addr,
727 sizeof(abi_ulong) * nw,
728 1)))
729 return -TARGET_EFAULT;
731 FD_ZERO(fds);
732 k = 0;
733 for (i = 0; i < nw; i++) {
734 /* grab the abi_ulong */
735 __get_user(b, &target_fds[i]);
736 for (j = 0; j < TARGET_ABI_BITS; j++) {
737 /* check the bit inside the abi_ulong */
738 if ((b >> j) & 1)
739 FD_SET(k, fds);
740 k++;
744 unlock_user(target_fds, target_fds_addr, 0);
746 return 0;
749 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
750 abi_ulong target_fds_addr,
751 int n)
753 if (target_fds_addr) {
754 if (copy_from_user_fdset(fds, target_fds_addr, n))
755 return -TARGET_EFAULT;
756 *fds_ptr = fds;
757 } else {
758 *fds_ptr = NULL;
760 return 0;
763 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
764 const fd_set *fds,
765 int n)
767 int i, nw, j, k;
768 abi_long v;
769 abi_ulong *target_fds;
771 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
772 if (!(target_fds = lock_user(VERIFY_WRITE,
773 target_fds_addr,
774 sizeof(abi_ulong) * nw,
775 0)))
776 return -TARGET_EFAULT;
778 k = 0;
779 for (i = 0; i < nw; i++) {
780 v = 0;
781 for (j = 0; j < TARGET_ABI_BITS; j++) {
782 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
783 k++;
785 __put_user(v, &target_fds[i]);
788 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
790 return 0;
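/*
 * Layout assumed by the fd_set helpers above: the target fd_set is an array
 * of abi_ulong words in which bit j of word i stands for file descriptor
 * i * TARGET_ABI_BITS + j.  The loops repack those bits through
 * FD_SET()/FD_ISSET() so that the host select() implementation can be used
 * unchanged.
 */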
793 #if defined(__alpha__)
794 #define HOST_HZ 1024
795 #else
796 #define HOST_HZ 100
797 #endif
799 static inline abi_long host_to_target_clock_t(long ticks)
801 #if HOST_HZ == TARGET_HZ
802 return ticks;
803 #else
804 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
805 #endif
808 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
809 const struct rusage *rusage)
811 struct target_rusage *target_rusage;
813 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
814 return -TARGET_EFAULT;
815 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
816 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
817 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
818 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
819 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
820 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
821 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
822 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
823 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
824 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
825 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
826 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
827 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
828 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
829 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
830 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
831 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
832 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
833 unlock_user_struct(target_rusage, target_addr, 1);
835 return 0;
838 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
840 abi_ulong target_rlim_swap;
841 rlim_t result;
843 target_rlim_swap = tswapal(target_rlim);
844 if (target_rlim_swap == TARGET_RLIM_INFINITY)
845 return RLIM_INFINITY;
847 result = target_rlim_swap;
848 if (target_rlim_swap != (rlim_t)result)
849 return RLIM_INFINITY;
851 return result;
854 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
856 abi_ulong target_rlim_swap;
857 abi_ulong result;
859 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
860 target_rlim_swap = TARGET_RLIM_INFINITY;
861 else
862 target_rlim_swap = rlim;
863 result = tswapal(target_rlim_swap);
865 return result;
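/*
 * Both rlimit conversions above saturate rather than truncate: a limit that
 * does not fit the destination type is reported as the corresponding
 * RLIM_INFINITY.  For example, a 64-bit host rlim_cur of 8 GiB presented to
 * a 32-bit target becomes TARGET_RLIM_INFINITY instead of a wrapped value.
 */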
868 static inline int target_to_host_resource(int code)
870 switch (code) {
871 case TARGET_RLIMIT_AS:
872 return RLIMIT_AS;
873 case TARGET_RLIMIT_CORE:
874 return RLIMIT_CORE;
875 case TARGET_RLIMIT_CPU:
876 return RLIMIT_CPU;
877 case TARGET_RLIMIT_DATA:
878 return RLIMIT_DATA;
879 case TARGET_RLIMIT_FSIZE:
880 return RLIMIT_FSIZE;
881 case TARGET_RLIMIT_LOCKS:
882 return RLIMIT_LOCKS;
883 case TARGET_RLIMIT_MEMLOCK:
884 return RLIMIT_MEMLOCK;
885 case TARGET_RLIMIT_MSGQUEUE:
886 return RLIMIT_MSGQUEUE;
887 case TARGET_RLIMIT_NICE:
888 return RLIMIT_NICE;
889 case TARGET_RLIMIT_NOFILE:
890 return RLIMIT_NOFILE;
891 case TARGET_RLIMIT_NPROC:
892 return RLIMIT_NPROC;
893 case TARGET_RLIMIT_RSS:
894 return RLIMIT_RSS;
895 case TARGET_RLIMIT_RTPRIO:
896 return RLIMIT_RTPRIO;
897 case TARGET_RLIMIT_SIGPENDING:
898 return RLIMIT_SIGPENDING;
899 case TARGET_RLIMIT_STACK:
900 return RLIMIT_STACK;
901 default:
902 return code;
906 static inline abi_long copy_from_user_timeval(struct timeval *tv,
907 abi_ulong target_tv_addr)
909 struct target_timeval *target_tv;
911 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
912 return -TARGET_EFAULT;
914 __get_user(tv->tv_sec, &target_tv->tv_sec);
915 __get_user(tv->tv_usec, &target_tv->tv_usec);
917 unlock_user_struct(target_tv, target_tv_addr, 0);
919 return 0;
922 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
923 const struct timeval *tv)
925 struct target_timeval *target_tv;
927 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
928 return -TARGET_EFAULT;
930 __put_user(tv->tv_sec, &target_tv->tv_sec);
931 __put_user(tv->tv_usec, &target_tv->tv_usec);
933 unlock_user_struct(target_tv, target_tv_addr, 1);
935 return 0;
938 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
939 #include <mqueue.h>
941 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
942 abi_ulong target_mq_attr_addr)
944 struct target_mq_attr *target_mq_attr;
946 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
947 target_mq_attr_addr, 1))
948 return -TARGET_EFAULT;
950 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
951 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
952 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
953 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
955 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
957 return 0;
960 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
961 const struct mq_attr *attr)
963 struct target_mq_attr *target_mq_attr;
965 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
966 target_mq_attr_addr, 0))
967 return -TARGET_EFAULT;
969 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
970 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
971 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
972 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
974 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
976 return 0;
978 #endif
980 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
981 /* do_select() must return target values and target errnos. */
982 static abi_long do_select(int n,
983 abi_ulong rfd_addr, abi_ulong wfd_addr,
984 abi_ulong efd_addr, abi_ulong target_tv_addr)
986 fd_set rfds, wfds, efds;
987 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
988 struct timeval tv, *tv_ptr;
989 abi_long ret;
991 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
992 if (ret) {
993 return ret;
995 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
996 if (ret) {
997 return ret;
999 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1000 if (ret) {
1001 return ret;
1004 if (target_tv_addr) {
1005 if (copy_from_user_timeval(&tv, target_tv_addr))
1006 return -TARGET_EFAULT;
1007 tv_ptr = &tv;
1008 } else {
1009 tv_ptr = NULL;
1012 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1014 if (!is_error(ret)) {
1015 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1016 return -TARGET_EFAULT;
1017 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1018 return -TARGET_EFAULT;
1019 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1020 return -TARGET_EFAULT;
1022 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1023 return -TARGET_EFAULT;
1026 return ret;
1028 #endif
1030 static abi_long do_pipe2(int host_pipe[], int flags)
1032 #ifdef CONFIG_PIPE2
1033 return pipe2(host_pipe, flags);
1034 #else
1035 return -ENOSYS;
1036 #endif
1039 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1040 int flags, int is_pipe2)
1042 int host_pipe[2];
1043 abi_long ret;
1044 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1046 if (is_error(ret))
1047 return get_errno(ret);
1049 /* Several targets have special calling conventions for the original
1050 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1051 if (!is_pipe2) {
1052 #if defined(TARGET_ALPHA)
1053 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1054 return host_pipe[0];
1055 #elif defined(TARGET_MIPS)
1056 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1057 return host_pipe[0];
1058 #elif defined(TARGET_SH4)
1059 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1060 return host_pipe[0];
1061 #elif defined(TARGET_SPARC)
1062 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1063 return host_pipe[0];
1064 #endif
1067 if (put_user_s32(host_pipe[0], pipedes)
1068 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1069 return -TARGET_EFAULT;
1070 return get_errno(ret);
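/*
 * Note on the calling convention handled above: on Alpha, MIPS, SH4 and
 * SPARC the original pipe() syscall returns the two descriptors in two
 * registers instead of storing them through the user pointer, so do_pipe()
 * writes the second descriptor into the target register file and returns
 * the first one directly; every other target (and pipe2 on all targets)
 * falls through to the put_user_s32() path.
 */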
1073 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1074 abi_ulong target_addr,
1075 socklen_t len)
1077 struct target_ip_mreqn *target_smreqn;
1079 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1080 if (!target_smreqn)
1081 return -TARGET_EFAULT;
1082 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1083 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1084 if (len == sizeof(struct target_ip_mreqn))
1085 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1086 unlock_user(target_smreqn, target_addr, 0);
1088 return 0;
1091 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1092 abi_ulong target_addr,
1093 socklen_t len)
1095 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1096 sa_family_t sa_family;
1097 struct target_sockaddr *target_saddr;
1099 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1100 if (!target_saddr)
1101 return -TARGET_EFAULT;
1103 sa_family = tswap16(target_saddr->sa_family);
1105 /* Oops. The caller might send an incomplete sun_path; sun_path
1106 * must be terminated by \0 (see the manual page), but
1107 * unfortunately it is quite common to specify sockaddr_un
1108 * length as "strlen(x->sun_path)" while it should be
1109 * "strlen(...) + 1". We'll fix that here if needed.
1110 * The Linux kernel has a similar feature.
1113 if (sa_family == AF_UNIX) {
1114 if (len < unix_maxlen && len > 0) {
1115 char *cp = (char*)target_saddr;
1117 if ( cp[len-1] && !cp[len] )
1118 len++;
1120 if (len > unix_maxlen)
1121 len = unix_maxlen;
1124 memcpy(addr, target_saddr, len);
1125 addr->sa_family = sa_family;
1126 unlock_user(target_saddr, target_addr, 0);
1128 return 0;
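/*
 * Example of the sun_path fixup above: a guest that calls
 * connect(fd, addr, offsetof(struct sockaddr_un, sun_path) + strlen(path))
 * leaves out the trailing '\0'.  When the last byte inside the supplied
 * length is non-zero and the byte just past it is zero, the length is
 * extended by one so the host kernel sees a terminated path, capped at
 * sizeof(struct sockaddr_un).
 */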
1131 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1132 struct sockaddr *addr,
1133 socklen_t len)
1135 struct target_sockaddr *target_saddr;
1137 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1138 if (!target_saddr)
1139 return -TARGET_EFAULT;
1140 memcpy(target_saddr, addr, len);
1141 target_saddr->sa_family = tswap16(addr->sa_family);
1142 unlock_user(target_saddr, target_addr, len);
1144 return 0;
1147 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1148 struct target_msghdr *target_msgh)
1150 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1151 abi_long msg_controllen;
1152 abi_ulong target_cmsg_addr;
1153 struct target_cmsghdr *target_cmsg;
1154 socklen_t space = 0;
1156 msg_controllen = tswapal(target_msgh->msg_controllen);
1157 if (msg_controllen < sizeof (struct target_cmsghdr))
1158 goto the_end;
1159 target_cmsg_addr = tswapal(target_msgh->msg_control);
1160 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1161 if (!target_cmsg)
1162 return -TARGET_EFAULT;
1164 while (cmsg && target_cmsg) {
1165 void *data = CMSG_DATA(cmsg);
1166 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1168 int len = tswapal(target_cmsg->cmsg_len)
1169 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1171 space += CMSG_SPACE(len);
1172 if (space > msgh->msg_controllen) {
1173 space -= CMSG_SPACE(len);
1174 gemu_log("Host cmsg overflow\n");
1175 break;
1178 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1179 cmsg->cmsg_level = SOL_SOCKET;
1180 } else {
1181 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1183 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1184 cmsg->cmsg_len = CMSG_LEN(len);
1186 if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1187 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1188 memcpy(data, target_data, len);
1189 } else {
1190 int *fd = (int *)data;
1191 int *target_fd = (int *)target_data;
1192 int i, numfds = len / sizeof(int);
1194 for (i = 0; i < numfds; i++)
1195 fd[i] = tswap32(target_fd[i]);
1198 cmsg = CMSG_NXTHDR(msgh, cmsg);
1199 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1201 unlock_user(target_cmsg, target_cmsg_addr, 0);
1202 the_end:
1203 msgh->msg_controllen = space;
1204 return 0;
1207 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1208 struct msghdr *msgh)
1210 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1211 abi_long msg_controllen;
1212 abi_ulong target_cmsg_addr;
1213 struct target_cmsghdr *target_cmsg;
1214 socklen_t space = 0;
1216 msg_controllen = tswapal(target_msgh->msg_controllen);
1217 if (msg_controllen < sizeof (struct target_cmsghdr))
1218 goto the_end;
1219 target_cmsg_addr = tswapal(target_msgh->msg_control);
1220 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1221 if (!target_cmsg)
1222 return -TARGET_EFAULT;
1224 while (cmsg && target_cmsg) {
1225 void *data = CMSG_DATA(cmsg);
1226 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1228 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1230 space += TARGET_CMSG_SPACE(len);
1231 if (space > msg_controllen) {
1232 space -= TARGET_CMSG_SPACE(len);
1233 gemu_log("Target cmsg overflow\n");
1234 break;
1237 if (cmsg->cmsg_level == SOL_SOCKET) {
1238 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1239 } else {
1240 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1242 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1243 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
1245 if ((cmsg->cmsg_level == SOL_SOCKET) &&
1246 (cmsg->cmsg_type == SCM_RIGHTS)) {
1247 int *fd = (int *)data;
1248 int *target_fd = (int *)target_data;
1249 int i, numfds = len / sizeof(int);
1251 for (i = 0; i < numfds; i++)
1252 target_fd[i] = tswap32(fd[i]);
1253 } else if ((cmsg->cmsg_level == SOL_SOCKET) &&
1254 (cmsg->cmsg_type == SO_TIMESTAMP) &&
1255 (len == sizeof(struct timeval))) {
1256 /* copy struct timeval to target */
1257 struct timeval *tv = (struct timeval *)data;
1258 struct target_timeval *target_tv =
1259 (struct target_timeval *)target_data;
1261 target_tv->tv_sec = tswapal(tv->tv_sec);
1262 target_tv->tv_usec = tswapal(tv->tv_usec);
1263 } else {
1264 gemu_log("Unsupported ancillary data: %d/%d\n",
1265 cmsg->cmsg_level, cmsg->cmsg_type);
1266 memcpy(target_data, data, len);
1269 cmsg = CMSG_NXTHDR(msgh, cmsg);
1270 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1272 unlock_user(target_cmsg, target_cmsg_addr, space);
1273 the_end:
1274 target_msgh->msg_controllen = tswapal(space);
1275 return 0;
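/*
 * The two cmsg converters above translate ancillary data in both directions.
 * Only SCM_RIGHTS (an array of ints carrying passed file descriptors, each
 * byte-swapped individually) and, on the host-to-target side, SO_TIMESTAMP
 * (a struct timeval converted field by field) get special handling; any
 * other type is logged as unsupported and copied verbatim.
 */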
1278 /* do_setsockopt() Must return target values and target errnos. */
1279 static abi_long do_setsockopt(int sockfd, int level, int optname,
1280 abi_ulong optval_addr, socklen_t optlen)
1282 abi_long ret;
1283 int val;
1284 struct ip_mreqn *ip_mreq;
1285 struct ip_mreq_source *ip_mreq_source;
1287 switch(level) {
1288 case SOL_TCP:
1289 /* TCP options all take an 'int' value. */
1290 if (optlen < sizeof(uint32_t))
1291 return -TARGET_EINVAL;
1293 if (get_user_u32(val, optval_addr))
1294 return -TARGET_EFAULT;
1295 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1296 break;
1297 case SOL_IP:
1298 switch(optname) {
1299 case IP_TOS:
1300 case IP_TTL:
1301 case IP_HDRINCL:
1302 case IP_ROUTER_ALERT:
1303 case IP_RECVOPTS:
1304 case IP_RETOPTS:
1305 case IP_PKTINFO:
1306 case IP_MTU_DISCOVER:
1307 case IP_RECVERR:
1308 case IP_RECVTOS:
1309 #ifdef IP_FREEBIND
1310 case IP_FREEBIND:
1311 #endif
1312 case IP_MULTICAST_TTL:
1313 case IP_MULTICAST_LOOP:
1314 val = 0;
1315 if (optlen >= sizeof(uint32_t)) {
1316 if (get_user_u32(val, optval_addr))
1317 return -TARGET_EFAULT;
1318 } else if (optlen >= 1) {
1319 if (get_user_u8(val, optval_addr))
1320 return -TARGET_EFAULT;
1322 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1323 break;
1324 case IP_ADD_MEMBERSHIP:
1325 case IP_DROP_MEMBERSHIP:
1326 if (optlen < sizeof (struct target_ip_mreq) ||
1327 optlen > sizeof (struct target_ip_mreqn))
1328 return -TARGET_EINVAL;
1330 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1331 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1332 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1333 break;
1335 case IP_BLOCK_SOURCE:
1336 case IP_UNBLOCK_SOURCE:
1337 case IP_ADD_SOURCE_MEMBERSHIP:
1338 case IP_DROP_SOURCE_MEMBERSHIP:
1339 if (optlen != sizeof (struct target_ip_mreq_source))
1340 return -TARGET_EINVAL;
1342 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1343 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1344 unlock_user (ip_mreq_source, optval_addr, 0);
1345 break;
1347 default:
1348 goto unimplemented;
1350 break;
1351 case SOL_IPV6:
1352 switch (optname) {
1353 case IPV6_MTU_DISCOVER:
1354 case IPV6_MTU:
1355 case IPV6_V6ONLY:
1356 case IPV6_RECVPKTINFO:
1357 val = 0;
1358 if (optlen < sizeof(uint32_t)) {
1359 return -TARGET_EINVAL;
1361 if (get_user_u32(val, optval_addr)) {
1362 return -TARGET_EFAULT;
1364 ret = get_errno(setsockopt(sockfd, level, optname,
1365 &val, sizeof(val)));
1366 break;
1367 default:
1368 goto unimplemented;
1370 break;
1371 case SOL_RAW:
1372 switch (optname) {
1373 case ICMP_FILTER:
1374 /* struct icmp_filter takes a u32 value */
1375 if (optlen < sizeof(uint32_t)) {
1376 return -TARGET_EINVAL;
1379 if (get_user_u32(val, optval_addr)) {
1380 return -TARGET_EFAULT;
1382 ret = get_errno(setsockopt(sockfd, level, optname,
1383 &val, sizeof(val)));
1384 break;
1386 default:
1387 goto unimplemented;
1389 break;
1390 case TARGET_SOL_SOCKET:
1391 switch (optname) {
1392 case TARGET_SO_RCVTIMEO:
1394 struct timeval tv;
1396 optname = SO_RCVTIMEO;
1398 set_timeout:
1399 if (optlen != sizeof(struct target_timeval)) {
1400 return -TARGET_EINVAL;
1403 if (copy_from_user_timeval(&tv, optval_addr)) {
1404 return -TARGET_EFAULT;
1407 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1408 &tv, sizeof(tv)));
1409 return ret;
1411 case TARGET_SO_SNDTIMEO:
1412 optname = SO_SNDTIMEO;
1413 goto set_timeout;
1414 case TARGET_SO_ATTACH_FILTER:
1416 struct target_sock_fprog *tfprog;
1417 struct target_sock_filter *tfilter;
1418 struct sock_fprog fprog;
1419 struct sock_filter *filter;
1420 int i;
1422 if (optlen != sizeof(*tfprog)) {
1423 return -TARGET_EINVAL;
1425 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1426 return -TARGET_EFAULT;
1428 if (!lock_user_struct(VERIFY_READ, tfilter,
1429 tswapal(tfprog->filter), 0)) {
1430 unlock_user_struct(tfprog, optval_addr, 1);
1431 return -TARGET_EFAULT;
1434 fprog.len = tswap16(tfprog->len);
1435 filter = malloc(fprog.len * sizeof(*filter));
1436 if (filter == NULL) {
1437 unlock_user_struct(tfilter, tfprog->filter, 1);
1438 unlock_user_struct(tfprog, optval_addr, 1);
1439 return -TARGET_ENOMEM;
1441 for (i = 0; i < fprog.len; i++) {
1442 filter[i].code = tswap16(tfilter[i].code);
1443 filter[i].jt = tfilter[i].jt;
1444 filter[i].jf = tfilter[i].jf;
1445 filter[i].k = tswap32(tfilter[i].k);
1447 fprog.filter = filter;
1449 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
1450 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
1451 free(filter);
1453 unlock_user_struct(tfilter, tfprog->filter, 1);
1454 unlock_user_struct(tfprog, optval_addr, 1);
1455 return ret;
1457 /* Options with 'int' argument. */
1458 case TARGET_SO_DEBUG:
1459 optname = SO_DEBUG;
1460 break;
1461 case TARGET_SO_REUSEADDR:
1462 optname = SO_REUSEADDR;
1463 break;
1464 case TARGET_SO_TYPE:
1465 optname = SO_TYPE;
1466 break;
1467 case TARGET_SO_ERROR:
1468 optname = SO_ERROR;
1469 break;
1470 case TARGET_SO_DONTROUTE:
1471 optname = SO_DONTROUTE;
1472 break;
1473 case TARGET_SO_BROADCAST:
1474 optname = SO_BROADCAST;
1475 break;
1476 case TARGET_SO_SNDBUF:
1477 optname = SO_SNDBUF;
1478 break;
1479 case TARGET_SO_RCVBUF:
1480 optname = SO_RCVBUF;
1481 break;
1482 case TARGET_SO_KEEPALIVE:
1483 optname = SO_KEEPALIVE;
1484 break;
1485 case TARGET_SO_OOBINLINE:
1486 optname = SO_OOBINLINE;
1487 break;
1488 case TARGET_SO_NO_CHECK:
1489 optname = SO_NO_CHECK;
1490 break;
1491 case TARGET_SO_PRIORITY:
1492 optname = SO_PRIORITY;
1493 break;
1494 #ifdef SO_BSDCOMPAT
1495 case TARGET_SO_BSDCOMPAT:
1496 optname = SO_BSDCOMPAT;
1497 break;
1498 #endif
1499 case TARGET_SO_PASSCRED:
1500 optname = SO_PASSCRED;
1501 break;
1502 case TARGET_SO_TIMESTAMP:
1503 optname = SO_TIMESTAMP;
1504 break;
1505 case TARGET_SO_RCVLOWAT:
1506 optname = SO_RCVLOWAT;
1507 break;
1509 default:
1510 goto unimplemented;
1512 if (optlen < sizeof(uint32_t))
1513 return -TARGET_EINVAL;
1515 if (get_user_u32(val, optval_addr))
1516 return -TARGET_EFAULT;
1517 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1518 break;
1519 default:
1520 unimplemented:
1521 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1522 ret = -TARGET_ENOPROTOOPT;
1524 return ret;
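/*
 * Note on the TARGET_SO_ATTACH_FILTER case above: a classic BPF program is
 * an array of struct sock_filter { __u16 code; __u8 jt; __u8 jf; __u32 k; },
 * so only the 16-bit opcode and the 32-bit constant need byte swapping; the
 * single-byte jump offsets are copied as-is, which is what the conversion
 * loop does before handing the rebuilt sock_fprog to setsockopt().
 */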
1527 /* do_getsockopt() Must return target values and target errnos. */
1528 static abi_long do_getsockopt(int sockfd, int level, int optname,
1529 abi_ulong optval_addr, abi_ulong optlen)
1531 abi_long ret;
1532 int len, val;
1533 socklen_t lv;
1535 switch(level) {
1536 case TARGET_SOL_SOCKET:
1537 level = SOL_SOCKET;
1538 switch (optname) {
1539 /* These don't just return a single integer */
1540 case TARGET_SO_LINGER:
1541 case TARGET_SO_RCVTIMEO:
1542 case TARGET_SO_SNDTIMEO:
1543 case TARGET_SO_PEERNAME:
1544 goto unimplemented;
1545 case TARGET_SO_PEERCRED: {
1546 struct ucred cr;
1547 socklen_t crlen;
1548 struct target_ucred *tcr;
1550 if (get_user_u32(len, optlen)) {
1551 return -TARGET_EFAULT;
1553 if (len < 0) {
1554 return -TARGET_EINVAL;
1557 crlen = sizeof(cr);
1558 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
1559 &cr, &crlen));
1560 if (ret < 0) {
1561 return ret;
1563 if (len > crlen) {
1564 len = crlen;
1566 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
1567 return -TARGET_EFAULT;
1569 __put_user(cr.pid, &tcr->pid);
1570 __put_user(cr.uid, &tcr->uid);
1571 __put_user(cr.gid, &tcr->gid);
1572 unlock_user_struct(tcr, optval_addr, 1);
1573 if (put_user_u32(len, optlen)) {
1574 return -TARGET_EFAULT;
1576 break;
1578 /* Options with 'int' argument. */
1579 case TARGET_SO_DEBUG:
1580 optname = SO_DEBUG;
1581 goto int_case;
1582 case TARGET_SO_REUSEADDR:
1583 optname = SO_REUSEADDR;
1584 goto int_case;
1585 case TARGET_SO_TYPE:
1586 optname = SO_TYPE;
1587 goto int_case;
1588 case TARGET_SO_ERROR:
1589 optname = SO_ERROR;
1590 goto int_case;
1591 case TARGET_SO_DONTROUTE:
1592 optname = SO_DONTROUTE;
1593 goto int_case;
1594 case TARGET_SO_BROADCAST:
1595 optname = SO_BROADCAST;
1596 goto int_case;
1597 case TARGET_SO_SNDBUF:
1598 optname = SO_SNDBUF;
1599 goto int_case;
1600 case TARGET_SO_RCVBUF:
1601 optname = SO_RCVBUF;
1602 goto int_case;
1603 case TARGET_SO_KEEPALIVE:
1604 optname = SO_KEEPALIVE;
1605 goto int_case;
1606 case TARGET_SO_OOBINLINE:
1607 optname = SO_OOBINLINE;
1608 goto int_case;
1609 case TARGET_SO_NO_CHECK:
1610 optname = SO_NO_CHECK;
1611 goto int_case;
1612 case TARGET_SO_PRIORITY:
1613 optname = SO_PRIORITY;
1614 goto int_case;
1615 #ifdef SO_BSDCOMPAT
1616 case TARGET_SO_BSDCOMPAT:
1617 optname = SO_BSDCOMPAT;
1618 goto int_case;
1619 #endif
1620 case TARGET_SO_PASSCRED:
1621 optname = SO_PASSCRED;
1622 goto int_case;
1623 case TARGET_SO_TIMESTAMP:
1624 optname = SO_TIMESTAMP;
1625 goto int_case;
1626 case TARGET_SO_RCVLOWAT:
1627 optname = SO_RCVLOWAT;
1628 goto int_case;
1629 default:
1630 goto int_case;
1632 break;
1633 case SOL_TCP:
1634 /* TCP options all take an 'int' value. */
1635 int_case:
1636 if (get_user_u32(len, optlen))
1637 return -TARGET_EFAULT;
1638 if (len < 0)
1639 return -TARGET_EINVAL;
1640 lv = sizeof(lv);
1641 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1642 if (ret < 0)
1643 return ret;
1644 if (len > lv)
1645 len = lv;
1646 if (len == 4) {
1647 if (put_user_u32(val, optval_addr))
1648 return -TARGET_EFAULT;
1649 } else {
1650 if (put_user_u8(val, optval_addr))
1651 return -TARGET_EFAULT;
1653 if (put_user_u32(len, optlen))
1654 return -TARGET_EFAULT;
1655 break;
1656 case SOL_IP:
1657 switch(optname) {
1658 case IP_TOS:
1659 case IP_TTL:
1660 case IP_HDRINCL:
1661 case IP_ROUTER_ALERT:
1662 case IP_RECVOPTS:
1663 case IP_RETOPTS:
1664 case IP_PKTINFO:
1665 case IP_MTU_DISCOVER:
1666 case IP_RECVERR:
1667 case IP_RECVTOS:
1668 #ifdef IP_FREEBIND
1669 case IP_FREEBIND:
1670 #endif
1671 case IP_MULTICAST_TTL:
1672 case IP_MULTICAST_LOOP:
1673 if (get_user_u32(len, optlen))
1674 return -TARGET_EFAULT;
1675 if (len < 0)
1676 return -TARGET_EINVAL;
1677 lv = sizeof(lv);
1678 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1679 if (ret < 0)
1680 return ret;
1681 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1682 len = 1;
1683 if (put_user_u32(len, optlen)
1684 || put_user_u8(val, optval_addr))
1685 return -TARGET_EFAULT;
1686 } else {
1687 if (len > sizeof(int))
1688 len = sizeof(int);
1689 if (put_user_u32(len, optlen)
1690 || put_user_u32(val, optval_addr))
1691 return -TARGET_EFAULT;
1693 break;
1694 default:
1695 ret = -TARGET_ENOPROTOOPT;
1696 break;
1698 break;
1699 default:
1700 unimplemented:
1701 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1702 level, optname);
1703 ret = -TARGET_EOPNOTSUPP;
1704 break;
1706 return ret;
1709 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
1710 int count, int copy)
1712 struct target_iovec *target_vec;
1713 struct iovec *vec;
1714 abi_ulong total_len, max_len;
1715 int i;
1716 int err = 0;
1718 if (count == 0) {
1719 errno = 0;
1720 return NULL;
1722 if (count < 0 || count > IOV_MAX) {
1723 errno = EINVAL;
1724 return NULL;
1727 vec = calloc(count, sizeof(struct iovec));
1728 if (vec == NULL) {
1729 errno = ENOMEM;
1730 return NULL;
1733 target_vec = lock_user(VERIFY_READ, target_addr,
1734 count * sizeof(struct target_iovec), 1);
1735 if (target_vec == NULL) {
1736 err = EFAULT;
1737 goto fail2;
1740 /* ??? If host page size > target page size, this will result in a
1741 value larger than what we can actually support. */
1742 max_len = 0x7fffffff & TARGET_PAGE_MASK;
1743 total_len = 0;
1745 for (i = 0; i < count; i++) {
1746 abi_ulong base = tswapal(target_vec[i].iov_base);
1747 abi_long len = tswapal(target_vec[i].iov_len);
1749 if (len < 0) {
1750 err = EINVAL;
1751 goto fail;
1752 } else if (len == 0) {
1753 /* Zero length pointer is ignored. */
1754 vec[i].iov_base = 0;
1755 } else {
1756 vec[i].iov_base = lock_user(type, base, len, copy);
1757 if (!vec[i].iov_base) {
1758 err = EFAULT;
1759 goto fail;
1761 if (len > max_len - total_len) {
1762 len = max_len - total_len;
1765 vec[i].iov_len = len;
1766 total_len += len;
1769 unlock_user(target_vec, target_addr, 0);
1770 return vec;
1772 fail:
1773 unlock_user(target_vec, target_addr, 0);
1774 fail2:
1775 free(vec);
1776 errno = err;
1777 return NULL;
1780 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1781 int count, int copy)
1783 struct target_iovec *target_vec;
1784 int i;
1786 target_vec = lock_user(VERIFY_READ, target_addr,
1787 count * sizeof(struct target_iovec), 1);
1788 if (target_vec) {
1789 for (i = 0; i < count; i++) {
1790 abi_ulong base = tswapal(target_vec[i].iov_base);
1791 abi_long len = tswapal(target_vec[i].iov_len);
1792 if (len < 0) {
1793 break;
1795 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1797 unlock_user(target_vec, target_addr, 0);
1800 free(vec);
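/*
 * lock_iovec()/unlock_iovec() form a pair: lock_iovec() builds a host iovec
 * array whose buffers point at locked guest memory (clamping the combined
 * length to what the host can handle), and unlock_iovec() releases those
 * buffers, copying data back to the guest only when 'copy' is set, i.e. for
 * read-style operations.
 */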
1803 static inline int target_to_host_sock_type(int *type)
1805 int host_type = 0;
1806 int target_type = *type;
1808 switch (target_type & TARGET_SOCK_TYPE_MASK) {
1809 case TARGET_SOCK_DGRAM:
1810 host_type = SOCK_DGRAM;
1811 break;
1812 case TARGET_SOCK_STREAM:
1813 host_type = SOCK_STREAM;
1814 break;
1815 default:
1816 host_type = target_type & TARGET_SOCK_TYPE_MASK;
1817 break;
1819 if (target_type & TARGET_SOCK_CLOEXEC) {
1820 #if defined(SOCK_CLOEXEC)
1821 host_type |= SOCK_CLOEXEC;
1822 #else
1823 return -TARGET_EINVAL;
1824 #endif
1826 if (target_type & TARGET_SOCK_NONBLOCK) {
1827 #if defined(SOCK_NONBLOCK)
1828 host_type |= SOCK_NONBLOCK;
1829 #elif !defined(O_NONBLOCK)
1830 return -TARGET_EINVAL;
1831 #endif
1833 *type = host_type;
1834 return 0;
1837 /* Try to emulate socket type flags after socket creation. */
1838 static int sock_flags_fixup(int fd, int target_type)
1840 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
1841 if (target_type & TARGET_SOCK_NONBLOCK) {
1842 int flags = fcntl(fd, F_GETFL);
1843 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
1844 close(fd);
1845 return -TARGET_EINVAL;
1848 #endif
1849 return fd;
1852 /* do_socket() Must return target values and target errnos. */
1853 static abi_long do_socket(int domain, int type, int protocol)
1855 int target_type = type;
1856 int ret;
1858 ret = target_to_host_sock_type(&type);
1859 if (ret) {
1860 return ret;
1863 if (domain == PF_NETLINK)
1864 return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1865 ret = get_errno(socket(domain, type, protocol));
1866 if (ret >= 0) {
1867 ret = sock_flags_fixup(ret, target_type);
1869 return ret;
1872 /* do_bind() Must return target values and target errnos. */
1873 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1874 socklen_t addrlen)
1876 void *addr;
1877 abi_long ret;
1879 if ((int)addrlen < 0) {
1880 return -TARGET_EINVAL;
1883 addr = alloca(addrlen+1);
1885 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1886 if (ret)
1887 return ret;
1889 return get_errno(bind(sockfd, addr, addrlen));
1892 /* do_connect() Must return target values and target errnos. */
1893 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1894 socklen_t addrlen)
1896 void *addr;
1897 abi_long ret;
1899 if ((int)addrlen < 0) {
1900 return -TARGET_EINVAL;
1903 addr = alloca(addrlen);
1905 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1906 if (ret)
1907 return ret;
1909 return get_errno(connect(sockfd, addr, addrlen));
1912 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
1913 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
1914 int flags, int send)
1916 abi_long ret, len;
1917 struct msghdr msg;
1918 int count;
1919 struct iovec *vec;
1920 abi_ulong target_vec;
1922 if (msgp->msg_name) {
1923 msg.msg_namelen = tswap32(msgp->msg_namelen);
1924 msg.msg_name = alloca(msg.msg_namelen);
1925 ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
1926 msg.msg_namelen);
1927 if (ret) {
1928 goto out2;
1930 } else {
1931 msg.msg_name = NULL;
1932 msg.msg_namelen = 0;
1934 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
1935 msg.msg_control = alloca(msg.msg_controllen);
1936 msg.msg_flags = tswap32(msgp->msg_flags);
1938 count = tswapal(msgp->msg_iovlen);
1939 target_vec = tswapal(msgp->msg_iov);
1940 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
1941 target_vec, count, send);
1942 if (vec == NULL) {
1943 ret = -host_to_target_errno(errno);
1944 goto out2;
1946 msg.msg_iovlen = count;
1947 msg.msg_iov = vec;
1949 if (send) {
1950 ret = target_to_host_cmsg(&msg, msgp);
1951 if (ret == 0)
1952 ret = get_errno(sendmsg(fd, &msg, flags));
1953 } else {
1954 ret = get_errno(recvmsg(fd, &msg, flags));
1955 if (!is_error(ret)) {
1956 len = ret;
1957 ret = host_to_target_cmsg(msgp, &msg);
1958 if (!is_error(ret)) {
1959 msgp->msg_namelen = tswap32(msg.msg_namelen);
1960 if (msg.msg_name != NULL) {
1961 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
1962 msg.msg_name, msg.msg_namelen);
1963 if (ret) {
1964 goto out;
1968 ret = len;
1973 out:
1974 unlock_iovec(vec, target_vec, count, !send);
1975 out2:
1976 return ret;
1979 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1980 int flags, int send)
1982 abi_long ret;
1983 struct target_msghdr *msgp;
1985 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1986 msgp,
1987 target_msg,
1988 send ? 1 : 0)) {
1989 return -TARGET_EFAULT;
1991 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
1992 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1993 return ret;
1996 #ifdef TARGET_NR_sendmmsg
1997 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
1998 * so it might not have this *mmsg-specific flag either.
2000 #ifndef MSG_WAITFORONE
2001 #define MSG_WAITFORONE 0x10000
2002 #endif
2004 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
2005 unsigned int vlen, unsigned int flags,
2006 int send)
2008 struct target_mmsghdr *mmsgp;
2009 abi_long ret = 0;
2010 int i;
2012 if (vlen > UIO_MAXIOV) {
2013 vlen = UIO_MAXIOV;
2016 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
2017 if (!mmsgp) {
2018 return -TARGET_EFAULT;
2021 for (i = 0; i < vlen; i++) {
2022 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
2023 if (is_error(ret)) {
2024 break;
2026 mmsgp[i].msg_len = tswap32(ret);
2027 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
2028 if (flags & MSG_WAITFORONE) {
2029 flags |= MSG_DONTWAIT;
2033 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
2035 /* Return number of datagrams sent if we sent any at all;
2036 * otherwise return the error.
2038 if (i) {
2039 return i;
2041 return ret;
2043 #endif
2045 /* If we don't have a system accept4() then just call accept.
2046 * The callsites to do_accept4() will ensure that they don't
2047 * pass a non-zero flags argument in this config.
2049 #ifndef CONFIG_ACCEPT4
2050 static inline int accept4(int sockfd, struct sockaddr *addr,
2051 socklen_t *addrlen, int flags)
2053 assert(flags == 0);
2054 return accept(sockfd, addr, addrlen);
2056 #endif
2058 /* do_accept4() must return target values and target errnos. */
2059 static abi_long do_accept4(int fd, abi_ulong target_addr,
2060 abi_ulong target_addrlen_addr, int flags)
2062 socklen_t addrlen;
2063 void *addr;
2064 abi_long ret;
2066 if (target_addr == 0) {
2067 return get_errno(accept4(fd, NULL, NULL, flags));
2070 /* Linux returns EINVAL if the addrlen pointer is invalid */
2071 if (get_user_u32(addrlen, target_addrlen_addr))
2072 return -TARGET_EINVAL;
2074 if ((int)addrlen < 0) {
2075 return -TARGET_EINVAL;
2078 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2079 return -TARGET_EINVAL;
2081 addr = alloca(addrlen);
2083 ret = get_errno(accept4(fd, addr, &addrlen, flags));
2084 if (!is_error(ret)) {
2085 host_to_target_sockaddr(target_addr, addr, addrlen);
2086 if (put_user_u32(addrlen, target_addrlen_addr))
2087 ret = -TARGET_EFAULT;
2089 return ret;
2092 /* do_getpeername() must return target values and target errnos. */
2093 static abi_long do_getpeername(int fd, abi_ulong target_addr,
2094 abi_ulong target_addrlen_addr)
2096 socklen_t addrlen;
2097 void *addr;
2098 abi_long ret;
2100 if (get_user_u32(addrlen, target_addrlen_addr))
2101 return -TARGET_EFAULT;
2103 if ((int)addrlen < 0) {
2104 return -TARGET_EINVAL;
2107 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2108 return -TARGET_EFAULT;
2110 addr = alloca(addrlen);
2112 ret = get_errno(getpeername(fd, addr, &addrlen));
2113 if (!is_error(ret)) {
2114 host_to_target_sockaddr(target_addr, addr, addrlen);
2115 if (put_user_u32(addrlen, target_addrlen_addr))
2116 ret = -TARGET_EFAULT;
2118 return ret;
2121 /* do_getsockname() must return target values and target errnos. */
2122 static abi_long do_getsockname(int fd, abi_ulong target_addr,
2123 abi_ulong target_addrlen_addr)
2125 socklen_t addrlen;
2126 void *addr;
2127 abi_long ret;
2129 if (get_user_u32(addrlen, target_addrlen_addr))
2130 return -TARGET_EFAULT;
2132 if ((int)addrlen < 0) {
2133 return -TARGET_EINVAL;
2136 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
2137 return -TARGET_EFAULT;
2139 addr = alloca(addrlen);
2141 ret = get_errno(getsockname(fd, addr, &addrlen));
2142 if (!is_error(ret)) {
2143 host_to_target_sockaddr(target_addr, addr, addrlen);
2144 if (put_user_u32(addrlen, target_addrlen_addr))
2145 ret = -TARGET_EFAULT;
2147 return ret;
2150 /* do_socketpair() must return target values and target errnos. */
2151 static abi_long do_socketpair(int domain, int type, int protocol,
2152 abi_ulong target_tab_addr)
2154 int tab[2];
2155 abi_long ret;
2157 target_to_host_sock_type(&type);
2159 ret = get_errno(socketpair(domain, type, protocol, tab));
2160 if (!is_error(ret)) {
2161 if (put_user_s32(tab[0], target_tab_addr)
2162 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2163 ret = -TARGET_EFAULT;
2165 return ret;
2168 /* do_sendto() must return target values and target errnos. */
2169 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
2170 abi_ulong target_addr, socklen_t addrlen)
2172 void *addr;
2173 void *host_msg;
2174 abi_long ret;
2176 if ((int)addrlen < 0) {
2177 return -TARGET_EINVAL;
2180 host_msg = lock_user(VERIFY_READ, msg, len, 1);
2181 if (!host_msg)
2182 return -TARGET_EFAULT;
2183 if (target_addr) {
2184 addr = alloca(addrlen);
2185 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
2186 if (ret) {
2187 unlock_user(host_msg, msg, 0);
2188 return ret;
2190 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
2191 } else {
2192 ret = get_errno(send(fd, host_msg, len, flags));
2194 unlock_user(host_msg, msg, 0);
2195 return ret;
2198 /* do_recvfrom() must return target values and target errnos. */
2199 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
2200 abi_ulong target_addr,
2201 abi_ulong target_addrlen)
2203 socklen_t addrlen;
2204 void *addr;
2205 void *host_msg;
2206 abi_long ret;
2208 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2209 if (!host_msg)
2210 return -TARGET_EFAULT;
2211 if (target_addr) {
2212 if (get_user_u32(addrlen, target_addrlen)) {
2213 ret = -TARGET_EFAULT;
2214 goto fail;
2216 if ((int)addrlen < 0) {
2217 ret = -TARGET_EINVAL;
2218 goto fail;
2220 addr = alloca(addrlen);
2221 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2222 } else {
2223 addr = NULL; /* To keep compiler quiet. */
2224 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2226 if (!is_error(ret)) {
2227 if (target_addr) {
2228 host_to_target_sockaddr(target_addr, addr, addrlen);
2229 if (put_user_u32(addrlen, target_addrlen)) {
2230 ret = -TARGET_EFAULT;
2231 goto fail;
2234 unlock_user(host_msg, msg, len);
2235 } else {
2236 fail:
2237 unlock_user(host_msg, msg, 0);
2239 return ret;
2242 #ifdef TARGET_NR_socketcall
2243 /* do_socketcall() must return target values and target errnos. */
2244 static abi_long do_socketcall(int num, abi_ulong vptr)
2246 static const unsigned ac[] = { /* number of arguments per call */
2247 [SOCKOP_socket] = 3, /* domain, type, protocol */
2248 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
2249 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
2250 [SOCKOP_listen] = 2, /* sockfd, backlog */
2251 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
2252 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
2253 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
2254 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
2255 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
2256 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
2257 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
2258 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2259 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
2260 [SOCKOP_shutdown] = 2, /* sockfd, how */
2261 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
2262 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
2263 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2264 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
2266 abi_long a[6]; /* max 6 args */
2268 /* first, collect the arguments in a[] according to ac[] */
2269 if (num >= 0 && num < ARRAY_SIZE(ac)) {
2270 unsigned i;
2271 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
2272 for (i = 0; i < ac[num]; ++i) {
2273 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
2274 return -TARGET_EFAULT;
2279 /* now that we have the args, actually handle the call */
2280 switch (num) {
2281 case SOCKOP_socket: /* domain, type, protocol */
2282 return do_socket(a[0], a[1], a[2]);
2283 case SOCKOP_bind: /* sockfd, addr, addrlen */
2284 return do_bind(a[0], a[1], a[2]);
2285 case SOCKOP_connect: /* sockfd, addr, addrlen */
2286 return do_connect(a[0], a[1], a[2]);
2287 case SOCKOP_listen: /* sockfd, backlog */
2288 return get_errno(listen(a[0], a[1]));
2289 case SOCKOP_accept: /* sockfd, addr, addrlen */
2290 return do_accept4(a[0], a[1], a[2], 0);
2291 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
2292 return do_accept4(a[0], a[1], a[2], a[3]);
2293 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
2294 return do_getsockname(a[0], a[1], a[2]);
2295 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
2296 return do_getpeername(a[0], a[1], a[2]);
2297 case SOCKOP_socketpair: /* domain, type, protocol, tab */
2298 return do_socketpair(a[0], a[1], a[2], a[3]);
2299 case SOCKOP_send: /* sockfd, msg, len, flags */
2300 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
2301 case SOCKOP_recv: /* sockfd, msg, len, flags */
2302 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
2303 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
2304 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
2305 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
2306 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
2307 case SOCKOP_shutdown: /* sockfd, how */
2308 return get_errno(shutdown(a[0], a[1]));
2309 case SOCKOP_sendmsg: /* sockfd, msg, flags */
2310 return do_sendrecvmsg(a[0], a[1], a[2], 1);
2311 case SOCKOP_recvmsg: /* sockfd, msg, flags */
2312 return do_sendrecvmsg(a[0], a[1], a[2], 0);
2313 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
2314 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
2315 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
2316 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
2317 default:
2318 gemu_log("Unsupported socketcall: %d\n", num);
2319 return -TARGET_ENOSYS;
2322 #endif
2324 #define N_SHM_REGIONS 32
2326 static struct shm_region {
2327 abi_ulong start;
2328 abi_ulong size;
2329 } shm_regions[N_SHM_REGIONS];
2331 struct target_semid_ds
2333 struct target_ipc_perm sem_perm;
2334 abi_ulong sem_otime;
2335 abi_ulong __unused1;
2336 abi_ulong sem_ctime;
2337 abi_ulong __unused2;
2338 abi_ulong sem_nsems;
2339 abi_ulong __unused3;
2340 abi_ulong __unused4;
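/* Converters for struct ipc_perm: every field is byte-swapped between the
 * target and host layouts; the width used for mode and __seq depends on the
 * target architecture, hence the #if blocks below.
 */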
2343 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2344 abi_ulong target_addr)
2346 struct target_ipc_perm *target_ip;
2347 struct target_semid_ds *target_sd;
2349 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2350 return -TARGET_EFAULT;
2351 target_ip = &(target_sd->sem_perm);
2352 host_ip->__key = tswap32(target_ip->__key);
2353 host_ip->uid = tswap32(target_ip->uid);
2354 host_ip->gid = tswap32(target_ip->gid);
2355 host_ip->cuid = tswap32(target_ip->cuid);
2356 host_ip->cgid = tswap32(target_ip->cgid);
2357 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2358 host_ip->mode = tswap32(target_ip->mode);
2359 #else
2360 host_ip->mode = tswap16(target_ip->mode);
2361 #endif
2362 #if defined(TARGET_PPC)
2363 host_ip->__seq = tswap32(target_ip->__seq);
2364 #else
2365 host_ip->__seq = tswap16(target_ip->__seq);
2366 #endif
2367 unlock_user_struct(target_sd, target_addr, 0);
2368 return 0;
2371 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2372 struct ipc_perm *host_ip)
2374 struct target_ipc_perm *target_ip;
2375 struct target_semid_ds *target_sd;
2377 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2378 return -TARGET_EFAULT;
2379 target_ip = &(target_sd->sem_perm);
2380 target_ip->__key = tswap32(host_ip->__key);
2381 target_ip->uid = tswap32(host_ip->uid);
2382 target_ip->gid = tswap32(host_ip->gid);
2383 target_ip->cuid = tswap32(host_ip->cuid);
2384 target_ip->cgid = tswap32(host_ip->cgid);
2385 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
2386 target_ip->mode = tswap32(host_ip->mode);
2387 #else
2388 target_ip->mode = tswap16(host_ip->mode);
2389 #endif
2390 #if defined(TARGET_PPC)
2391 target_ip->__seq = tswap32(host_ip->__seq);
2392 #else
2393 target_ip->__seq = tswap16(host_ip->__seq);
2394 #endif
2395 unlock_user_struct(target_sd, target_addr, 1);
2396 return 0;
2399 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2400 abi_ulong target_addr)
2402 struct target_semid_ds *target_sd;
2404 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2405 return -TARGET_EFAULT;
2406 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2407 return -TARGET_EFAULT;
2408 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
2409 host_sd->sem_otime = tswapal(target_sd->sem_otime);
2410 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
2411 unlock_user_struct(target_sd, target_addr, 0);
2412 return 0;
2415 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2416 struct semid_ds *host_sd)
2418 struct target_semid_ds *target_sd;
2420 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2421 return -TARGET_EFAULT;
2422 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2423 return -TARGET_EFAULT;
2424 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
2425 target_sd->sem_otime = tswapal(host_sd->sem_otime);
2426 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
2427 unlock_user_struct(target_sd, target_addr, 1);
2428 return 0;
2431 struct target_seminfo {
2432 int semmap;
2433 int semmni;
2434 int semmns;
2435 int semmnu;
2436 int semmsl;
2437 int semopm;
2438 int semume;
2439 int semusz;
2440 int semvmx;
2441 int semaem;
2444 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2445 struct seminfo *host_seminfo)
2447 struct target_seminfo *target_seminfo;
2448 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2449 return -TARGET_EFAULT;
2450 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2451 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2452 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2453 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2454 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2455 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2456 __put_user(host_seminfo->semume, &target_seminfo->semume);
2457 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2458 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2459 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2460 unlock_user_struct(target_seminfo, target_addr, 1);
2461 return 0;
2464 union semun {
2465 int val;
2466 struct semid_ds *buf;
2467 unsigned short *array;
2468 struct seminfo *__buf;
2471 union target_semun {
2472 int val;
2473 abi_ulong buf;
2474 abi_ulong array;
2475 abi_ulong __buf;
2478 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2479 abi_ulong target_addr)
2481 int nsems;
2482 unsigned short *array;
2483 union semun semun;
2484 struct semid_ds semid_ds;
2485 int i, ret;
2487 semun.buf = &semid_ds;
2489 ret = semctl(semid, 0, IPC_STAT, semun);
2490 if (ret == -1)
2491 return get_errno(ret);
2493 nsems = semid_ds.sem_nsems;
2495 *host_array = malloc(nsems*sizeof(unsigned short));
2496 if (!*host_array) {
2497 return -TARGET_ENOMEM;
2499 array = lock_user(VERIFY_READ, target_addr,
2500 nsems*sizeof(unsigned short), 1);
2501 if (!array) {
2502 free(*host_array);
2503 return -TARGET_EFAULT;
2506 for(i=0; i<nsems; i++) {
2507 __get_user((*host_array)[i], &array[i]);
2509 unlock_user(array, target_addr, 0);
2511 return 0;
2514 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2515 unsigned short **host_array)
2517 int nsems;
2518 unsigned short *array;
2519 union semun semun;
2520 struct semid_ds semid_ds;
2521 int i, ret;
2523 semun.buf = &semid_ds;
2525 ret = semctl(semid, 0, IPC_STAT, semun);
2526 if (ret == -1)
2527 return get_errno(ret);
2529 nsems = semid_ds.sem_nsems;
2531 array = lock_user(VERIFY_WRITE, target_addr,
2532 nsems*sizeof(unsigned short), 0);
2533 if (!array)
2534 return -TARGET_EFAULT;
2536 for(i=0; i<nsems; i++) {
2537 __put_user((*host_array)[i], &array[i]);
2539 free(*host_array);
2540 unlock_user(array, target_addr, 1);
2542 return 0;
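/* do_semctl(): translate the target semun argument as required by the
 * command, issue the host semctl(), and convert any returned data (value,
 * array, semid_ds or seminfo) back to the target.
 */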
2545 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2546 union target_semun target_su)
2548 union semun arg;
2549 struct semid_ds dsarg;
2550 unsigned short *array = NULL;
2551 struct seminfo seminfo;
2552 abi_long ret = -TARGET_EINVAL;
2553 abi_long err;
2554 cmd &= 0xff;
2556 switch( cmd ) {
2557 case GETVAL:
2558 case SETVAL:
2559 arg.val = tswap32(target_su.val);
2560 ret = get_errno(semctl(semid, semnum, cmd, arg));
2561 target_su.val = tswap32(arg.val);
2562 break;
2563 case GETALL:
2564 case SETALL:
2565 err = target_to_host_semarray(semid, &array, target_su.array);
2566 if (err)
2567 return err;
2568 arg.array = array;
2569 ret = get_errno(semctl(semid, semnum, cmd, arg));
2570 err = host_to_target_semarray(semid, target_su.array, &array);
2571 if (err)
2572 return err;
2573 break;
2574 case IPC_STAT:
2575 case IPC_SET:
2576 case SEM_STAT:
2577 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2578 if (err)
2579 return err;
2580 arg.buf = &dsarg;
2581 ret = get_errno(semctl(semid, semnum, cmd, arg));
2582 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2583 if (err)
2584 return err;
2585 break;
2586 case IPC_INFO:
2587 case SEM_INFO:
2588 arg.__buf = &seminfo;
2589 ret = get_errno(semctl(semid, semnum, cmd, arg));
2590 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2591 if (err)
2592 return err;
2593 break;
2594 case IPC_RMID:
2595 case GETPID:
2596 case GETNCNT:
2597 case GETZCNT:
2598 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2599 break;
2602 return ret;
2605 struct target_sembuf {
2606 unsigned short sem_num;
2607 short sem_op;
2608 short sem_flg;
2611 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2612 abi_ulong target_addr,
2613 unsigned nsops)
2615 struct target_sembuf *target_sembuf;
2616 int i;
2618 target_sembuf = lock_user(VERIFY_READ, target_addr,
2619 nsops*sizeof(struct target_sembuf), 1);
2620 if (!target_sembuf)
2621 return -TARGET_EFAULT;
2623 for(i=0; i<nsops; i++) {
2624 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2625 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2626 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2629 unlock_user(target_sembuf, target_addr, 0);
2631 return 0;
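/* do_semop(): convert the target sembuf array to host format and perform
 * the semop() call.
 */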
2634 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2636 struct sembuf sops[nsops];
2638 if (target_to_host_sembuf(sops, ptr, nsops))
2639 return -TARGET_EFAULT;
2641 return get_errno(semop(semid, sops, nsops));
2644 struct target_msqid_ds
2646 struct target_ipc_perm msg_perm;
2647 abi_ulong msg_stime;
2648 #if TARGET_ABI_BITS == 32
2649 abi_ulong __unused1;
2650 #endif
2651 abi_ulong msg_rtime;
2652 #if TARGET_ABI_BITS == 32
2653 abi_ulong __unused2;
2654 #endif
2655 abi_ulong msg_ctime;
2656 #if TARGET_ABI_BITS == 32
2657 abi_ulong __unused3;
2658 #endif
2659 abi_ulong __msg_cbytes;
2660 abi_ulong msg_qnum;
2661 abi_ulong msg_qbytes;
2662 abi_ulong msg_lspid;
2663 abi_ulong msg_lrpid;
2664 abi_ulong __unused4;
2665 abi_ulong __unused5;
2668 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2669 abi_ulong target_addr)
2671 struct target_msqid_ds *target_md;
2673 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2674 return -TARGET_EFAULT;
2675 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2676 return -TARGET_EFAULT;
2677 host_md->msg_stime = tswapal(target_md->msg_stime);
2678 host_md->msg_rtime = tswapal(target_md->msg_rtime);
2679 host_md->msg_ctime = tswapal(target_md->msg_ctime);
2680 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
2681 host_md->msg_qnum = tswapal(target_md->msg_qnum);
2682 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
2683 host_md->msg_lspid = tswapal(target_md->msg_lspid);
2684 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
2685 unlock_user_struct(target_md, target_addr, 0);
2686 return 0;
2689 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2690 struct msqid_ds *host_md)
2692 struct target_msqid_ds *target_md;
2694 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2695 return -TARGET_EFAULT;
2696 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2697 return -TARGET_EFAULT;
2698 target_md->msg_stime = tswapal(host_md->msg_stime);
2699 target_md->msg_rtime = tswapal(host_md->msg_rtime);
2700 target_md->msg_ctime = tswapal(host_md->msg_ctime);
2701 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
2702 target_md->msg_qnum = tswapal(host_md->msg_qnum);
2703 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
2704 target_md->msg_lspid = tswapal(host_md->msg_lspid);
2705 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
2706 unlock_user_struct(target_md, target_addr, 1);
2707 return 0;
2710 struct target_msginfo {
2711 int msgpool;
2712 int msgmap;
2713 int msgmax;
2714 int msgmnb;
2715 int msgmni;
2716 int msgssz;
2717 int msgtql;
2718 unsigned short int msgseg;
2721 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2722 struct msginfo *host_msginfo)
2724 struct target_msginfo *target_msginfo;
2725 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2726 return -TARGET_EFAULT;
2727 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2728 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2729 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2730 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2731 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2732 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2733 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2734 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2735 unlock_user_struct(target_msginfo, target_addr, 1);
2736 return 0;
2739 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2741 struct msqid_ds dsarg;
2742 struct msginfo msginfo;
2743 abi_long ret = -TARGET_EINVAL;
2745 cmd &= 0xff;
2747 switch (cmd) {
2748 case IPC_STAT:
2749 case IPC_SET:
2750 case MSG_STAT:
2751 if (target_to_host_msqid_ds(&dsarg,ptr))
2752 return -TARGET_EFAULT;
2753 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2754 if (host_to_target_msqid_ds(ptr,&dsarg))
2755 return -TARGET_EFAULT;
2756 break;
2757 case IPC_RMID:
2758 ret = get_errno(msgctl(msgid, cmd, NULL));
2759 break;
2760 case IPC_INFO:
2761 case MSG_INFO:
2762 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2763 if (host_to_target_msginfo(ptr, &msginfo))
2764 return -TARGET_EFAULT;
2765 break;
2768 return ret;
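/* Message-queue helpers: do_msgsnd() copies the guest msgbuf into a host
 * buffer (byte-swapping mtype) before calling msgsnd(), and do_msgrcv()
 * copies the received mtype and text back out to guest memory.
 */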
2771 struct target_msgbuf {
2772 abi_long mtype;
2773 char mtext[1];
2776 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2777 unsigned int msgsz, int msgflg)
2779 struct target_msgbuf *target_mb;
2780 struct msgbuf *host_mb;
2781 abi_long ret = 0;
2783 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2784 return -TARGET_EFAULT;
2785 host_mb = malloc(msgsz+sizeof(long));
2786 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
2787 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2788 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2789 free(host_mb);
2790 unlock_user_struct(target_mb, msgp, 0);
2792 return ret;
2795 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2796 unsigned int msgsz, abi_long msgtyp,
2797 int msgflg)
2799 struct target_msgbuf *target_mb;
2800 char *target_mtext;
2801 struct msgbuf *host_mb;
2802 abi_long ret = 0;
2804 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2805 return -TARGET_EFAULT;
2807 host_mb = g_malloc(msgsz+sizeof(long));
2808 ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
2810 if (ret > 0) {
2811 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2812 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2813 if (!target_mtext) {
2814 ret = -TARGET_EFAULT;
2815 goto end;
2817 memcpy(target_mb->mtext, host_mb->mtext, ret);
2818 unlock_user(target_mtext, target_mtext_addr, ret);
2821 target_mb->mtype = tswapal(host_mb->mtype);
2823 end:
2824 if (target_mb)
2825 unlock_user_struct(target_mb, msgp, 1);
2826 g_free(host_mb);
2827 return ret;
2830 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2831 abi_ulong target_addr)
2833 struct target_shmid_ds *target_sd;
2835 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2836 return -TARGET_EFAULT;
2837 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2838 return -TARGET_EFAULT;
2839 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2840 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2841 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2842 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2843 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2844 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2845 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2846 unlock_user_struct(target_sd, target_addr, 0);
2847 return 0;
2850 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2851 struct shmid_ds *host_sd)
2853 struct target_shmid_ds *target_sd;
2855 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2856 return -TARGET_EFAULT;
2857 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2858 return -TARGET_EFAULT;
2859 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2860 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2861 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2862 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2863 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2864 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2865 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2866 unlock_user_struct(target_sd, target_addr, 1);
2867 return 0;
2870 struct target_shminfo {
2871 abi_ulong shmmax;
2872 abi_ulong shmmin;
2873 abi_ulong shmmni;
2874 abi_ulong shmseg;
2875 abi_ulong shmall;
2878 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2879 struct shminfo *host_shminfo)
2881 struct target_shminfo *target_shminfo;
2882 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2883 return -TARGET_EFAULT;
2884 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2885 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2886 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2887 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2888 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2889 unlock_user_struct(target_shminfo, target_addr, 1);
2890 return 0;
2893 struct target_shm_info {
2894 int used_ids;
2895 abi_ulong shm_tot;
2896 abi_ulong shm_rss;
2897 abi_ulong shm_swp;
2898 abi_ulong swap_attempts;
2899 abi_ulong swap_successes;
2902 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2903 struct shm_info *host_shm_info)
2905 struct target_shm_info *target_shm_info;
2906 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2907 return -TARGET_EFAULT;
2908 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2909 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2910 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2911 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2912 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2913 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2914 unlock_user_struct(target_shm_info, target_addr, 1);
2915 return 0;
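/* do_shmctl(): convert the shmid_ds/shminfo/shm_info argument as required
 * by the command, call the host shmctl(), and write the result back to the
 * target buffer.
 */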
2918 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2920 struct shmid_ds dsarg;
2921 struct shminfo shminfo;
2922 struct shm_info shm_info;
2923 abi_long ret = -TARGET_EINVAL;
2925 cmd &= 0xff;
2927 switch(cmd) {
2928 case IPC_STAT:
2929 case IPC_SET:
2930 case SHM_STAT:
2931 if (target_to_host_shmid_ds(&dsarg, buf))
2932 return -TARGET_EFAULT;
2933 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2934 if (host_to_target_shmid_ds(buf, &dsarg))
2935 return -TARGET_EFAULT;
2936 break;
2937 case IPC_INFO:
2938 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2939 if (host_to_target_shminfo(buf, &shminfo))
2940 return -TARGET_EFAULT;
2941 break;
2942 case SHM_INFO:
2943 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2944 if (host_to_target_shm_info(buf, &shm_info))
2945 return -TARGET_EFAULT;
2946 break;
2947 case IPC_RMID:
2948 case SHM_LOCK:
2949 case SHM_UNLOCK:
2950 ret = get_errno(shmctl(shmid, cmd, NULL));
2951 break;
2954 return ret;
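/* do_shmat(): attach the segment at the guest-supplied address, or find a
 * free region with mmap_find_vma() when none is given, then set the guest
 * page flags and record the mapping in shm_regions[] so that do_shmdt()
 * can undo it later.
 */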
2957 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2959 abi_long raddr;
2960 void *host_raddr;
2961 struct shmid_ds shm_info;
2962 int i,ret;
2964 /* find out the length of the shared memory segment */
2965 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2966 if (is_error(ret)) {
2967 /* can't get length, bail out */
2968 return ret;
2971 mmap_lock();
2973 if (shmaddr)
2974 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2975 else {
2976 abi_ulong mmap_start;
2978 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2980 if (mmap_start == -1) {
2981 errno = ENOMEM;
2982 host_raddr = (void *)-1;
2983 } else
2984 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2987 if (host_raddr == (void *)-1) {
2988 mmap_unlock();
2989 return get_errno((long)host_raddr);
2991 raddr=h2g((unsigned long)host_raddr);
2993 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2994 PAGE_VALID | PAGE_READ |
2995 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2997 for (i = 0; i < N_SHM_REGIONS; i++) {
2998 if (shm_regions[i].start == 0) {
2999 shm_regions[i].start = raddr;
3000 shm_regions[i].size = shm_info.shm_segsz;
3001 break;
3005 mmap_unlock();
3006 return raddr;
3010 static inline abi_long do_shmdt(abi_ulong shmaddr)
3012 int i;
3014 for (i = 0; i < N_SHM_REGIONS; ++i) {
3015 if (shm_regions[i].start == shmaddr) {
3016 shm_regions[i].start = 0;
3017 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3018 break;
3022 return get_errno(shmdt(g2h(shmaddr)));
3025 #ifdef TARGET_NR_ipc
3026 /* ??? This only works with linear mappings. */
3027 /* do_ipc() must return target values and target errnos. */
3028 static abi_long do_ipc(unsigned int call, int first,
3029 int second, int third,
3030 abi_long ptr, abi_long fifth)
3032 int version;
3033 abi_long ret = 0;
3035 version = call >> 16;
3036 call &= 0xffff;
3038 switch (call) {
3039 case IPCOP_semop:
3040 ret = do_semop(first, ptr, second);
3041 break;
3043 case IPCOP_semget:
3044 ret = get_errno(semget(first, second, third));
3045 break;
3047 case IPCOP_semctl:
3048 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3049 break;
3051 case IPCOP_msgget:
3052 ret = get_errno(msgget(first, second));
3053 break;
3055 case IPCOP_msgsnd:
3056 ret = do_msgsnd(first, ptr, second, third);
3057 break;
3059 case IPCOP_msgctl:
3060 ret = do_msgctl(first, second, ptr);
3061 break;
3063 case IPCOP_msgrcv:
3064 switch (version) {
3065 case 0:
3067 struct target_ipc_kludge {
3068 abi_long msgp;
3069 abi_long msgtyp;
3070 } *tmp;
3072 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3073 ret = -TARGET_EFAULT;
3074 break;
3077 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
3079 unlock_user_struct(tmp, ptr, 0);
3080 break;
3082 default:
3083 ret = do_msgrcv(first, ptr, second, fifth, third);
3085 break;
3087 case IPCOP_shmat:
3088 switch (version) {
3089 default:
3091 abi_ulong raddr;
3092 raddr = do_shmat(first, ptr, second);
3093 if (is_error(raddr))
3094 return get_errno(raddr);
3095 if (put_user_ual(raddr, third))
3096 return -TARGET_EFAULT;
3097 break;
3099 case 1:
3100 ret = -TARGET_EINVAL;
3101 break;
3103 break;
3104 case IPCOP_shmdt:
3105 ret = do_shmdt(ptr);
3106 break;
3108 case IPCOP_shmget:
3109 /* IPC_* flag values are the same on all Linux platforms */
3110 ret = get_errno(shmget(first, second, third));
3111 break;
3113 /* IPC_* and SHM_* command values are the same on all Linux platforms */
3114 case IPCOP_shmctl:
3115 ret = do_shmctl(first, second, ptr);
3116 break;
3117 default:
3118 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3119 ret = -TARGET_ENOSYS;
3120 break;
3122 return ret;
3124 #endif
3126 /* kernel structure types definitions */
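/* syscall_types.h is included twice: first to build an enum of STRUCT_*
 * identifiers, then to emit the argtype descriptor array for each structure
 * used by the ioctl conversion code below.
 */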
3128 #define STRUCT(name, ...) STRUCT_ ## name,
3129 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3130 enum {
3131 #include "syscall_types.h"
3133 #undef STRUCT
3134 #undef STRUCT_SPECIAL
3136 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3137 #define STRUCT_SPECIAL(name)
3138 #include "syscall_types.h"
3139 #undef STRUCT
3140 #undef STRUCT_SPECIAL
3142 typedef struct IOCTLEntry IOCTLEntry;
3144 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3145 int fd, abi_long cmd, abi_long arg);
3147 struct IOCTLEntry {
3148 unsigned int target_cmd;
3149 unsigned int host_cmd;
3150 const char *name;
3151 int access;
3152 do_ioctl_fn *do_ioctl;
3153 const argtype arg_type[5];
3156 #define IOC_R 0x0001
3157 #define IOC_W 0x0002
3158 #define IOC_RW (IOC_R | IOC_W)
3160 #define MAX_STRUCT_SIZE 4096
3162 #ifdef CONFIG_FIEMAP
3163 /* So fiemap access checks don't overflow on 32 bit systems.
3164 * This is very slightly smaller than the limit imposed by
3165 * the underlying kernel.
3167 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3168 / sizeof(struct fiemap_extent))
3170 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3171 int fd, abi_long cmd, abi_long arg)
3173 /* The parameter for this ioctl is a struct fiemap followed
3174 * by an array of struct fiemap_extent whose size is set
3175 * in fiemap->fm_extent_count. The array is filled in by the
3176 * ioctl.
3178 int target_size_in, target_size_out;
3179 struct fiemap *fm;
3180 const argtype *arg_type = ie->arg_type;
3181 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3182 void *argptr, *p;
3183 abi_long ret;
3184 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3185 uint32_t outbufsz;
3186 int free_fm = 0;
3188 assert(arg_type[0] == TYPE_PTR);
3189 assert(ie->access == IOC_RW);
3190 arg_type++;
3191 target_size_in = thunk_type_size(arg_type, 0);
3192 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3193 if (!argptr) {
3194 return -TARGET_EFAULT;
3196 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3197 unlock_user(argptr, arg, 0);
3198 fm = (struct fiemap *)buf_temp;
3199 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3200 return -TARGET_EINVAL;
3203 outbufsz = sizeof (*fm) +
3204 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3206 if (outbufsz > MAX_STRUCT_SIZE) {
3207 /* We can't fit all the extents into the fixed size buffer.
3208 * Allocate one that is large enough and use it instead.
3210 fm = malloc(outbufsz);
3211 if (!fm) {
3212 return -TARGET_ENOMEM;
3214 memcpy(fm, buf_temp, sizeof(struct fiemap));
3215 free_fm = 1;
3217 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3218 if (!is_error(ret)) {
3219 target_size_out = target_size_in;
3220 /* An extent_count of 0 means we were only counting the extents
3221 * so there are no structs to copy
3223 if (fm->fm_extent_count != 0) {
3224 target_size_out += fm->fm_mapped_extents * extent_size;
3226 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3227 if (!argptr) {
3228 ret = -TARGET_EFAULT;
3229 } else {
3230 /* Convert the struct fiemap */
3231 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3232 if (fm->fm_extent_count != 0) {
3233 p = argptr + target_size_in;
3234 /* ...and then all the struct fiemap_extents */
3235 for (i = 0; i < fm->fm_mapped_extents; i++) {
3236 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3237 THUNK_TARGET);
3238 p += extent_size;
3241 unlock_user(argptr, arg, target_size_out);
3244 if (free_fm) {
3245 free(fm);
3247 return ret;
3249 #endif
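/* do_ioctl_ifconf(): the target and host struct ifreq sizes differ, so
 * ifc_len is rescaled and each ifreq entry is converted individually when
 * copying the result back to the guest.
 */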
3251 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3252 int fd, abi_long cmd, abi_long arg)
3254 const argtype *arg_type = ie->arg_type;
3255 int target_size;
3256 void *argptr;
3257 int ret;
3258 struct ifconf *host_ifconf;
3259 uint32_t outbufsz;
3260 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3261 int target_ifreq_size;
3262 int nb_ifreq;
3263 int free_buf = 0;
3264 int i;
3265 int target_ifc_len;
3266 abi_long target_ifc_buf;
3267 int host_ifc_len;
3268 char *host_ifc_buf;
3270 assert(arg_type[0] == TYPE_PTR);
3271 assert(ie->access == IOC_RW);
3273 arg_type++;
3274 target_size = thunk_type_size(arg_type, 0);
3276 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3277 if (!argptr)
3278 return -TARGET_EFAULT;
3279 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3280 unlock_user(argptr, arg, 0);
3282 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3283 target_ifc_len = host_ifconf->ifc_len;
3284 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3286 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3287 nb_ifreq = target_ifc_len / target_ifreq_size;
3288 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3290 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3291 if (outbufsz > MAX_STRUCT_SIZE) {
3292 /* We can't fit all the ifreq entries into the fixed size buffer.
3293 * Allocate one that is large enough and use it instead.
3295 host_ifconf = malloc(outbufsz);
3296 if (!host_ifconf) {
3297 return -TARGET_ENOMEM;
3299 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3300 free_buf = 1;
3302 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3304 host_ifconf->ifc_len = host_ifc_len;
3305 host_ifconf->ifc_buf = host_ifc_buf;
3307 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3308 if (!is_error(ret)) {
3309 /* convert host ifc_len to target ifc_len */
3311 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3312 target_ifc_len = nb_ifreq * target_ifreq_size;
3313 host_ifconf->ifc_len = target_ifc_len;
3315 /* restore target ifc_buf */
3317 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3319 /* copy struct ifconf to target user */
3321 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3322 if (!argptr)
3323 return -TARGET_EFAULT;
3324 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3325 unlock_user(argptr, arg, target_size);
3327 /* copy ifreq[] to target user */
3329 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3330 for (i = 0; i < nb_ifreq ; i++) {
3331 thunk_convert(argptr + i * target_ifreq_size,
3332 host_ifc_buf + i * sizeof(struct ifreq),
3333 ifreq_arg_type, THUNK_TARGET);
3335 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3338 if (free_buf) {
3339 free(host_ifconf);
3342 return ret;
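/* Device-mapper ioctls carry a variable-sized payload after struct dm_ioctl,
 * so do_ioctl_dm() stages the request in a larger temporary buffer and
 * converts the per-command payload separately in each direction.
 */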
3345 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
3346 abi_long cmd, abi_long arg)
3348 void *argptr;
3349 struct dm_ioctl *host_dm;
3350 abi_long guest_data;
3351 uint32_t guest_data_size;
3352 int target_size;
3353 const argtype *arg_type = ie->arg_type;
3354 abi_long ret;
3355 void *big_buf = NULL;
3356 char *host_data;
3358 arg_type++;
3359 target_size = thunk_type_size(arg_type, 0);
3360 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3361 if (!argptr) {
3362 ret = -TARGET_EFAULT;
3363 goto out;
3365 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3366 unlock_user(argptr, arg, 0);
3368 /* buf_temp is too small, so fetch things into a bigger buffer */
3369 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
3370 memcpy(big_buf, buf_temp, target_size);
3371 buf_temp = big_buf;
3372 host_dm = big_buf;
3374 guest_data = arg + host_dm->data_start;
3375 if ((guest_data - arg) < 0) {
3376 ret = -EINVAL;
3377 goto out;
3379 guest_data_size = host_dm->data_size - host_dm->data_start;
3380 host_data = (char*)host_dm + host_dm->data_start;
3382 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
3383 switch (ie->host_cmd) {
3384 case DM_REMOVE_ALL:
3385 case DM_LIST_DEVICES:
3386 case DM_DEV_CREATE:
3387 case DM_DEV_REMOVE:
3388 case DM_DEV_SUSPEND:
3389 case DM_DEV_STATUS:
3390 case DM_DEV_WAIT:
3391 case DM_TABLE_STATUS:
3392 case DM_TABLE_CLEAR:
3393 case DM_TABLE_DEPS:
3394 case DM_LIST_VERSIONS:
3395 /* no input data */
3396 break;
3397 case DM_DEV_RENAME:
3398 case DM_DEV_SET_GEOMETRY:
3399 /* data contains only strings */
3400 memcpy(host_data, argptr, guest_data_size);
3401 break;
3402 case DM_TARGET_MSG:
3403 memcpy(host_data, argptr, guest_data_size);
3404 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
3405 break;
3406 case DM_TABLE_LOAD:
3408 void *gspec = argptr;
3409 void *cur_data = host_data;
3410 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3411 int spec_size = thunk_type_size(arg_type, 0);
3412 int i;
3414 for (i = 0; i < host_dm->target_count; i++) {
3415 struct dm_target_spec *spec = cur_data;
3416 uint32_t next;
3417 int slen;
3419 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
3420 slen = strlen((char*)gspec + spec_size) + 1;
3421 next = spec->next;
3422 spec->next = sizeof(*spec) + slen;
3423 strcpy((char*)&spec[1], gspec + spec_size);
3424 gspec += next;
3425 cur_data += spec->next;
3427 break;
3429 default:
3430 ret = -TARGET_EINVAL;
3431 goto out;
3433 unlock_user(argptr, guest_data, 0);
3435 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3436 if (!is_error(ret)) {
3437 guest_data = arg + host_dm->data_start;
3438 guest_data_size = host_dm->data_size - host_dm->data_start;
3439 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
3440 switch (ie->host_cmd) {
3441 case DM_REMOVE_ALL:
3442 case DM_DEV_CREATE:
3443 case DM_DEV_REMOVE:
3444 case DM_DEV_RENAME:
3445 case DM_DEV_SUSPEND:
3446 case DM_DEV_STATUS:
3447 case DM_TABLE_LOAD:
3448 case DM_TABLE_CLEAR:
3449 case DM_TARGET_MSG:
3450 case DM_DEV_SET_GEOMETRY:
3451 /* no return data */
3452 break;
3453 case DM_LIST_DEVICES:
3455 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
3456 uint32_t remaining_data = guest_data_size;
3457 void *cur_data = argptr;
3458 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
3459 int nl_size = 12; /* can't use thunk_size due to alignment */
3461 while (1) {
3462 uint32_t next = nl->next;
3463 if (next) {
3464 nl->next = nl_size + (strlen(nl->name) + 1);
3466 if (remaining_data < nl->next) {
3467 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3468 break;
3470 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
3471 strcpy(cur_data + nl_size, nl->name);
3472 cur_data += nl->next;
3473 remaining_data -= nl->next;
3474 if (!next) {
3475 break;
3477 nl = (void*)nl + next;
3479 break;
3481 case DM_DEV_WAIT:
3482 case DM_TABLE_STATUS:
3484 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
3485 void *cur_data = argptr;
3486 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
3487 int spec_size = thunk_type_size(arg_type, 0);
3488 int i;
3490 for (i = 0; i < host_dm->target_count; i++) {
3491 uint32_t next = spec->next;
3492 int slen = strlen((char*)&spec[1]) + 1;
3493 spec->next = (cur_data - argptr) + spec_size + slen;
3494 if (guest_data_size < spec->next) {
3495 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3496 break;
3498 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
3499 strcpy(cur_data + spec_size, (char*)&spec[1]);
3500 cur_data = argptr + spec->next;
3501 spec = (void*)host_dm + host_dm->data_start + next;
3503 break;
3505 case DM_TABLE_DEPS:
3507 void *hdata = (void*)host_dm + host_dm->data_start;
3508 int count = *(uint32_t*)hdata;
3509 uint64_t *hdev = hdata + 8;
3510 uint64_t *gdev = argptr + 8;
3511 int i;
3513 *(uint32_t*)argptr = tswap32(count);
3514 for (i = 0; i < count; i++) {
3515 *gdev = tswap64(*hdev);
3516 gdev++;
3517 hdev++;
3519 break;
3521 case DM_LIST_VERSIONS:
3523 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
3524 uint32_t remaining_data = guest_data_size;
3525 void *cur_data = argptr;
3526 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
3527 int vers_size = thunk_type_size(arg_type, 0);
3529 while (1) {
3530 uint32_t next = vers->next;
3531 if (next) {
3532 vers->next = vers_size + (strlen(vers->name) + 1);
3534 if (remaining_data < vers->next) {
3535 host_dm->flags |= DM_BUFFER_FULL_FLAG;
3536 break;
3538 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
3539 strcpy(cur_data + vers_size, vers->name);
3540 cur_data += vers->next;
3541 remaining_data -= vers->next;
3542 if (!next) {
3543 break;
3545 vers = (void*)vers + next;
3547 break;
3549 default:
3550 ret = -TARGET_EINVAL;
3551 goto out;
3553 unlock_user(argptr, guest_data, guest_data_size);
3555 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3556 if (!argptr) {
3557 ret = -TARGET_EFAULT;
3558 goto out;
3560 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3561 unlock_user(argptr, arg, target_size);
3563 out:
3564 g_free(big_buf);
3565 return ret;
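/* do_ioctl_rt(): struct rtentry contains a pointer to the device name, so
 * the fields are converted one at a time and the rt_dev string is locked
 * from guest memory for the duration of the host ioctl.
 */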
3568 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
3569 int fd, abi_long cmd, abi_long arg)
3571 const argtype *arg_type = ie->arg_type;
3572 const StructEntry *se;
3573 const argtype *field_types;
3574 const int *dst_offsets, *src_offsets;
3575 int target_size;
3576 void *argptr;
3577 abi_ulong *target_rt_dev_ptr;
3578 unsigned long *host_rt_dev_ptr;
3579 abi_long ret;
3580 int i;
3582 assert(ie->access == IOC_W);
3583 assert(*arg_type == TYPE_PTR);
3584 arg_type++;
3585 assert(*arg_type == TYPE_STRUCT);
3586 target_size = thunk_type_size(arg_type, 0);
3587 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3588 if (!argptr) {
3589 return -TARGET_EFAULT;
3591 arg_type++;
3592 assert(*arg_type == (int)STRUCT_rtentry);
3593 se = struct_entries + *arg_type++;
3594 assert(se->convert[0] == NULL);
3595 /* convert struct here to be able to catch rt_dev string */
3596 field_types = se->field_types;
3597 dst_offsets = se->field_offsets[THUNK_HOST];
3598 src_offsets = se->field_offsets[THUNK_TARGET];
3599 for (i = 0; i < se->nb_fields; i++) {
3600 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
3601 assert(*field_types == TYPE_PTRVOID);
3602 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
3603 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
3604 if (*target_rt_dev_ptr != 0) {
3605 *host_rt_dev_ptr = (unsigned long)lock_user_string(
3606 tswapal(*target_rt_dev_ptr));
3607 if (!*host_rt_dev_ptr) {
3608 unlock_user(argptr, arg, 0);
3609 return -TARGET_EFAULT;
3611 } else {
3612 *host_rt_dev_ptr = 0;
3614 field_types++;
3615 continue;
3617 field_types = thunk_convert(buf_temp + dst_offsets[i],
3618 argptr + src_offsets[i],
3619 field_types, THUNK_HOST);
3621 unlock_user(argptr, arg, 0);
3623 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3624 if (*host_rt_dev_ptr != 0) {
3625 unlock_user((void *)*host_rt_dev_ptr,
3626 *target_rt_dev_ptr, 0);
3628 return ret;
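/* The ioctl translation table: ioctls.h expands into one IOCTLEntry per
 * supported command, optionally with a custom do_ioctl_fn handler; the
 * list is terminated by an all-zero entry.
 */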
3631 static IOCTLEntry ioctl_entries[] = {
3632 #define IOCTL(cmd, access, ...) \
3633 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3634 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3635 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3636 #include "ioctls.h"
3637 { 0, 0, },
3640 /* ??? Implement proper locking for ioctls. */
3641 /* do_ioctl() must return target values and target errnos. */
3642 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3644 const IOCTLEntry *ie;
3645 const argtype *arg_type;
3646 abi_long ret;
3647 uint8_t buf_temp[MAX_STRUCT_SIZE];
3648 int target_size;
3649 void *argptr;
3651 ie = ioctl_entries;
3652 for(;;) {
3653 if (ie->target_cmd == 0) {
3654 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3655 return -TARGET_ENOSYS;
3657 if (ie->target_cmd == cmd)
3658 break;
3659 ie++;
3661 arg_type = ie->arg_type;
3662 #if defined(DEBUG)
3663 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3664 #endif
3665 if (ie->do_ioctl) {
3666 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3669 switch(arg_type[0]) {
3670 case TYPE_NULL:
3671 /* no argument */
3672 ret = get_errno(ioctl(fd, ie->host_cmd));
3673 break;
3674 case TYPE_PTRVOID:
3675 case TYPE_INT:
3676 /* int argument */
3677 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3678 break;
3679 case TYPE_PTR:
3680 arg_type++;
3681 target_size = thunk_type_size(arg_type, 0);
3682 switch(ie->access) {
3683 case IOC_R:
3684 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3685 if (!is_error(ret)) {
3686 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3687 if (!argptr)
3688 return -TARGET_EFAULT;
3689 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3690 unlock_user(argptr, arg, target_size);
3692 break;
3693 case IOC_W:
3694 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3695 if (!argptr)
3696 return -TARGET_EFAULT;
3697 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3698 unlock_user(argptr, arg, 0);
3699 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3700 break;
3701 default:
3702 case IOC_RW:
3703 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3704 if (!argptr)
3705 return -TARGET_EFAULT;
3706 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3707 unlock_user(argptr, arg, 0);
3708 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3709 if (!is_error(ret)) {
3710 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3711 if (!argptr)
3712 return -TARGET_EFAULT;
3713 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3714 unlock_user(argptr, arg, target_size);
3716 break;
3718 break;
3719 default:
3720 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3721 (long)cmd, arg_type[0]);
3722 ret = -TARGET_ENOSYS;
3723 break;
3725 return ret;
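/* Bitmask translation tables for termios flags: each row maps a target
 * mask/value pair onto the corresponding host mask/value and is used by
 * the termios conversion routines below.
 */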
3728 static const bitmask_transtbl iflag_tbl[] = {
3729 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3730 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3731 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3732 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3733 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3734 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3735 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3736 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3737 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3738 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3739 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3740 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3741 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3742 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3743 { 0, 0, 0, 0 }
3746 static const bitmask_transtbl oflag_tbl[] = {
3747 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3748 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3749 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3750 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3751 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3752 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3753 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3754 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3755 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3756 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3757 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3758 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3759 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3760 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3761 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3762 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3763 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3764 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3765 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3766 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3767 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3768 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3769 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3770 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3771 { 0, 0, 0, 0 }
3774 static const bitmask_transtbl cflag_tbl[] = {
3775 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3776 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3777 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3778 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3779 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3780 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3781 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3782 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3783 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3784 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3785 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3786 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3787 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3788 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3789 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3790 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3791 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3792 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3793 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3794 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3795 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3796 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3797 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3798 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3799 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3800 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3801 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3802 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3803 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3804 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3805 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3806 { 0, 0, 0, 0 }
3809 static const bitmask_transtbl lflag_tbl[] = {
3810 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3811 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3812 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3813 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3814 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3815 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3816 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3817 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3818 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3819 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3820 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3821 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3822 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3823 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3824 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3825 { 0, 0, 0, 0 }
3828 static void target_to_host_termios (void *dst, const void *src)
3830 struct host_termios *host = dst;
3831 const struct target_termios *target = src;
3833 host->c_iflag =
3834 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3835 host->c_oflag =
3836 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3837 host->c_cflag =
3838 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3839 host->c_lflag =
3840 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3841 host->c_line = target->c_line;
3843 memset(host->c_cc, 0, sizeof(host->c_cc));
3844 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3845 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3846 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3847 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3848 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3849 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3850 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3851 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3852 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3853 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3854 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3855 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3856 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3857 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3858 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3859 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3860 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3863 static void host_to_target_termios (void *dst, const void *src)
3865 struct target_termios *target = dst;
3866 const struct host_termios *host = src;
3868 target->c_iflag =
3869 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3870 target->c_oflag =
3871 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3872 target->c_cflag =
3873 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3874 target->c_lflag =
3875 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3876 target->c_line = host->c_line;
3878 memset(target->c_cc, 0, sizeof(target->c_cc));
3879 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3880 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3881 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3882 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3883 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3884 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3885 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3886 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3887 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3888 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3889 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3890 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3891 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3892 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3893 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3894 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3895 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3898 static const StructEntry struct_termios_def = {
3899 .convert = { host_to_target_termios, target_to_host_termios },
3900 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3901 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3904 static bitmask_transtbl mmap_flags_tbl[] = {
3905 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3906 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3907 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3908 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3909 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3910 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3911 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3912 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3913 { 0, 0, 0, 0 }
3916 #if defined(TARGET_I386)
3918 /* NOTE: there is really only one LDT shared by all the threads */
3919 static uint8_t *ldt_table;
3921 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3923 int size;
3924 void *p;
3926 if (!ldt_table)
3927 return 0;
3928 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3929 if (size > bytecount)
3930 size = bytecount;
3931 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3932 if (!p)
3933 return -TARGET_EFAULT;
3934 /* ??? Should this be byteswapped? */
3935 memcpy(p, ldt_table, size);
3936 unlock_user(p, ptr, size);
3937 return size;
3940 /* XXX: add locking support */
3941 static abi_long write_ldt(CPUX86State *env,
3942 abi_ulong ptr, unsigned long bytecount, int oldmode)
3944 struct target_modify_ldt_ldt_s ldt_info;
3945 struct target_modify_ldt_ldt_s *target_ldt_info;
3946 int seg_32bit, contents, read_exec_only, limit_in_pages;
3947 int seg_not_present, useable, lm;
3948 uint32_t *lp, entry_1, entry_2;
3950 if (bytecount != sizeof(ldt_info))
3951 return -TARGET_EINVAL;
3952 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3953 return -TARGET_EFAULT;
3954 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3955 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
3956 ldt_info.limit = tswap32(target_ldt_info->limit);
3957 ldt_info.flags = tswap32(target_ldt_info->flags);
3958 unlock_user_struct(target_ldt_info, ptr, 0);
3960 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3961 return -TARGET_EINVAL;
3962 seg_32bit = ldt_info.flags & 1;
3963 contents = (ldt_info.flags >> 1) & 3;
3964 read_exec_only = (ldt_info.flags >> 3) & 1;
3965 limit_in_pages = (ldt_info.flags >> 4) & 1;
3966 seg_not_present = (ldt_info.flags >> 5) & 1;
3967 useable = (ldt_info.flags >> 6) & 1;
3968 #ifdef TARGET_ABI32
3969 lm = 0;
3970 #else
3971 lm = (ldt_info.flags >> 7) & 1;
3972 #endif
3973 if (contents == 3) {
3974 if (oldmode)
3975 return -TARGET_EINVAL;
3976 if (seg_not_present == 0)
3977 return -TARGET_EINVAL;
3979 /* allocate the LDT */
3980 if (!ldt_table) {
3981 env->ldt.base = target_mmap(0,
3982 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3983 PROT_READ|PROT_WRITE,
3984 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3985 if (env->ldt.base == -1)
3986 return -TARGET_ENOMEM;
3987 memset(g2h(env->ldt.base), 0,
3988 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3989 env->ldt.limit = 0xffff;
3990 ldt_table = g2h(env->ldt.base);
3993 /* NOTE: same code as Linux kernel */
3994 /* Allow LDTs to be cleared by the user. */
3995 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3996 if (oldmode ||
3997 (contents == 0 &&
3998 read_exec_only == 1 &&
3999 seg_32bit == 0 &&
4000 limit_in_pages == 0 &&
4001 seg_not_present == 1 &&
4002 useable == 0 )) {
4003 entry_1 = 0;
4004 entry_2 = 0;
4005 goto install;
4009 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4010 (ldt_info.limit & 0x0ffff);
4011 entry_2 = (ldt_info.base_addr & 0xff000000) |
4012 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4013 (ldt_info.limit & 0xf0000) |
4014 ((read_exec_only ^ 1) << 9) |
4015 (contents << 10) |
4016 ((seg_not_present ^ 1) << 15) |
4017 (seg_32bit << 22) |
4018 (limit_in_pages << 23) |
4019 (lm << 21) |
4020 0x7000;
4021 if (!oldmode)
4022 entry_2 |= (useable << 20);
4024 /* Install the new entry ... */
4025 install:
4026 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
4027 lp[0] = tswap32(entry_1);
4028 lp[1] = tswap32(entry_2);
4029 return 0;
4032 /* specific and weird i386 syscalls */
4033 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
4034 unsigned long bytecount)
4036 abi_long ret;
4038 switch (func) {
4039 case 0:
4040 ret = read_ldt(ptr, bytecount);
4041 break;
4042 case 1:
4043 ret = write_ldt(env, ptr, bytecount, 1);
4044 break;
4045 case 0x11:
4046 ret = write_ldt(env, ptr, bytecount, 0);
4047 break;
4048 default:
4049 ret = -TARGET_ENOSYS;
4050 break;
4052 return ret;
4055 #if defined(TARGET_I386) && defined(TARGET_ABI32)
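/* set_thread_area(2) emulation: install a TLS segment descriptor in the guest
   GDT, allocating a free TLS slot when entry_number is -1. */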
4056 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
4058 uint64_t *gdt_table = g2h(env->gdt.base);
4059 struct target_modify_ldt_ldt_s ldt_info;
4060 struct target_modify_ldt_ldt_s *target_ldt_info;
4061 int seg_32bit, contents, read_exec_only, limit_in_pages;
4062 int seg_not_present, useable, lm;
4063 uint32_t *lp, entry_1, entry_2;
4064 int i;
4066 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4067 if (!target_ldt_info)
4068 return -TARGET_EFAULT;
4069 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
4070 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
4071 ldt_info.limit = tswap32(target_ldt_info->limit);
4072 ldt_info.flags = tswap32(target_ldt_info->flags);
4073 if (ldt_info.entry_number == -1) {
4074 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
4075 if (gdt_table[i] == 0) {
4076 ldt_info.entry_number = i;
4077 target_ldt_info->entry_number = tswap32(i);
4078 break;
4082 unlock_user_struct(target_ldt_info, ptr, 1);
4084 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
4085 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
4086 return -TARGET_EINVAL;
4087 seg_32bit = ldt_info.flags & 1;
4088 contents = (ldt_info.flags >> 1) & 3;
4089 read_exec_only = (ldt_info.flags >> 3) & 1;
4090 limit_in_pages = (ldt_info.flags >> 4) & 1;
4091 seg_not_present = (ldt_info.flags >> 5) & 1;
4092 useable = (ldt_info.flags >> 6) & 1;
4093 #ifdef TARGET_ABI32
4094 lm = 0;
4095 #else
4096 lm = (ldt_info.flags >> 7) & 1;
4097 #endif
4099 if (contents == 3) {
4100 if (seg_not_present == 0)
4101 return -TARGET_EINVAL;
4104 /* NOTE: same code as Linux kernel */
4105 /* Allow LDTs to be cleared by the user. */
4106 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
4107 if ((contents == 0 &&
4108 read_exec_only == 1 &&
4109 seg_32bit == 0 &&
4110 limit_in_pages == 0 &&
4111 seg_not_present == 1 &&
4112 useable == 0 )) {
4113 entry_1 = 0;
4114 entry_2 = 0;
4115 goto install;
4119 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
4120 (ldt_info.limit & 0x0ffff);
4121 entry_2 = (ldt_info.base_addr & 0xff000000) |
4122 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
4123 (ldt_info.limit & 0xf0000) |
4124 ((read_exec_only ^ 1) << 9) |
4125 (contents << 10) |
4126 ((seg_not_present ^ 1) << 15) |
4127 (seg_32bit << 22) |
4128 (limit_in_pages << 23) |
4129 (useable << 20) |
4130 (lm << 21) |
4131 0x7000;
4133 /* Install the new entry ... */
4134 install:
4135 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
4136 lp[0] = tswap32(entry_1);
4137 lp[1] = tswap32(entry_2);
4138 return 0;
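/* get_thread_area(2) emulation: decode a TLS descriptor from the guest GDT
   back into the target's descriptor structure. */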
4141 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
4143 struct target_modify_ldt_ldt_s *target_ldt_info;
4144 uint64_t *gdt_table = g2h(env->gdt.base);
4145 uint32_t base_addr, limit, flags;
4146 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
4147 int seg_not_present, useable, lm;
4148 uint32_t *lp, entry_1, entry_2;
4150 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
4151 if (!target_ldt_info)
4152 return -TARGET_EFAULT;
4153 idx = tswap32(target_ldt_info->entry_number);
4154 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
4155 idx > TARGET_GDT_ENTRY_TLS_MAX) {
4156 unlock_user_struct(target_ldt_info, ptr, 1);
4157 return -TARGET_EINVAL;
4159 lp = (uint32_t *)(gdt_table + idx);
4160 entry_1 = tswap32(lp[0]);
4161 entry_2 = tswap32(lp[1]);
4163 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
4164 contents = (entry_2 >> 10) & 3;
4165 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
4166 seg_32bit = (entry_2 >> 22) & 1;
4167 limit_in_pages = (entry_2 >> 23) & 1;
4168 useable = (entry_2 >> 20) & 1;
4169 #ifdef TARGET_ABI32
4170 lm = 0;
4171 #else
4172 lm = (entry_2 >> 21) & 1;
4173 #endif
4174 flags = (seg_32bit << 0) | (contents << 1) |
4175 (read_exec_only << 3) | (limit_in_pages << 4) |
4176 (seg_not_present << 5) | (useable << 6) | (lm << 7);
4177 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
4178 base_addr = (entry_1 >> 16) |
4179 (entry_2 & 0xff000000) |
4180 ((entry_2 & 0xff) << 16);
4181 target_ldt_info->base_addr = tswapal(base_addr);
4182 target_ldt_info->limit = tswap32(limit);
4183 target_ldt_info->flags = tswap32(flags);
4184 unlock_user_struct(target_ldt_info, ptr, 1);
4185 return 0;
4187 #endif /* TARGET_I386 && TARGET_ABI32 */
4189 #ifndef TARGET_ABI32
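/* arch_prctl(2) emulation for 64-bit x86: set or query the FS/GS segment base. */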
4190 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
4192 abi_long ret = 0;
4193 abi_ulong val;
4194 int idx;
4196 switch(code) {
4197 case TARGET_ARCH_SET_GS:
4198 case TARGET_ARCH_SET_FS:
4199 if (code == TARGET_ARCH_SET_GS)
4200 idx = R_GS;
4201 else
4202 idx = R_FS;
4203 cpu_x86_load_seg(env, idx, 0);
4204 env->segs[idx].base = addr;
4205 break;
4206 case TARGET_ARCH_GET_GS:
4207 case TARGET_ARCH_GET_FS:
4208 if (code == TARGET_ARCH_GET_GS)
4209 idx = R_GS;
4210 else
4211 idx = R_FS;
4212 val = env->segs[idx].base;
4213 if (put_user(val, addr, abi_ulong))
4214 ret = -TARGET_EFAULT;
4215 break;
4216 default:
4217 ret = -TARGET_EINVAL;
4218 break;
4220 return ret;
4222 #endif
4224 #endif /* defined(TARGET_I386) */
4226 #define NEW_STACK_SIZE 0x40000
4229 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
4230 typedef struct {
4231 CPUArchState *env;
4232 pthread_mutex_t mutex;
4233 pthread_cond_t cond;
4234 pthread_t thread;
4235 uint32_t tid;
4236 abi_ulong child_tidptr;
4237 abi_ulong parent_tidptr;
4238 sigset_t sigmask;
4239 } new_thread_info;
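/* Start routine for threads created with CLONE_VM: record the host TID,
   publish it to the child/parent tid pointers, restore the signal mask,
   wake the parent and enter the guest CPU loop. */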
4241 static void *clone_func(void *arg)
4243 new_thread_info *info = arg;
4244 CPUArchState *env;
4245 CPUState *cpu;
4246 TaskState *ts;
4248 env = info->env;
4249 cpu = ENV_GET_CPU(env);
4250 thread_cpu = cpu;
4251 ts = (TaskState *)cpu->opaque;
4252 info->tid = gettid();
4253 cpu->host_tid = info->tid;
4254 task_settid(ts);
4255 if (info->child_tidptr)
4256 put_user_u32(info->tid, info->child_tidptr);
4257 if (info->parent_tidptr)
4258 put_user_u32(info->tid, info->parent_tidptr);
4259 /* Enable signals. */
4260 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
4261 /* Signal to the parent that we're ready. */
4262 pthread_mutex_lock(&info->mutex);
4263 pthread_cond_broadcast(&info->cond);
4264 pthread_mutex_unlock(&info->mutex);
4265 /* Wait until the parent has finished initializing the TLS state. */
4266 pthread_mutex_lock(&clone_lock);
4267 pthread_mutex_unlock(&clone_lock);
4268 cpu_loop(env);
4269 /* never exits */
4270 return NULL;
4273 /* do_fork() must return host values and target errnos (unlike most
4274 do_*() functions). */
4275 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
4276 abi_ulong parent_tidptr, target_ulong newtls,
4277 abi_ulong child_tidptr)
4279 CPUState *cpu = ENV_GET_CPU(env);
4280 int ret;
4281 TaskState *ts;
4282 CPUState *new_cpu;
4283 CPUArchState *new_env;
4284 unsigned int nptl_flags;
4285 sigset_t sigmask;
4287 /* Emulate vfork() with fork() */
4288 if (flags & CLONE_VFORK)
4289 flags &= ~(CLONE_VFORK | CLONE_VM);
4291 if (flags & CLONE_VM) {
4292 TaskState *parent_ts = (TaskState *)cpu->opaque;
4293 new_thread_info info;
4294 pthread_attr_t attr;
4296 ts = g_malloc0(sizeof(TaskState));
4297 init_task_state(ts);
4298 /* we create a new CPU instance. */
4299 new_env = cpu_copy(env);
4300 /* Init regs that differ from the parent. */
4301 cpu_clone_regs(new_env, newsp);
4302 new_cpu = ENV_GET_CPU(new_env);
4303 new_cpu->opaque = ts;
4304 ts->bprm = parent_ts->bprm;
4305 ts->info = parent_ts->info;
4306 nptl_flags = flags;
4307 flags &= ~CLONE_NPTL_FLAGS2;
4309 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4310 ts->child_tidptr = child_tidptr;
4313 if (nptl_flags & CLONE_SETTLS)
4314 cpu_set_tls (new_env, newtls);
4316 /* Grab a mutex so that thread setup appears atomic. */
4317 pthread_mutex_lock(&clone_lock);
4319 memset(&info, 0, sizeof(info));
4320 pthread_mutex_init(&info.mutex, NULL);
4321 pthread_mutex_lock(&info.mutex);
4322 pthread_cond_init(&info.cond, NULL);
4323 info.env = new_env;
4324 if (nptl_flags & CLONE_CHILD_SETTID)
4325 info.child_tidptr = child_tidptr;
4326 if (nptl_flags & CLONE_PARENT_SETTID)
4327 info.parent_tidptr = parent_tidptr;
4329 ret = pthread_attr_init(&attr);
4330 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4331 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4332 /* It is not safe to deliver signals until the child has finished
4333 initializing, so temporarily block all signals. */
4334 sigfillset(&sigmask);
4335 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4337 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4338 /* TODO: Free new CPU state if thread creation failed. */
4340 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4341 pthread_attr_destroy(&attr);
4342 if (ret == 0) {
4343 /* Wait for the child to initialize. */
4344 pthread_cond_wait(&info.cond, &info.mutex);
4345 ret = info.tid;
4346 if (flags & CLONE_PARENT_SETTID)
4347 put_user_u32(ret, parent_tidptr);
4348 } else {
4349 ret = -1;
4351 pthread_mutex_unlock(&info.mutex);
4352 pthread_cond_destroy(&info.cond);
4353 pthread_mutex_destroy(&info.mutex);
4354 pthread_mutex_unlock(&clone_lock);
4355 } else {
4356 /* if CLONE_VM is not set, we consider it a fork */
4357 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4358 return -EINVAL;
4359 fork_start();
4360 ret = fork();
4361 if (ret == 0) {
4362 /* Child Process. */
4363 cpu_clone_regs(env, newsp);
4364 fork_end(1);
4365 /* There is a race condition here. The parent process could
4366 theoretically read the TID in the child process before the child
4367 tid is set. Avoiding this would require using either ptrace
4368 (not implemented) or having *_tidptr point at a shared memory
4369 mapping. We can't repeat the spinlock hack used above because
4370 the child process gets its own copy of the lock. */
4371 if (flags & CLONE_CHILD_SETTID)
4372 put_user_u32(gettid(), child_tidptr);
4373 if (flags & CLONE_PARENT_SETTID)
4374 put_user_u32(gettid(), parent_tidptr);
4375 ts = (TaskState *)cpu->opaque;
4376 if (flags & CLONE_SETTLS)
4377 cpu_set_tls (env, newtls);
4378 if (flags & CLONE_CHILD_CLEARTID)
4379 ts->child_tidptr = child_tidptr;
4380 } else {
4381 fork_end(0);
4384 return ret;
4387 /* warning: doesn't handle Linux-specific flags... */
4388 static int target_to_host_fcntl_cmd(int cmd)
4390 switch(cmd) {
4391 case TARGET_F_DUPFD:
4392 case TARGET_F_GETFD:
4393 case TARGET_F_SETFD:
4394 case TARGET_F_GETFL:
4395 case TARGET_F_SETFL:
4396 return cmd;
4397 case TARGET_F_GETLK:
4398 return F_GETLK;
4399 case TARGET_F_SETLK:
4400 return F_SETLK;
4401 case TARGET_F_SETLKW:
4402 return F_SETLKW;
4403 case TARGET_F_GETOWN:
4404 return F_GETOWN;
4405 case TARGET_F_SETOWN:
4406 return F_SETOWN;
4407 case TARGET_F_GETSIG:
4408 return F_GETSIG;
4409 case TARGET_F_SETSIG:
4410 return F_SETSIG;
4411 #if TARGET_ABI_BITS == 32
4412 case TARGET_F_GETLK64:
4413 return F_GETLK64;
4414 case TARGET_F_SETLK64:
4415 return F_SETLK64;
4416 case TARGET_F_SETLKW64:
4417 return F_SETLKW64;
4418 #endif
4419 case TARGET_F_SETLEASE:
4420 return F_SETLEASE;
4421 case TARGET_F_GETLEASE:
4422 return F_GETLEASE;
4423 #ifdef F_DUPFD_CLOEXEC
4424 case TARGET_F_DUPFD_CLOEXEC:
4425 return F_DUPFD_CLOEXEC;
4426 #endif
4427 case TARGET_F_NOTIFY:
4428 return F_NOTIFY;
4429 #ifdef F_GETOWN_EX
4430 case TARGET_F_GETOWN_EX:
4431 return F_GETOWN_EX;
4432 #endif
4433 #ifdef F_SETOWN_EX
4434 case TARGET_F_SETOWN_EX:
4435 return F_SETOWN_EX;
4436 #endif
4437 default:
4438 return -TARGET_EINVAL;
4440 return -TARGET_EINVAL;
4443 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
4444 static const bitmask_transtbl flock_tbl[] = {
4445 TRANSTBL_CONVERT(F_RDLCK),
4446 TRANSTBL_CONVERT(F_WRLCK),
4447 TRANSTBL_CONVERT(F_UNLCK),
4448 TRANSTBL_CONVERT(F_EXLCK),
4449 TRANSTBL_CONVERT(F_SHLCK),
4450 { 0, 0, 0, 0 }
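/* fcntl(2) emulation: translate the command and any flock/f_owner_ex
   structures between target and host layouts around the host call. */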
4453 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4455 struct flock fl;
4456 struct target_flock *target_fl;
4457 struct flock64 fl64;
4458 struct target_flock64 *target_fl64;
4459 #ifdef F_GETOWN_EX
4460 struct f_owner_ex fox;
4461 struct target_f_owner_ex *target_fox;
4462 #endif
4463 abi_long ret;
4464 int host_cmd = target_to_host_fcntl_cmd(cmd);
4466 if (host_cmd == -TARGET_EINVAL)
4467 return host_cmd;
4469 switch(cmd) {
4470 case TARGET_F_GETLK:
4471 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4472 return -TARGET_EFAULT;
4473 fl.l_type =
4474 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4475 fl.l_whence = tswap16(target_fl->l_whence);
4476 fl.l_start = tswapal(target_fl->l_start);
4477 fl.l_len = tswapal(target_fl->l_len);
4478 fl.l_pid = tswap32(target_fl->l_pid);
4479 unlock_user_struct(target_fl, arg, 0);
4480 ret = get_errno(fcntl(fd, host_cmd, &fl));
4481 if (ret == 0) {
4482 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4483 return -TARGET_EFAULT;
4484 target_fl->l_type =
4485 host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
4486 target_fl->l_whence = tswap16(fl.l_whence);
4487 target_fl->l_start = tswapal(fl.l_start);
4488 target_fl->l_len = tswapal(fl.l_len);
4489 target_fl->l_pid = tswap32(fl.l_pid);
4490 unlock_user_struct(target_fl, arg, 1);
4492 break;
4494 case TARGET_F_SETLK:
4495 case TARGET_F_SETLKW:
4496 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4497 return -TARGET_EFAULT;
4498 fl.l_type =
4499 target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
4500 fl.l_whence = tswap16(target_fl->l_whence);
4501 fl.l_start = tswapal(target_fl->l_start);
4502 fl.l_len = tswapal(target_fl->l_len);
4503 fl.l_pid = tswap32(target_fl->l_pid);
4504 unlock_user_struct(target_fl, arg, 0);
4505 ret = get_errno(fcntl(fd, host_cmd, &fl));
4506 break;
4508 case TARGET_F_GETLK64:
4509 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4510 return -TARGET_EFAULT;
4511 fl64.l_type =
4512 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4513 fl64.l_whence = tswap16(target_fl64->l_whence);
4514 fl64.l_start = tswap64(target_fl64->l_start);
4515 fl64.l_len = tswap64(target_fl64->l_len);
4516 fl64.l_pid = tswap32(target_fl64->l_pid);
4517 unlock_user_struct(target_fl64, arg, 0);
4518 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4519 if (ret == 0) {
4520 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4521 return -TARGET_EFAULT;
4522 target_fl64->l_type =
4523 host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
4524 target_fl64->l_whence = tswap16(fl64.l_whence);
4525 target_fl64->l_start = tswap64(fl64.l_start);
4526 target_fl64->l_len = tswap64(fl64.l_len);
4527 target_fl64->l_pid = tswap32(fl64.l_pid);
4528 unlock_user_struct(target_fl64, arg, 1);
4530 break;
4531 case TARGET_F_SETLK64:
4532 case TARGET_F_SETLKW64:
4533 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4534 return -TARGET_EFAULT;
4535 fl64.l_type =
4536 target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
4537 fl64.l_whence = tswap16(target_fl64->l_whence);
4538 fl64.l_start = tswap64(target_fl64->l_start);
4539 fl64.l_len = tswap64(target_fl64->l_len);
4540 fl64.l_pid = tswap32(target_fl64->l_pid);
4541 unlock_user_struct(target_fl64, arg, 0);
4542 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4543 break;
4545 case TARGET_F_GETFL:
4546 ret = get_errno(fcntl(fd, host_cmd, arg));
4547 if (ret >= 0) {
4548 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4550 break;
4552 case TARGET_F_SETFL:
4553 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4554 break;
4556 #ifdef F_GETOWN_EX
4557 case TARGET_F_GETOWN_EX:
4558 ret = get_errno(fcntl(fd, host_cmd, &fox));
4559 if (ret >= 0) {
4560 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
4561 return -TARGET_EFAULT;
4562 target_fox->type = tswap32(fox.type);
4563 target_fox->pid = tswap32(fox.pid);
4564 unlock_user_struct(target_fox, arg, 1);
4566 break;
4567 #endif
4569 #ifdef F_SETOWN_EX
4570 case TARGET_F_SETOWN_EX:
4571 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
4572 return -TARGET_EFAULT;
4573 fox.type = tswap32(target_fox->type);
4574 fox.pid = tswap32(target_fox->pid);
4575 unlock_user_struct(target_fox, arg, 0);
4576 ret = get_errno(fcntl(fd, host_cmd, &fox));
4577 break;
4578 #endif
4580 case TARGET_F_SETOWN:
4581 case TARGET_F_GETOWN:
4582 case TARGET_F_SETSIG:
4583 case TARGET_F_GETSIG:
4584 case TARGET_F_SETLEASE:
4585 case TARGET_F_GETLEASE:
4586 ret = get_errno(fcntl(fd, host_cmd, arg));
4587 break;
4589 default:
4590 ret = get_errno(fcntl(fd, cmd, arg));
4591 break;
4593 return ret;
4596 #ifdef USE_UID16
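/* With USE_UID16 the target uses the 16-bit uid/gid syscalls: clamp host ids
   that do not fit in 16 bits and sign-extend the -1 wildcard when converting
   back. */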
4598 static inline int high2lowuid(int uid)
4600 if (uid > 65535)
4601 return 65534;
4602 else
4603 return uid;
4606 static inline int high2lowgid(int gid)
4608 if (gid > 65535)
4609 return 65534;
4610 else
4611 return gid;
4614 static inline int low2highuid(int uid)
4616 if ((int16_t)uid == -1)
4617 return -1;
4618 else
4619 return uid;
4622 static inline int low2highgid(int gid)
4624 if ((int16_t)gid == -1)
4625 return -1;
4626 else
4627 return gid;
4629 static inline int tswapid(int id)
4631 return tswap16(id);
4634 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
4636 #else /* !USE_UID16 */
4637 static inline int high2lowuid(int uid)
4639 return uid;
4641 static inline int high2lowgid(int gid)
4643 return gid;
4645 static inline int low2highuid(int uid)
4647 return uid;
4649 static inline int low2highgid(int gid)
4651 return gid;
4653 static inline int tswapid(int id)
4655 return tswap32(id);
4658 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
4660 #endif /* USE_UID16 */
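/* One-time setup: register structure conversion thunks, build the
   target-to-host errno table and patch ioctl numbers whose size field
   must be taken from the thunk type. */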
4662 void syscall_init(void)
4664 IOCTLEntry *ie;
4665 const argtype *arg_type;
4666 int size;
4667 int i;
4669 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4670 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4671 #include "syscall_types.h"
4672 #undef STRUCT
4673 #undef STRUCT_SPECIAL
4675 /* Build target_to_host_errno_table[] from
4676 * host_to_target_errno_table[]. */
4677 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
4678 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4681 /* We patch the ioctl size if necessary. We rely on the fact that
4682 no ioctl has all bits set in its size field */
4683 ie = ioctl_entries;
4684 while (ie->target_cmd != 0) {
4685 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4686 TARGET_IOC_SIZEMASK) {
4687 arg_type = ie->arg_type;
4688 if (arg_type[0] != TYPE_PTR) {
4689 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4690 ie->target_cmd);
4691 exit(1);
4693 arg_type++;
4694 size = thunk_type_size(arg_type, 0);
4695 ie->target_cmd = (ie->target_cmd &
4696 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4697 (size << TARGET_IOC_SIZESHIFT);
4700 /* automatic consistency check if same arch */
4701 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4702 (defined(__x86_64__) && defined(TARGET_X86_64))
4703 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4704 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4705 ie->name, ie->target_cmd, ie->host_cmd);
4707 #endif
4708 ie++;
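/* Reassemble a 64-bit offset from the two 32-bit halves passed in separate
   registers on 32-bit ABIs; on 64-bit ABIs the first argument already holds it. */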
4712 #if TARGET_ABI_BITS == 32
4713 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4715 #ifdef TARGET_WORDS_BIGENDIAN
4716 return ((uint64_t)word0 << 32) | word1;
4717 #else
4718 return ((uint64_t)word1 << 32) | word0;
4719 #endif
4721 #else /* TARGET_ABI_BITS == 32 */
4722 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4724 return word0;
4726 #endif /* TARGET_ABI_BITS != 32 */
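/* truncate64/ftruncate64: on ABIs that pass 64-bit values in aligned register
   pairs the offset starts one argument later. */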
4728 #ifdef TARGET_NR_truncate64
4729 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4730 abi_long arg2,
4731 abi_long arg3,
4732 abi_long arg4)
4734 if (regpairs_aligned(cpu_env)) {
4735 arg2 = arg3;
4736 arg3 = arg4;
4738 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4740 #endif
4742 #ifdef TARGET_NR_ftruncate64
4743 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4744 abi_long arg2,
4745 abi_long arg3,
4746 abi_long arg4)
4748 if (regpairs_aligned(cpu_env)) {
4749 arg2 = arg3;
4750 arg3 = arg4;
4752 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4754 #endif
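/* Copy timespec and itimerspec values between guest and host representations,
   byte-swapping each field as needed. */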
4756 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4757 abi_ulong target_addr)
4759 struct target_timespec *target_ts;
4761 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4762 return -TARGET_EFAULT;
4763 host_ts->tv_sec = tswapal(target_ts->tv_sec);
4764 host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
4765 unlock_user_struct(target_ts, target_addr, 0);
4766 return 0;
4769 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4770 struct timespec *host_ts)
4772 struct target_timespec *target_ts;
4774 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4775 return -TARGET_EFAULT;
4776 target_ts->tv_sec = tswapal(host_ts->tv_sec);
4777 target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
4778 unlock_user_struct(target_ts, target_addr, 1);
4779 return 0;
4782 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
4783 abi_ulong target_addr)
4785 struct target_itimerspec *target_itspec;
4787 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
4788 return -TARGET_EFAULT;
4791 host_itspec->it_interval.tv_sec =
4792 tswapal(target_itspec->it_interval.tv_sec);
4793 host_itspec->it_interval.tv_nsec =
4794 tswapal(target_itspec->it_interval.tv_nsec);
4795 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
4796 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
4798 unlock_user_struct(target_itspec, target_addr, 1);
4799 return 0;
4802 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
4803 struct itimerspec *host_its)
4805 struct target_itimerspec *target_itspec;
4807 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
4808 return -TARGET_EFAULT;
4811 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
4812 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
4814 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
4815 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
4817 unlock_user_struct(target_itspec, target_addr, 0);
4818 return 0;
4821 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
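/* Convert a host struct stat into the target's stat64 layout (with a special
   case for the ARM EABI variant). */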
4822 static inline abi_long host_to_target_stat64(void *cpu_env,
4823 abi_ulong target_addr,
4824 struct stat *host_st)
4826 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
4827 if (((CPUARMState *)cpu_env)->eabi) {
4828 struct target_eabi_stat64 *target_st;
4830 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4831 return -TARGET_EFAULT;
4832 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4833 __put_user(host_st->st_dev, &target_st->st_dev);
4834 __put_user(host_st->st_ino, &target_st->st_ino);
4835 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4836 __put_user(host_st->st_ino, &target_st->__st_ino);
4837 #endif
4838 __put_user(host_st->st_mode, &target_st->st_mode);
4839 __put_user(host_st->st_nlink, &target_st->st_nlink);
4840 __put_user(host_st->st_uid, &target_st->st_uid);
4841 __put_user(host_st->st_gid, &target_st->st_gid);
4842 __put_user(host_st->st_rdev, &target_st->st_rdev);
4843 __put_user(host_st->st_size, &target_st->st_size);
4844 __put_user(host_st->st_blksize, &target_st->st_blksize);
4845 __put_user(host_st->st_blocks, &target_st->st_blocks);
4846 __put_user(host_st->st_atime, &target_st->target_st_atime);
4847 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4848 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4849 unlock_user_struct(target_st, target_addr, 1);
4850 } else
4851 #endif
4853 #if defined(TARGET_HAS_STRUCT_STAT64)
4854 struct target_stat64 *target_st;
4855 #else
4856 struct target_stat *target_st;
4857 #endif
4859 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4860 return -TARGET_EFAULT;
4861 memset(target_st, 0, sizeof(*target_st));
4862 __put_user(host_st->st_dev, &target_st->st_dev);
4863 __put_user(host_st->st_ino, &target_st->st_ino);
4864 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4865 __put_user(host_st->st_ino, &target_st->__st_ino);
4866 #endif
4867 __put_user(host_st->st_mode, &target_st->st_mode);
4868 __put_user(host_st->st_nlink, &target_st->st_nlink);
4869 __put_user(host_st->st_uid, &target_st->st_uid);
4870 __put_user(host_st->st_gid, &target_st->st_gid);
4871 __put_user(host_st->st_rdev, &target_st->st_rdev);
4872 /* XXX: better use of kernel struct */
4873 __put_user(host_st->st_size, &target_st->st_size);
4874 __put_user(host_st->st_blksize, &target_st->st_blksize);
4875 __put_user(host_st->st_blocks, &target_st->st_blocks);
4876 __put_user(host_st->st_atime, &target_st->target_st_atime);
4877 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4878 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4879 unlock_user_struct(target_st, target_addr, 1);
4882 return 0;
4884 #endif
4886 /* ??? Using host futex calls even when target atomic operations
4887 are not really atomic probably breaks things. However, implementing
4888 futexes locally would make futexes shared between multiple processes
4889 tricky; in any case they're probably useless, because guest atomic
4890 operations won't work either. */
4891 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4892 target_ulong uaddr2, int val3)
4894 struct timespec ts, *pts;
4895 int base_op;
4897 /* ??? We assume FUTEX_* constants are the same on both host
4898 and target. */
4899 #ifdef FUTEX_CMD_MASK
4900 base_op = op & FUTEX_CMD_MASK;
4901 #else
4902 base_op = op;
4903 #endif
4904 switch (base_op) {
4905 case FUTEX_WAIT:
4906 case FUTEX_WAIT_BITSET:
4907 if (timeout) {
4908 pts = &ts;
4909 target_to_host_timespec(pts, timeout);
4910 } else {
4911 pts = NULL;
4913 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4914 pts, NULL, val3));
4915 case FUTEX_WAKE:
4916 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4917 case FUTEX_FD:
4918 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4919 case FUTEX_REQUEUE:
4920 case FUTEX_CMP_REQUEUE:
4921 case FUTEX_WAKE_OP:
4922 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4923 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4924 But the prototype takes a `struct timespec *'; insert casts
4925 to satisfy the compiler. We do not need to tswap TIMEOUT
4926 since it's not compared to guest memory. */
4927 pts = (struct timespec *)(uintptr_t) timeout;
4928 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4929 g2h(uaddr2),
4930 (base_op == FUTEX_CMP_REQUEUE
4931 ? tswap32(val3)
4932 : val3)));
4933 default:
4934 return -TARGET_ENOSYS;
4938 /* Map host to target signal numbers for the wait family of syscalls.
4939 Assume all other status bits are the same. */
4940 int host_to_target_waitstatus(int status)
4942 if (WIFSIGNALED(status)) {
4943 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4945 if (WIFSTOPPED(status)) {
4946 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4947 | (status & 0xff);
4949 return status;
4952 static int relstr_to_int(const char *s)
4954 /* Convert a uname release string like "2.6.18" to an integer
4955 * of the form 0x020612. (Beware that 0x020612 is *not* 2.6.12.)
4957 int i, n, tmp;
4959 tmp = 0;
4960 for (i = 0; i < 3; i++) {
4961 n = 0;
4962 while (*s >= '0' && *s <= '9') {
4963 n *= 10;
4964 n += *s - '0';
4965 s++;
4967 tmp = (tmp << 8) + n;
4968 if (*s == '.') {
4969 s++;
4972 return tmp;
4975 int get_osversion(void)
4977 static int osversion;
4978 struct new_utsname buf;
4979 const char *s;
4981 if (osversion)
4982 return osversion;
4983 if (qemu_uname_release && *qemu_uname_release) {
4984 s = qemu_uname_release;
4985 } else {
4986 if (sys_uname(&buf))
4987 return 0;
4988 s = buf.release;
4990 osversion = relstr_to_int(s);
4991 return osversion;
4994 void init_qemu_uname_release(void)
4996 /* Initialize qemu_uname_release for later use.
4997 * If the host kernel is too old and the user hasn't asked for
4998 * a specific fake version number, we might want to fake a minimum
4999 * target kernel version.
5001 #ifdef UNAME_MINIMUM_RELEASE
5002 struct new_utsname buf;
5004 if (qemu_uname_release && *qemu_uname_release) {
5005 return;
5008 if (sys_uname(&buf)) {
5009 return;
5012 if (relstr_to_int(buf.release) < relstr_to_int(UNAME_MINIMUM_RELEASE)) {
5013 qemu_uname_release = UNAME_MINIMUM_RELEASE;
5015 #endif
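/* Emulate /proc/self/maps: keep only host mappings that are valid in the guest
   address space, rewriting the addresses with h2g(), and on some targets append
   a synthetic [stack] line. */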
5018 static int open_self_maps(void *cpu_env, int fd)
5020 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5021 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5022 TaskState *ts = cpu->opaque;
5023 #endif
5024 FILE *fp;
5025 char *line = NULL;
5026 size_t len = 0;
5027 ssize_t read;
5029 fp = fopen("/proc/self/maps", "r");
5030 if (fp == NULL) {
5031 return -EACCES;
5034 while ((read = getline(&line, &len, fp)) != -1) {
5035 int fields, dev_maj, dev_min, inode;
5036 uint64_t min, max, offset;
5037 char flag_r, flag_w, flag_x, flag_p;
5038 char path[512] = "";
5039 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
5040 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
5041 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
5043 if ((fields < 10) || (fields > 11)) {
5044 continue;
5046 if (!strncmp(path, "[stack]", 7)) {
5047 continue;
5049 if (h2g_valid(min) && h2g_valid(max)) {
5050 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
5051 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
5052 h2g(min), h2g(max), flag_r, flag_w,
5053 flag_x, flag_p, offset, dev_maj, dev_min, inode,
5054 path[0] ? " " : "", path);
5058 free(line);
5059 fclose(fp);
5061 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
5062 dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
5063 (unsigned long long)ts->info->stack_limit,
5064 (unsigned long long)(ts->info->start_stack +
5065 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
5066 (unsigned long long)0);
5067 #endif
5069 return 0;
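/* Emulate /proc/self/stat: report the real pid, the program name and the guest
   stack address; every other field is written as 0. */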
5072 static int open_self_stat(void *cpu_env, int fd)
5074 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5075 TaskState *ts = cpu->opaque;
5076 abi_ulong start_stack = ts->info->start_stack;
5077 int i;
5079 for (i = 0; i < 44; i++) {
5080 char buf[128];
5081 int len;
5082 uint64_t val = 0;
5084 if (i == 0) {
5085 /* pid */
5086 val = getpid();
5087 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5088 } else if (i == 1) {
5089 /* app name */
5090 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
5091 } else if (i == 27) {
5092 /* stack bottom */
5093 val = start_stack;
5094 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
5095 } else {
5096 /* for the rest, there is MasterCard */
5097 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
5100 len = strlen(buf);
5101 if (write(fd, buf, len) != len) {
5102 return -1;
5106 return 0;
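/* Emulate /proc/self/auxv by copying the guest's saved auxiliary vector into
   the temporary file. */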
5109 static int open_self_auxv(void *cpu_env, int fd)
5111 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
5112 TaskState *ts = cpu->opaque;
5113 abi_ulong auxv = ts->info->saved_auxv;
5114 abi_ulong len = ts->info->auxv_len;
5115 char *ptr;
5118 * The auxiliary vector is stored on the target process stack.
5119 * Read in the whole auxv vector and copy it to the file.
5121 ptr = lock_user(VERIFY_READ, auxv, len, 0);
5122 if (ptr != NULL) {
5123 while (len > 0) {
5124 ssize_t r;
5125 r = write(fd, ptr, len);
5126 if (r <= 0) {
5127 break;
5129 len -= r;
5130 ptr += r;
5132 lseek(fd, 0, SEEK_SET);
5133 unlock_user(ptr, auxv, len);
5136 return 0;
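/* Return non-zero if filename names the given entry under /proc/self/ or
   /proc/<our pid>/. */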
5139 static int is_proc_myself(const char *filename, const char *entry)
5141 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
5142 filename += strlen("/proc/");
5143 if (!strncmp(filename, "self/", strlen("self/"))) {
5144 filename += strlen("self/");
5145 } else if (*filename >= '1' && *filename <= '9') {
5146 char myself[80];
5147 snprintf(myself, sizeof(myself), "%d/", getpid());
5148 if (!strncmp(filename, myself, strlen(myself))) {
5149 filename += strlen(myself);
5150 } else {
5151 return 0;
5153 } else {
5154 return 0;
5156 if (!strcmp(filename, entry)) {
5157 return 1;
5160 return 0;
5163 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5164 static int is_proc(const char *filename, const char *entry)
5166 return strcmp(filename, entry) == 0;
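/* Emulate /proc/net/route when host and guest endianness differ, byte-swapping
   the address columns. */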
5169 static int open_net_route(void *cpu_env, int fd)
5171 FILE *fp;
5172 char *line = NULL;
5173 size_t len = 0;
5174 ssize_t read;
5176 fp = fopen("/proc/net/route", "r");
5177 if (fp == NULL) {
5178 return -EACCES;
5181 /* read header */
5183 read = getline(&line, &len, fp);
5184 dprintf(fd, "%s", line);
5186 /* read routes */
5188 while ((read = getline(&line, &len, fp)) != -1) {
5189 char iface[16];
5190 uint32_t dest, gw, mask;
5191 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
5192 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5193 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
5194 &mask, &mtu, &window, &irtt);
5195 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
5196 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
5197 metric, tswap32(mask), mtu, window, irtt);
5200 free(line);
5201 fclose(fp);
5203 return 0;
5205 #endif
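/* open(2) helper: a few /proc paths have their contents synthesized for the
   guest into an unlinked temporary file; everything else goes to the host. */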
5207 static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
5209 struct fake_open {
5210 const char *filename;
5211 int (*fill)(void *cpu_env, int fd);
5212 int (*cmp)(const char *s1, const char *s2);
5214 const struct fake_open *fake_open;
5215 static const struct fake_open fakes[] = {
5216 { "maps", open_self_maps, is_proc_myself },
5217 { "stat", open_self_stat, is_proc_myself },
5218 { "auxv", open_self_auxv, is_proc_myself },
5219 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
5220 { "/proc/net/route", open_net_route, is_proc },
5221 #endif
5222 { NULL, NULL, NULL }
5225 for (fake_open = fakes; fake_open->filename; fake_open++) {
5226 if (fake_open->cmp(pathname, fake_open->filename)) {
5227 break;
5231 if (fake_open->filename) {
5232 const char *tmpdir;
5233 char filename[PATH_MAX];
5234 int fd, r;
5236 /* create a temporary file to hold the synthesized contents */
5237 tmpdir = getenv("TMPDIR");
5238 if (!tmpdir)
5239 tmpdir = "/tmp";
5240 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
5241 fd = mkstemp(filename);
5242 if (fd < 0) {
5243 return fd;
5245 unlink(filename);
5247 if ((r = fake_open->fill(cpu_env, fd))) {
5248 close(fd);
5249 return r;
5251 lseek(fd, 0, SEEK_SET);
5253 return fd;
5256 return get_errno(open(path(pathname), flags, mode));
5259 /* do_syscall() should always have a single exit point at the end so
5260 that actions, such as logging of syscall results, can be performed.
5261 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
5262 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
5263 abi_long arg2, abi_long arg3, abi_long arg4,
5264 abi_long arg5, abi_long arg6, abi_long arg7,
5265 abi_long arg8)
5267 CPUState *cpu = ENV_GET_CPU(cpu_env);
5268 abi_long ret;
5269 struct stat st;
5270 struct statfs stfs;
5271 void *p;
5273 #ifdef DEBUG
5274 gemu_log("syscall %d", num);
5275 #endif
5276 if(do_strace)
5277 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
5279 switch(num) {
5280 case TARGET_NR_exit:
5281 /* In old applications this may be used to implement _exit(2).
5282 However in threaded applications it is used for thread termination,
5283 and _exit_group is used for application termination.
5284 Do thread termination if we have more than one thread. */
5285 /* FIXME: This probably breaks if a signal arrives. We should probably
5286 be disabling signals. */
5287 if (CPU_NEXT(first_cpu)) {
5288 TaskState *ts;
5290 cpu_list_lock();
5291 /* Remove the CPU from the list. */
5292 QTAILQ_REMOVE(&cpus, cpu, node);
5293 cpu_list_unlock();
5294 ts = cpu->opaque;
5295 if (ts->child_tidptr) {
5296 put_user_u32(0, ts->child_tidptr);
5297 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
5298 NULL, NULL, 0);
5300 thread_cpu = NULL;
5301 object_unref(OBJECT(cpu));
5302 g_free(ts);
5303 pthread_exit(NULL);
5305 #ifdef TARGET_GPROF
5306 _mcleanup();
5307 #endif
5308 gdb_exit(cpu_env, arg1);
5309 _exit(arg1);
5310 ret = 0; /* avoid warning */
5311 break;
5312 case TARGET_NR_read:
5313 if (arg3 == 0)
5314 ret = 0;
5315 else {
5316 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
5317 goto efault;
5318 ret = get_errno(read(arg1, p, arg3));
5319 unlock_user(p, arg2, ret);
5321 break;
5322 case TARGET_NR_write:
5323 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
5324 goto efault;
5325 ret = get_errno(write(arg1, p, arg3));
5326 unlock_user(p, arg2, 0);
5327 break;
5328 case TARGET_NR_open:
5329 if (!(p = lock_user_string(arg1)))
5330 goto efault;
5331 ret = get_errno(do_open(cpu_env, p,
5332 target_to_host_bitmask(arg2, fcntl_flags_tbl),
5333 arg3));
5334 unlock_user(p, arg1, 0);
5335 break;
5336 #if defined(TARGET_NR_openat) && defined(__NR_openat)
5337 case TARGET_NR_openat:
5338 if (!(p = lock_user_string(arg2)))
5339 goto efault;
5340 ret = get_errno(sys_openat(arg1,
5341 path(p),
5342 target_to_host_bitmask(arg3, fcntl_flags_tbl),
5343 arg4));
5344 unlock_user(p, arg2, 0);
5345 break;
5346 #endif
5347 case TARGET_NR_close:
5348 ret = get_errno(close(arg1));
5349 break;
5350 case TARGET_NR_brk:
5351 ret = do_brk(arg1);
5352 break;
5353 case TARGET_NR_fork:
5354 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
5355 break;
5356 #ifdef TARGET_NR_waitpid
5357 case TARGET_NR_waitpid:
5359 int status;
5360 ret = get_errno(waitpid(arg1, &status, arg3));
5361 if (!is_error(ret) && arg2 && ret
5362 && put_user_s32(host_to_target_waitstatus(status), arg2))
5363 goto efault;
5365 break;
5366 #endif
5367 #ifdef TARGET_NR_waitid
5368 case TARGET_NR_waitid:
5370 siginfo_t info;
5371 info.si_pid = 0;
5372 ret = get_errno(waitid(arg1, arg2, &info, arg4));
5373 if (!is_error(ret) && arg3 && info.si_pid != 0) {
5374 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
5375 goto efault;
5376 host_to_target_siginfo(p, &info);
5377 unlock_user(p, arg3, sizeof(target_siginfo_t));
5380 break;
5381 #endif
5382 #ifdef TARGET_NR_creat /* not on alpha */
5383 case TARGET_NR_creat:
5384 if (!(p = lock_user_string(arg1)))
5385 goto efault;
5386 ret = get_errno(creat(p, arg2));
5387 unlock_user(p, arg1, 0);
5388 break;
5389 #endif
5390 case TARGET_NR_link:
5392 void * p2;
5393 p = lock_user_string(arg1);
5394 p2 = lock_user_string(arg2);
5395 if (!p || !p2)
5396 ret = -TARGET_EFAULT;
5397 else
5398 ret = get_errno(link(p, p2));
5399 unlock_user(p2, arg2, 0);
5400 unlock_user(p, arg1, 0);
5402 break;
5403 #if defined(TARGET_NR_linkat)
5404 case TARGET_NR_linkat:
5406 void * p2 = NULL;
5407 if (!arg2 || !arg4)
5408 goto efault;
5409 p = lock_user_string(arg2);
5410 p2 = lock_user_string(arg4);
5411 if (!p || !p2)
5412 ret = -TARGET_EFAULT;
5413 else
5414 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
5415 unlock_user(p, arg2, 0);
5416 unlock_user(p2, arg4, 0);
5418 break;
5419 #endif
5420 case TARGET_NR_unlink:
5421 if (!(p = lock_user_string(arg1)))
5422 goto efault;
5423 ret = get_errno(unlink(p));
5424 unlock_user(p, arg1, 0);
5425 break;
5426 #if defined(TARGET_NR_unlinkat)
5427 case TARGET_NR_unlinkat:
5428 if (!(p = lock_user_string(arg2)))
5429 goto efault;
5430 ret = get_errno(unlinkat(arg1, p, arg3));
5431 unlock_user(p, arg2, 0);
5432 break;
5433 #endif
5434 case TARGET_NR_execve:
5436 char **argp, **envp;
5437 int argc, envc;
5438 abi_ulong gp;
5439 abi_ulong guest_argp;
5440 abi_ulong guest_envp;
5441 abi_ulong addr;
5442 char **q;
5443 int total_size = 0;
5445 argc = 0;
5446 guest_argp = arg2;
5447 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
5448 if (get_user_ual(addr, gp))
5449 goto efault;
5450 if (!addr)
5451 break;
5452 argc++;
5454 envc = 0;
5455 guest_envp = arg3;
5456 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
5457 if (get_user_ual(addr, gp))
5458 goto efault;
5459 if (!addr)
5460 break;
5461 envc++;
5464 argp = alloca((argc + 1) * sizeof(void *));
5465 envp = alloca((envc + 1) * sizeof(void *));
5467 for (gp = guest_argp, q = argp; gp;
5468 gp += sizeof(abi_ulong), q++) {
5469 if (get_user_ual(addr, gp))
5470 goto execve_efault;
5471 if (!addr)
5472 break;
5473 if (!(*q = lock_user_string(addr)))
5474 goto execve_efault;
5475 total_size += strlen(*q) + 1;
5477 *q = NULL;
5479 for (gp = guest_envp, q = envp; gp;
5480 gp += sizeof(abi_ulong), q++) {
5481 if (get_user_ual(addr, gp))
5482 goto execve_efault;
5483 if (!addr)
5484 break;
5485 if (!(*q = lock_user_string(addr)))
5486 goto execve_efault;
5487 total_size += strlen(*q) + 1;
5489 *q = NULL;
5491 /* This case will not be caught by the host's execve() if its
5492 page size is bigger than the target's. */
5493 if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
5494 ret = -TARGET_E2BIG;
5495 goto execve_end;
5497 if (!(p = lock_user_string(arg1)))
5498 goto execve_efault;
5499 ret = get_errno(execve(p, argp, envp));
5500 unlock_user(p, arg1, 0);
5502 goto execve_end;
5504 execve_efault:
5505 ret = -TARGET_EFAULT;
5507 execve_end:
5508 for (gp = guest_argp, q = argp; *q;
5509 gp += sizeof(abi_ulong), q++) {
5510 if (get_user_ual(addr, gp)
5511 || !addr)
5512 break;
5513 unlock_user(*q, addr, 0);
5515 for (gp = guest_envp, q = envp; *q;
5516 gp += sizeof(abi_ulong), q++) {
5517 if (get_user_ual(addr, gp)
5518 || !addr)
5519 break;
5520 unlock_user(*q, addr, 0);
5523 break;
5524 case TARGET_NR_chdir:
5525 if (!(p = lock_user_string(arg1)))
5526 goto efault;
5527 ret = get_errno(chdir(p));
5528 unlock_user(p, arg1, 0);
5529 break;
5530 #ifdef TARGET_NR_time
5531 case TARGET_NR_time:
5533 time_t host_time;
5534 ret = get_errno(time(&host_time));
5535 if (!is_error(ret)
5536 && arg1
5537 && put_user_sal(host_time, arg1))
5538 goto efault;
5540 break;
5541 #endif
5542 case TARGET_NR_mknod:
5543 if (!(p = lock_user_string(arg1)))
5544 goto efault;
5545 ret = get_errno(mknod(p, arg2, arg3));
5546 unlock_user(p, arg1, 0);
5547 break;
5548 #if defined(TARGET_NR_mknodat)
5549 case TARGET_NR_mknodat:
5550 if (!(p = lock_user_string(arg2)))
5551 goto efault;
5552 ret = get_errno(mknodat(arg1, p, arg3, arg4));
5553 unlock_user(p, arg2, 0);
5554 break;
5555 #endif
5556 case TARGET_NR_chmod:
5557 if (!(p = lock_user_string(arg1)))
5558 goto efault;
5559 ret = get_errno(chmod(p, arg2));
5560 unlock_user(p, arg1, 0);
5561 break;
5562 #ifdef TARGET_NR_break
5563 case TARGET_NR_break:
5564 goto unimplemented;
5565 #endif
5566 #ifdef TARGET_NR_oldstat
5567 case TARGET_NR_oldstat:
5568 goto unimplemented;
5569 #endif
5570 case TARGET_NR_lseek:
5571 ret = get_errno(lseek(arg1, arg2, arg3));
5572 break;
5573 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5574 /* Alpha specific */
5575 case TARGET_NR_getxpid:
5576 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
5577 ret = get_errno(getpid());
5578 break;
5579 #endif
5580 #ifdef TARGET_NR_getpid
5581 case TARGET_NR_getpid:
5582 ret = get_errno(getpid());
5583 break;
5584 #endif
5585 case TARGET_NR_mount:
5587 /* need to look at the data field */
5588 void *p2, *p3;
5589 p = lock_user_string(arg1);
5590 p2 = lock_user_string(arg2);
5591 p3 = lock_user_string(arg3);
5592 if (!p || !p2 || !p3)
5593 ret = -TARGET_EFAULT;
5594 else {
5595 /* FIXME - arg5 should be locked, but it isn't clear how to
5596 * do that since it's not guaranteed to be a NULL-terminated
5597 * string.
5599 if ( ! arg5 )
5600 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
5601 else
5602 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
5604 unlock_user(p, arg1, 0);
5605 unlock_user(p2, arg2, 0);
5606 unlock_user(p3, arg3, 0);
5607 break;
5609 #ifdef TARGET_NR_umount
5610 case TARGET_NR_umount:
5611 if (!(p = lock_user_string(arg1)))
5612 goto efault;
5613 ret = get_errno(umount(p));
5614 unlock_user(p, arg1, 0);
5615 break;
5616 #endif
5617 #ifdef TARGET_NR_stime /* not on alpha */
5618 case TARGET_NR_stime:
5620 time_t host_time;
5621 if (get_user_sal(host_time, arg1))
5622 goto efault;
5623 ret = get_errno(stime(&host_time));
5625 break;
5626 #endif
5627 case TARGET_NR_ptrace:
5628 goto unimplemented;
5629 #ifdef TARGET_NR_alarm /* not on alpha */
5630 case TARGET_NR_alarm:
5631 ret = alarm(arg1);
5632 break;
5633 #endif
5634 #ifdef TARGET_NR_oldfstat
5635 case TARGET_NR_oldfstat:
5636 goto unimplemented;
5637 #endif
5638 #ifdef TARGET_NR_pause /* not on alpha */
5639 case TARGET_NR_pause:
5640 ret = get_errno(pause());
5641 break;
5642 #endif
5643 #ifdef TARGET_NR_utime
5644 case TARGET_NR_utime:
5646 struct utimbuf tbuf, *host_tbuf;
5647 struct target_utimbuf *target_tbuf;
5648 if (arg2) {
5649 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5650 goto efault;
5651 tbuf.actime = tswapal(target_tbuf->actime);
5652 tbuf.modtime = tswapal(target_tbuf->modtime);
5653 unlock_user_struct(target_tbuf, arg2, 0);
5654 host_tbuf = &tbuf;
5655 } else {
5656 host_tbuf = NULL;
5658 if (!(p = lock_user_string(arg1)))
5659 goto efault;
5660 ret = get_errno(utime(p, host_tbuf));
5661 unlock_user(p, arg1, 0);
5663 break;
5664 #endif
5665 case TARGET_NR_utimes:
5667 struct timeval *tvp, tv[2];
5668 if (arg2) {
5669 if (copy_from_user_timeval(&tv[0], arg2)
5670 || copy_from_user_timeval(&tv[1],
5671 arg2 + sizeof(struct target_timeval)))
5672 goto efault;
5673 tvp = tv;
5674 } else {
5675 tvp = NULL;
5677 if (!(p = lock_user_string(arg1)))
5678 goto efault;
5679 ret = get_errno(utimes(p, tvp));
5680 unlock_user(p, arg1, 0);
5682 break;
5683 #if defined(TARGET_NR_futimesat)
5684 case TARGET_NR_futimesat:
5686 struct timeval *tvp, tv[2];
5687 if (arg3) {
5688 if (copy_from_user_timeval(&tv[0], arg3)
5689 || copy_from_user_timeval(&tv[1],
5690 arg3 + sizeof(struct target_timeval)))
5691 goto efault;
5692 tvp = tv;
5693 } else {
5694 tvp = NULL;
5696 if (!(p = lock_user_string(arg2)))
5697 goto efault;
5698 ret = get_errno(futimesat(arg1, path(p), tvp));
5699 unlock_user(p, arg2, 0);
5701 break;
5702 #endif
5703 #ifdef TARGET_NR_stty
5704 case TARGET_NR_stty:
5705 goto unimplemented;
5706 #endif
5707 #ifdef TARGET_NR_gtty
5708 case TARGET_NR_gtty:
5709 goto unimplemented;
5710 #endif
5711 case TARGET_NR_access:
5712 if (!(p = lock_user_string(arg1)))
5713 goto efault;
5714 ret = get_errno(access(path(p), arg2));
5715 unlock_user(p, arg1, 0);
5716 break;
5717 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5718 case TARGET_NR_faccessat:
5719 if (!(p = lock_user_string(arg2)))
5720 goto efault;
5721 ret = get_errno(faccessat(arg1, p, arg3, 0));
5722 unlock_user(p, arg2, 0);
5723 break;
5724 #endif
5725 #ifdef TARGET_NR_nice /* not on alpha */
5726 case TARGET_NR_nice:
5727 ret = get_errno(nice(arg1));
5728 break;
5729 #endif
5730 #ifdef TARGET_NR_ftime
5731 case TARGET_NR_ftime:
5732 goto unimplemented;
5733 #endif
5734 case TARGET_NR_sync:
5735 sync();
5736 ret = 0;
5737 break;
5738 case TARGET_NR_kill:
5739 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5740 break;
5741 case TARGET_NR_rename:
5743 void *p2;
5744 p = lock_user_string(arg1);
5745 p2 = lock_user_string(arg2);
5746 if (!p || !p2)
5747 ret = -TARGET_EFAULT;
5748 else
5749 ret = get_errno(rename(p, p2));
5750 unlock_user(p2, arg2, 0);
5751 unlock_user(p, arg1, 0);
5753 break;
5754 #if defined(TARGET_NR_renameat)
5755 case TARGET_NR_renameat:
5757 void *p2;
5758 p = lock_user_string(arg2);
5759 p2 = lock_user_string(arg4);
5760 if (!p || !p2)
5761 ret = -TARGET_EFAULT;
5762 else
5763 ret = get_errno(renameat(arg1, p, arg3, p2));
5764 unlock_user(p2, arg4, 0);
5765 unlock_user(p, arg2, 0);
5767 break;
5768 #endif
5769 case TARGET_NR_mkdir:
5770 if (!(p = lock_user_string(arg1)))
5771 goto efault;
5772 ret = get_errno(mkdir(p, arg2));
5773 unlock_user(p, arg1, 0);
5774 break;
5775 #if defined(TARGET_NR_mkdirat)
5776 case TARGET_NR_mkdirat:
5777 if (!(p = lock_user_string(arg2)))
5778 goto efault;
5779 ret = get_errno(mkdirat(arg1, p, arg3));
5780 unlock_user(p, arg2, 0);
5781 break;
5782 #endif
5783 case TARGET_NR_rmdir:
5784 if (!(p = lock_user_string(arg1)))
5785 goto efault;
5786 ret = get_errno(rmdir(p));
5787 unlock_user(p, arg1, 0);
5788 break;
5789 case TARGET_NR_dup:
5790 ret = get_errno(dup(arg1));
5791 break;
5792 case TARGET_NR_pipe:
5793 ret = do_pipe(cpu_env, arg1, 0, 0);
5794 break;
5795 #ifdef TARGET_NR_pipe2
5796 case TARGET_NR_pipe2:
5797 ret = do_pipe(cpu_env, arg1,
5798 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
5799 break;
5800 #endif
5801 case TARGET_NR_times:
5803 struct target_tms *tmsp;
5804 struct tms tms;
5805 ret = get_errno(times(&tms));
5806 if (arg1) {
5807 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5808 if (!tmsp)
5809 goto efault;
5810 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
5811 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
5812 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
5813 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
5815 if (!is_error(ret))
5816 ret = host_to_target_clock_t(ret);
5818 break;
5819 #ifdef TARGET_NR_prof
5820 case TARGET_NR_prof:
5821 goto unimplemented;
5822 #endif
5823 #ifdef TARGET_NR_signal
5824 case TARGET_NR_signal:
5825 goto unimplemented;
5826 #endif
5827 case TARGET_NR_acct:
5828 if (arg1 == 0) {
5829 ret = get_errno(acct(NULL));
5830 } else {
5831 if (!(p = lock_user_string(arg1)))
5832 goto efault;
5833 ret = get_errno(acct(path(p)));
5834 unlock_user(p, arg1, 0);
5836 break;
5837 #ifdef TARGET_NR_umount2
5838 case TARGET_NR_umount2:
5839 if (!(p = lock_user_string(arg1)))
5840 goto efault;
5841 ret = get_errno(umount2(p, arg2));
5842 unlock_user(p, arg1, 0);
5843 break;
5844 #endif
5845 #ifdef TARGET_NR_lock
5846 case TARGET_NR_lock:
5847 goto unimplemented;
5848 #endif
5849 case TARGET_NR_ioctl:
5850 ret = do_ioctl(arg1, arg2, arg3);
5851 break;
5852 case TARGET_NR_fcntl:
5853 ret = do_fcntl(arg1, arg2, arg3);
5854 break;
5855 #ifdef TARGET_NR_mpx
5856 case TARGET_NR_mpx:
5857 goto unimplemented;
5858 #endif
5859 case TARGET_NR_setpgid:
5860 ret = get_errno(setpgid(arg1, arg2));
5861 break;
5862 #ifdef TARGET_NR_ulimit
5863 case TARGET_NR_ulimit:
5864 goto unimplemented;
5865 #endif
5866 #ifdef TARGET_NR_oldolduname
5867 case TARGET_NR_oldolduname:
5868 goto unimplemented;
5869 #endif
5870 case TARGET_NR_umask:
5871 ret = get_errno(umask(arg1));
5872 break;
5873 case TARGET_NR_chroot:
5874 if (!(p = lock_user_string(arg1)))
5875 goto efault;
5876 ret = get_errno(chroot(p));
5877 unlock_user(p, arg1, 0);
5878 break;
5879 case TARGET_NR_ustat:
5880 goto unimplemented;
5881 case TARGET_NR_dup2:
5882 ret = get_errno(dup2(arg1, arg2));
5883 break;
5884 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5885 case TARGET_NR_dup3:
5886 ret = get_errno(dup3(arg1, arg2, arg3));
5887 break;
5888 #endif
5889 #ifdef TARGET_NR_getppid /* not on alpha */
5890 case TARGET_NR_getppid:
5891 ret = get_errno(getppid());
5892 break;
5893 #endif
5894 case TARGET_NR_getpgrp:
5895 ret = get_errno(getpgrp());
5896 break;
5897 case TARGET_NR_setsid:
5898 ret = get_errno(setsid());
5899 break;
5900 #ifdef TARGET_NR_sigaction
5901 case TARGET_NR_sigaction:
5903 #if defined(TARGET_ALPHA)
5904 struct target_sigaction act, oact, *pact = 0;
5905 struct target_old_sigaction *old_act;
5906 if (arg2) {
5907 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5908 goto efault;
5909 act._sa_handler = old_act->_sa_handler;
5910 target_siginitset(&act.sa_mask, old_act->sa_mask);
5911 act.sa_flags = old_act->sa_flags;
5912 act.sa_restorer = 0;
5913 unlock_user_struct(old_act, arg2, 0);
5914 pact = &act;
5916 ret = get_errno(do_sigaction(arg1, pact, &oact));
5917 if (!is_error(ret) && arg3) {
5918 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5919 goto efault;
5920 old_act->_sa_handler = oact._sa_handler;
5921 old_act->sa_mask = oact.sa_mask.sig[0];
5922 old_act->sa_flags = oact.sa_flags;
5923 unlock_user_struct(old_act, arg3, 1);
5925 #elif defined(TARGET_MIPS)
5926 struct target_sigaction act, oact, *pact, *old_act;
5928 if (arg2) {
5929 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5930 goto efault;
5931 act._sa_handler = old_act->_sa_handler;
5932 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5933 act.sa_flags = old_act->sa_flags;
5934 unlock_user_struct(old_act, arg2, 0);
5935 pact = &act;
5936 } else {
5937 pact = NULL;
5940 ret = get_errno(do_sigaction(arg1, pact, &oact));
5942 if (!is_error(ret) && arg3) {
5943 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5944 goto efault;
5945 old_act->_sa_handler = oact._sa_handler;
5946 old_act->sa_flags = oact.sa_flags;
5947 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5948 old_act->sa_mask.sig[1] = 0;
5949 old_act->sa_mask.sig[2] = 0;
5950 old_act->sa_mask.sig[3] = 0;
5951 unlock_user_struct(old_act, arg3, 1);
5953 #else
5954 struct target_old_sigaction *old_act;
5955 struct target_sigaction act, oact, *pact;
5956 if (arg2) {
5957 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5958 goto efault;
5959 act._sa_handler = old_act->_sa_handler;
5960 target_siginitset(&act.sa_mask, old_act->sa_mask);
5961 act.sa_flags = old_act->sa_flags;
5962 act.sa_restorer = old_act->sa_restorer;
5963 unlock_user_struct(old_act, arg2, 0);
5964 pact = &act;
5965 } else {
5966 pact = NULL;
5968 ret = get_errno(do_sigaction(arg1, pact, &oact));
5969 if (!is_error(ret) && arg3) {
5970 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5971 goto efault;
5972 old_act->_sa_handler = oact._sa_handler;
5973 old_act->sa_mask = oact.sa_mask.sig[0];
5974 old_act->sa_flags = oact.sa_flags;
5975 old_act->sa_restorer = oact.sa_restorer;
5976 unlock_user_struct(old_act, arg3, 1);
5978 #endif
5980 break;
5981 #endif
5982 case TARGET_NR_rt_sigaction:
5984 #if defined(TARGET_ALPHA)
5985 struct target_sigaction act, oact, *pact = 0;
5986 struct target_rt_sigaction *rt_act;
5987 /* ??? arg4 == sizeof(sigset_t). */
5988 if (arg2) {
5989 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5990 goto efault;
5991 act._sa_handler = rt_act->_sa_handler;
5992 act.sa_mask = rt_act->sa_mask;
5993 act.sa_flags = rt_act->sa_flags;
5994 act.sa_restorer = arg5;
5995 unlock_user_struct(rt_act, arg2, 0);
5996 pact = &act;
5998 ret = get_errno(do_sigaction(arg1, pact, &oact));
5999 if (!is_error(ret) && arg3) {
6000 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
6001 goto efault;
6002 rt_act->_sa_handler = oact._sa_handler;
6003 rt_act->sa_mask = oact.sa_mask;
6004 rt_act->sa_flags = oact.sa_flags;
6005 unlock_user_struct(rt_act, arg3, 1);
6007 #else
6008 struct target_sigaction *act;
6009 struct target_sigaction *oact;
6011 if (arg2) {
6012 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
6013 goto efault;
6014 } else
6015 act = NULL;
6016 if (arg3) {
6017 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
6018 ret = -TARGET_EFAULT;
6019 goto rt_sigaction_fail;
6021 } else
6022 oact = NULL;
6023 ret = get_errno(do_sigaction(arg1, act, oact));
6024 rt_sigaction_fail:
6025 if (act)
6026 unlock_user_struct(act, arg2, 0);
6027 if (oact)
6028 unlock_user_struct(oact, arg3, 1);
6029 #endif
6031 break;
6032 #ifdef TARGET_NR_sgetmask /* not on alpha */
6033 case TARGET_NR_sgetmask:
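/* Legacy call: report the currently blocked signals as the target's old
 * single-word signal mask. */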
6035 sigset_t cur_set;
6036 abi_ulong target_set;
6037 do_sigprocmask(0, NULL, &cur_set);
6038 host_to_target_old_sigset(&target_set, &cur_set);
6039 ret = target_set;
6041 break;
6042 #endif
6043 #ifdef TARGET_NR_ssetmask /* not on alpha */
6044 case TARGET_NR_ssetmask:
6046 sigset_t set, oset, cur_set;
6047 abi_ulong target_set = arg1;
6048 do_sigprocmask(0, NULL, &cur_set);
6049 target_to_host_old_sigset(&set, &target_set);
6050 sigorset(&set, &set, &cur_set);
6051 do_sigprocmask(SIG_SETMASK, &set, &oset);
6052 host_to_target_old_sigset(&target_set, &oset);
6053 ret = target_set;
6055 break;
6056 #endif
6057 #ifdef TARGET_NR_sigprocmask
6058 case TARGET_NR_sigprocmask:
6060 #if defined(TARGET_ALPHA)
6061 sigset_t set, oldset;
6062 abi_ulong mask;
6063 int how;
6065 switch (arg1) {
6066 case TARGET_SIG_BLOCK:
6067 how = SIG_BLOCK;
6068 break;
6069 case TARGET_SIG_UNBLOCK:
6070 how = SIG_UNBLOCK;
6071 break;
6072 case TARGET_SIG_SETMASK:
6073 how = SIG_SETMASK;
6074 break;
6075 default:
6076 ret = -TARGET_EINVAL;
6077 goto fail;
6079 mask = arg2;
6080 target_to_host_old_sigset(&set, &mask);
6082 ret = get_errno(do_sigprocmask(how, &set, &oldset));
6083 if (!is_error(ret)) {
6084 host_to_target_old_sigset(&mask, &oldset);
6085 ret = mask;
6086 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
6088 #else
6089 sigset_t set, oldset, *set_ptr;
6090 int how;
6092 if (arg2) {
6093 switch (arg1) {
6094 case TARGET_SIG_BLOCK:
6095 how = SIG_BLOCK;
6096 break;
6097 case TARGET_SIG_UNBLOCK:
6098 how = SIG_UNBLOCK;
6099 break;
6100 case TARGET_SIG_SETMASK:
6101 how = SIG_SETMASK;
6102 break;
6103 default:
6104 ret = -TARGET_EINVAL;
6105 goto fail;
6107 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6108 goto efault;
6109 target_to_host_old_sigset(&set, p);
6110 unlock_user(p, arg2, 0);
6111 set_ptr = &set;
6112 } else {
6113 how = 0;
6114 set_ptr = NULL;
6116 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6117 if (!is_error(ret) && arg3) {
6118 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6119 goto efault;
6120 host_to_target_old_sigset(p, &oldset);
6121 unlock_user(p, arg3, sizeof(target_sigset_t));
6123 #endif
6125 break;
6126 #endif
6127 case TARGET_NR_rt_sigprocmask:
6129 int how = arg1;
6130 sigset_t set, oldset, *set_ptr;
6132 if (arg2) {
6133 switch(how) {
6134 case TARGET_SIG_BLOCK:
6135 how = SIG_BLOCK;
6136 break;
6137 case TARGET_SIG_UNBLOCK:
6138 how = SIG_UNBLOCK;
6139 break;
6140 case TARGET_SIG_SETMASK:
6141 how = SIG_SETMASK;
6142 break;
6143 default:
6144 ret = -TARGET_EINVAL;
6145 goto fail;
6147 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
6148 goto efault;
6149 target_to_host_sigset(&set, p);
6150 unlock_user(p, arg2, 0);
6151 set_ptr = &set;
6152 } else {
6153 how = 0;
6154 set_ptr = NULL;
6156 ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
6157 if (!is_error(ret) && arg3) {
6158 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
6159 goto efault;
6160 host_to_target_sigset(p, &oldset);
6161 unlock_user(p, arg3, sizeof(target_sigset_t));
6164 break;
6165 #ifdef TARGET_NR_sigpending
6166 case TARGET_NR_sigpending:
6168 sigset_t set;
6169 ret = get_errno(sigpending(&set));
6170 if (!is_error(ret)) {
6171 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6172 goto efault;
6173 host_to_target_old_sigset(p, &set);
6174 unlock_user(p, arg1, sizeof(target_sigset_t));
6177 break;
6178 #endif
6179 case TARGET_NR_rt_sigpending:
6181 sigset_t set;
6182 ret = get_errno(sigpending(&set));
6183 if (!is_error(ret)) {
6184 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
6185 goto efault;
6186 host_to_target_sigset(p, &set);
6187 unlock_user(p, arg1, sizeof(target_sigset_t));
6190 break;
6191 #ifdef TARGET_NR_sigsuspend
6192 case TARGET_NR_sigsuspend:
6194 sigset_t set;
6195 #if defined(TARGET_ALPHA)
6196 abi_ulong mask = arg1;
6197 target_to_host_old_sigset(&set, &mask);
6198 #else
6199 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6200 goto efault;
6201 target_to_host_old_sigset(&set, p);
6202 unlock_user(p, arg1, 0);
6203 #endif
6204 ret = get_errno(sigsuspend(&set));
6206 break;
6207 #endif
6208 case TARGET_NR_rt_sigsuspend:
6210 sigset_t set;
6211 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6212 goto efault;
6213 target_to_host_sigset(&set, p);
6214 unlock_user(p, arg1, 0);
6215 ret = get_errno(sigsuspend(&set));
6217 break;
6218 case TARGET_NR_rt_sigtimedwait:
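/* Convert the target sigset and optional timeout to host format; on success
 * the siginfo is written back (if requested) and the returned host signal
 * number is mapped to the target numbering. */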
6220 sigset_t set;
6221 struct timespec uts, *puts;
6222 siginfo_t uinfo;
6224 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
6225 goto efault;
6226 target_to_host_sigset(&set, p);
6227 unlock_user(p, arg1, 0);
6228 if (arg3) {
6229 puts = &uts;
6230 target_to_host_timespec(puts, arg3);
6231 } else {
6232 puts = NULL;
6234 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
6235 if (!is_error(ret)) {
6236 if (arg2) {
6237 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
6239 if (!p) {
6240 goto efault;
6242 host_to_target_siginfo(p, &uinfo);
6243 unlock_user(p, arg2, sizeof(target_siginfo_t));
6245 ret = host_to_target_signal(ret);
6248 break;
6249 case TARGET_NR_rt_sigqueueinfo:
6251 siginfo_t uinfo;
6252 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
6253 goto efault;
6254 target_to_host_siginfo(&uinfo, p);
6255 unlock_user(p, arg1, 0);
6256 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
6258 break;
6259 #ifdef TARGET_NR_sigreturn
6260 case TARGET_NR_sigreturn:
6261 /* NOTE: ret is eax, so no transcoding needs to be done */
6262 ret = do_sigreturn(cpu_env);
6263 break;
6264 #endif
6265 case TARGET_NR_rt_sigreturn:
6266 /* NOTE: ret is eax, so no transcoding needs to be done */
6267 ret = do_rt_sigreturn(cpu_env);
6268 break;
6269 case TARGET_NR_sethostname:
6270 if (!(p = lock_user_string(arg1)))
6271 goto efault;
6272 ret = get_errno(sethostname(p, arg2));
6273 unlock_user(p, arg1, 0);
6274 break;
6275 case TARGET_NR_setrlimit:
6277 int resource = target_to_host_resource(arg1);
6278 struct target_rlimit *target_rlim;
6279 struct rlimit rlim;
6280 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
6281 goto efault;
6282 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
6283 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
6284 unlock_user_struct(target_rlim, arg2, 0);
6285 ret = get_errno(setrlimit(resource, &rlim));
6287 break;
6288 case TARGET_NR_getrlimit:
6290 int resource = target_to_host_resource(arg1);
6291 struct target_rlimit *target_rlim;
6292 struct rlimit rlim;
6294 ret = get_errno(getrlimit(resource, &rlim));
6295 if (!is_error(ret)) {
6296 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6297 goto efault;
6298 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6299 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6300 unlock_user_struct(target_rlim, arg2, 1);
6303 break;
6304 case TARGET_NR_getrusage:
6306 struct rusage rusage;
6307 ret = get_errno(getrusage(arg1, &rusage));
6308 if (!is_error(ret)) {
6309 host_to_target_rusage(arg2, &rusage);
6312 break;
6313 case TARGET_NR_gettimeofday:
6315 struct timeval tv;
6316 ret = get_errno(gettimeofday(&tv, NULL));
6317 if (!is_error(ret)) {
6318 if (copy_to_user_timeval(arg1, &tv))
6319 goto efault;
6322 break;
6323 case TARGET_NR_settimeofday:
6325 struct timeval tv;
6326 if (copy_from_user_timeval(&tv, arg1))
6327 goto efault;
6328 ret = get_errno(settimeofday(&tv, NULL));
6330 break;
6331 #if defined(TARGET_NR_select)
6332 case TARGET_NR_select:
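/* On most targets the old select call passes a pointer to a
 * target_sel_arg_struct in arg1; s390x and alpha pass the five arguments
 * directly. */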
6333 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
6334 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6335 #else
6337 struct target_sel_arg_struct *sel;
6338 abi_ulong inp, outp, exp, tvp;
6339 long nsel;
6341 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
6342 goto efault;
6343 nsel = tswapal(sel->n);
6344 inp = tswapal(sel->inp);
6345 outp = tswapal(sel->outp);
6346 exp = tswapal(sel->exp);
6347 tvp = tswapal(sel->tvp);
6348 unlock_user_struct(sel, arg1, 0);
6349 ret = do_select(nsel, inp, outp, exp, tvp);
6351 #endif
6352 break;
6353 #endif
6354 #ifdef TARGET_NR_pselect6
6355 case TARGET_NR_pselect6:
6357 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
6358 fd_set rfds, wfds, efds;
6359 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
6360 struct timespec ts, *ts_ptr;
6363 /* The 6th arg is actually two args smashed together,
6364 * so we cannot use the C library. */
6366 sigset_t set;
6367 struct {
6368 sigset_t *set;
6369 size_t size;
6370 } sig, *sig_ptr;
6372 abi_ulong arg_sigset, arg_sigsize, *arg7;
6373 target_sigset_t *target_sigset;
6375 n = arg1;
6376 rfd_addr = arg2;
6377 wfd_addr = arg3;
6378 efd_addr = arg4;
6379 ts_addr = arg5;
6381 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
6382 if (ret) {
6383 goto fail;
6385 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
6386 if (ret) {
6387 goto fail;
6389 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
6390 if (ret) {
6391 goto fail;
6395 /* This takes a timespec, and not a timeval, so we cannot
6396 * use the do_select() helper ... */
6398 if (ts_addr) {
6399 if (target_to_host_timespec(&ts, ts_addr)) {
6400 goto efault;
6402 ts_ptr = &ts;
6403 } else {
6404 ts_ptr = NULL;
6407 /* Extract the two packed args for the sigset */
6408 if (arg6) {
6409 sig_ptr = &sig;
6410 sig.size = _NSIG / 8;
6412 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
6413 if (!arg7) {
6414 goto efault;
6416 arg_sigset = tswapal(arg7[0]);
6417 arg_sigsize = tswapal(arg7[1]);
6418 unlock_user(arg7, arg6, 0);
6420 if (arg_sigset) {
6421 sig.set = &set;
6422 if (arg_sigsize != sizeof(*target_sigset)) {
6423 /* Like the kernel, we enforce correct size sigsets */
6424 ret = -TARGET_EINVAL;
6425 goto fail;
6427 target_sigset = lock_user(VERIFY_READ, arg_sigset,
6428 sizeof(*target_sigset), 1);
6429 if (!target_sigset) {
6430 goto efault;
6432 target_to_host_sigset(&set, target_sigset);
6433 unlock_user(target_sigset, arg_sigset, 0);
6434 } else {
6435 sig.set = NULL;
6437 } else {
6438 sig_ptr = NULL;
6441 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
6442 ts_ptr, sig_ptr));
6444 if (!is_error(ret)) {
6445 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
6446 goto efault;
6447 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
6448 goto efault;
6449 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
6450 goto efault;
6452 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
6453 goto efault;
6456 break;
6457 #endif
6458 case TARGET_NR_symlink:
6460 void *p2;
6461 p = lock_user_string(arg1);
6462 p2 = lock_user_string(arg2);
6463 if (!p || !p2)
6464 ret = -TARGET_EFAULT;
6465 else
6466 ret = get_errno(symlink(p, p2));
6467 unlock_user(p2, arg2, 0);
6468 unlock_user(p, arg1, 0);
6470 break;
6471 #if defined(TARGET_NR_symlinkat)
6472 case TARGET_NR_symlinkat:
6474 void *p2;
6475 p = lock_user_string(arg1);
6476 p2 = lock_user_string(arg3);
6477 if (!p || !p2)
6478 ret = -TARGET_EFAULT;
6479 else
6480 ret = get_errno(symlinkat(p, arg2, p2));
6481 unlock_user(p2, arg3, 0);
6482 unlock_user(p, arg1, 0);
6484 break;
6485 #endif
6486 #ifdef TARGET_NR_oldlstat
6487 case TARGET_NR_oldlstat:
6488 goto unimplemented;
6489 #endif
6490 case TARGET_NR_readlink:
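/* readlink of /proc/self/exe is intercepted so the guest sees the path of
 * the emulated binary (exec_path) rather than the path of the QEMU
 * executable itself. */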
6492 void *p2;
6493 p = lock_user_string(arg1);
6494 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
6495 if (!p || !p2) {
6496 ret = -TARGET_EFAULT;
6497 } else if (is_proc_myself((const char *)p, "exe")) {
6498 char real[PATH_MAX], *temp;
6499 temp = realpath(exec_path, real);
6500 ret = temp == NULL ? get_errno(-1) : strlen(real);
6501 snprintf((char *)p2, arg3, "%s", real);
6502 } else {
6503 ret = get_errno(readlink(path(p), p2, arg3));
6505 unlock_user(p2, arg2, ret);
6506 unlock_user(p, arg1, 0);
6508 break;
6509 #if defined(TARGET_NR_readlinkat)
6510 case TARGET_NR_readlinkat:
6512 void *p2;
6513 p = lock_user_string(arg2);
6514 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
6515 if (!p || !p2) {
6516 ret = -TARGET_EFAULT;
6517 } else if (is_proc_myself((const char *)p, "exe")) {
6518 char real[PATH_MAX], *temp;
6519 temp = realpath(exec_path, real);
6520 ret = temp == NULL ? get_errno(-1) : strlen(real);
6521 snprintf((char *)p2, arg4, "%s", real);
6522 } else {
6523 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
6525 unlock_user(p2, arg3, ret);
6526 unlock_user(p, arg2, 0);
6528 break;
6529 #endif
6530 #ifdef TARGET_NR_uselib
6531 case TARGET_NR_uselib:
6532 goto unimplemented;
6533 #endif
6534 #ifdef TARGET_NR_swapon
6535 case TARGET_NR_swapon:
6536 if (!(p = lock_user_string(arg1)))
6537 goto efault;
6538 ret = get_errno(swapon(p, arg2));
6539 unlock_user(p, arg1, 0);
6540 break;
6541 #endif
6542 case TARGET_NR_reboot:
6543 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6544 /* arg4 must be ignored in all other cases */
6545 p = lock_user_string(arg4);
6546 if (!p) {
6547 goto efault;
6549 ret = get_errno(reboot(arg1, arg2, arg3, p));
6550 unlock_user(p, arg4, 0);
6551 } else {
6552 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
6554 break;
6555 #ifdef TARGET_NR_readdir
6556 case TARGET_NR_readdir:
6557 goto unimplemented;
6558 #endif
6559 #ifdef TARGET_NR_mmap
6560 case TARGET_NR_mmap:
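/* On these 32-bit targets the old mmap syscall passes a single pointer
 * (arg1) to an array of six arguments in guest memory, so they must be
 * fetched and byte-swapped before calling target_mmap(). */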
6561 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6562 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
6563 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6564 || defined(TARGET_S390X)
6566 abi_ulong *v;
6567 abi_ulong v1, v2, v3, v4, v5, v6;
6568 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
6569 goto efault;
6570 v1 = tswapal(v[0]);
6571 v2 = tswapal(v[1]);
6572 v3 = tswapal(v[2]);
6573 v4 = tswapal(v[3]);
6574 v5 = tswapal(v[4]);
6575 v6 = tswapal(v[5]);
6576 unlock_user(v, arg1, 0);
6577 ret = get_errno(target_mmap(v1, v2, v3,
6578 target_to_host_bitmask(v4, mmap_flags_tbl),
6579 v5, v6));
6581 #else
6582 ret = get_errno(target_mmap(arg1, arg2, arg3,
6583 target_to_host_bitmask(arg4, mmap_flags_tbl),
6584 arg5,
6585 arg6));
6586 #endif
6587 break;
6588 #endif
6589 #ifdef TARGET_NR_mmap2
6590 case TARGET_NR_mmap2:
6591 #ifndef MMAP_SHIFT
6592 #define MMAP_SHIFT 12
6593 #endif
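/* mmap2 takes the file offset in units of 1 << MMAP_SHIFT bytes (4096 with
 * the default shift of 12); e.g. arg6 == 0x10 maps from byte offset
 * 0x10000. */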
6594 ret = get_errno(target_mmap(arg1, arg2, arg3,
6595 target_to_host_bitmask(arg4, mmap_flags_tbl),
6596 arg5,
6597 arg6 << MMAP_SHIFT));
6598 break;
6599 #endif
6600 case TARGET_NR_munmap:
6601 ret = get_errno(target_munmap(arg1, arg2));
6602 break;
6603 case TARGET_NR_mprotect:
6605 TaskState *ts = cpu->opaque;
6606 /* Special hack to detect libc making the stack executable. */
6607 if ((arg3 & PROT_GROWSDOWN)
6608 && arg1 >= ts->info->stack_limit
6609 && arg1 <= ts->info->start_stack) {
6610 arg3 &= ~PROT_GROWSDOWN;
6611 arg2 = arg2 + arg1 - ts->info->stack_limit;
6612 arg1 = ts->info->stack_limit;
6615 ret = get_errno(target_mprotect(arg1, arg2, arg3));
6616 break;
6617 #ifdef TARGET_NR_mremap
6618 case TARGET_NR_mremap:
6619 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
6620 break;
6621 #endif
6622 /* ??? msync/mlock/munlock are broken for softmmu. */
6623 #ifdef TARGET_NR_msync
6624 case TARGET_NR_msync:
6625 ret = get_errno(msync(g2h(arg1), arg2, arg3));
6626 break;
6627 #endif
6628 #ifdef TARGET_NR_mlock
6629 case TARGET_NR_mlock:
6630 ret = get_errno(mlock(g2h(arg1), arg2));
6631 break;
6632 #endif
6633 #ifdef TARGET_NR_munlock
6634 case TARGET_NR_munlock:
6635 ret = get_errno(munlock(g2h(arg1), arg2));
6636 break;
6637 #endif
6638 #ifdef TARGET_NR_mlockall
6639 case TARGET_NR_mlockall:
6640 ret = get_errno(mlockall(arg1));
6641 break;
6642 #endif
6643 #ifdef TARGET_NR_munlockall
6644 case TARGET_NR_munlockall:
6645 ret = get_errno(munlockall());
6646 break;
6647 #endif
6648 case TARGET_NR_truncate:
6649 if (!(p = lock_user_string(arg1)))
6650 goto efault;
6651 ret = get_errno(truncate(p, arg2));
6652 unlock_user(p, arg1, 0);
6653 break;
6654 case TARGET_NR_ftruncate:
6655 ret = get_errno(ftruncate(arg1, arg2));
6656 break;
6657 case TARGET_NR_fchmod:
6658 ret = get_errno(fchmod(arg1, arg2));
6659 break;
6660 #if defined(TARGET_NR_fchmodat)
6661 case TARGET_NR_fchmodat:
6662 if (!(p = lock_user_string(arg2)))
6663 goto efault;
6664 ret = get_errno(fchmodat(arg1, p, arg3, 0));
6665 unlock_user(p, arg2, 0);
6666 break;
6667 #endif
6668 case TARGET_NR_getpriority:
6669 /* Note that negative values are valid for getpriority, so we must
6670 differentiate based on errno settings. */
6671 errno = 0;
6672 ret = getpriority(arg1, arg2);
6673 if (ret == -1 && errno != 0) {
6674 ret = -host_to_target_errno(errno);
6675 break;
6677 #ifdef TARGET_ALPHA
6678 /* Return value is the unbiased priority. Signal no error. */
6679 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
6680 #else
6681 /* Return value is a biased priority to avoid negative numbers. */
6682 ret = 20 - ret;
6683 #endif
6684 break;
6685 case TARGET_NR_setpriority:
6686 ret = get_errno(setpriority(arg1, arg2, arg3));
6687 break;
6688 #ifdef TARGET_NR_profil
6689 case TARGET_NR_profil:
6690 goto unimplemented;
6691 #endif
6692 case TARGET_NR_statfs:
6693 if (!(p = lock_user_string(arg1)))
6694 goto efault;
6695 ret = get_errno(statfs(path(p), &stfs));
6696 unlock_user(p, arg1, 0);
6697 convert_statfs:
6698 if (!is_error(ret)) {
6699 struct target_statfs *target_stfs;
6701 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6702 goto efault;
6703 __put_user(stfs.f_type, &target_stfs->f_type);
6704 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6705 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6706 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6707 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6708 __put_user(stfs.f_files, &target_stfs->f_files);
6709 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6710 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6711 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6712 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6713 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6714 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6715 unlock_user_struct(target_stfs, arg2, 1);
6717 break;
6718 case TARGET_NR_fstatfs:
6719 ret = get_errno(fstatfs(arg1, &stfs));
6720 goto convert_statfs;
6721 #ifdef TARGET_NR_statfs64
6722 case TARGET_NR_statfs64:
6723 if (!(p = lock_user_string(arg1)))
6724 goto efault;
6725 ret = get_errno(statfs(path(p), &stfs));
6726 unlock_user(p, arg1, 0);
6727 convert_statfs64:
6728 if (!is_error(ret)) {
6729 struct target_statfs64 *target_stfs;
6731 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6732 goto efault;
6733 __put_user(stfs.f_type, &target_stfs->f_type);
6734 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6735 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6736 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6737 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6738 __put_user(stfs.f_files, &target_stfs->f_files);
6739 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6740 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6741 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6742 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6743 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
6744 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
6745 unlock_user_struct(target_stfs, arg3, 1);
6747 break;
6748 case TARGET_NR_fstatfs64:
6749 ret = get_errno(fstatfs(arg1, &stfs));
6750 goto convert_statfs64;
6751 #endif
6752 #ifdef TARGET_NR_ioperm
6753 case TARGET_NR_ioperm:
6754 goto unimplemented;
6755 #endif
6756 #ifdef TARGET_NR_socketcall
6757 case TARGET_NR_socketcall:
6758 ret = do_socketcall(arg1, arg2);
6759 break;
6760 #endif
6761 #ifdef TARGET_NR_accept
6762 case TARGET_NR_accept:
6763 ret = do_accept4(arg1, arg2, arg3, 0);
6764 break;
6765 #endif
6766 #ifdef TARGET_NR_accept4
6767 case TARGET_NR_accept4:
6768 #ifdef CONFIG_ACCEPT4
6769 ret = do_accept4(arg1, arg2, arg3, arg4);
6770 #else
6771 goto unimplemented;
6772 #endif
6773 break;
6774 #endif
6775 #ifdef TARGET_NR_bind
6776 case TARGET_NR_bind:
6777 ret = do_bind(arg1, arg2, arg3);
6778 break;
6779 #endif
6780 #ifdef TARGET_NR_connect
6781 case TARGET_NR_connect:
6782 ret = do_connect(arg1, arg2, arg3);
6783 break;
6784 #endif
6785 #ifdef TARGET_NR_getpeername
6786 case TARGET_NR_getpeername:
6787 ret = do_getpeername(arg1, arg2, arg3);
6788 break;
6789 #endif
6790 #ifdef TARGET_NR_getsockname
6791 case TARGET_NR_getsockname:
6792 ret = do_getsockname(arg1, arg2, arg3);
6793 break;
6794 #endif
6795 #ifdef TARGET_NR_getsockopt
6796 case TARGET_NR_getsockopt:
6797 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6798 break;
6799 #endif
6800 #ifdef TARGET_NR_listen
6801 case TARGET_NR_listen:
6802 ret = get_errno(listen(arg1, arg2));
6803 break;
6804 #endif
6805 #ifdef TARGET_NR_recv
6806 case TARGET_NR_recv:
6807 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6808 break;
6809 #endif
6810 #ifdef TARGET_NR_recvfrom
6811 case TARGET_NR_recvfrom:
6812 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6813 break;
6814 #endif
6815 #ifdef TARGET_NR_recvmsg
6816 case TARGET_NR_recvmsg:
6817 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6818 break;
6819 #endif
6820 #ifdef TARGET_NR_send
6821 case TARGET_NR_send:
6822 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6823 break;
6824 #endif
6825 #ifdef TARGET_NR_sendmsg
6826 case TARGET_NR_sendmsg:
6827 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6828 break;
6829 #endif
6830 #ifdef TARGET_NR_sendmmsg
6831 case TARGET_NR_sendmmsg:
6832 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
6833 break;
6834 case TARGET_NR_recvmmsg:
6835 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
6836 break;
6837 #endif
6838 #ifdef TARGET_NR_sendto
6839 case TARGET_NR_sendto:
6840 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6841 break;
6842 #endif
6843 #ifdef TARGET_NR_shutdown
6844 case TARGET_NR_shutdown:
6845 ret = get_errno(shutdown(arg1, arg2));
6846 break;
6847 #endif
6848 #ifdef TARGET_NR_socket
6849 case TARGET_NR_socket:
6850 ret = do_socket(arg1, arg2, arg3);
6851 break;
6852 #endif
6853 #ifdef TARGET_NR_socketpair
6854 case TARGET_NR_socketpair:
6855 ret = do_socketpair(arg1, arg2, arg3, arg4);
6856 break;
6857 #endif
6858 #ifdef TARGET_NR_setsockopt
6859 case TARGET_NR_setsockopt:
6860 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6861 break;
6862 #endif
6864 case TARGET_NR_syslog:
6865 if (!(p = lock_user_string(arg2)))
6866 goto efault;
6867 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6868 unlock_user(p, arg2, 0);
6869 break;
6871 case TARGET_NR_setitimer:
6873 struct itimerval value, ovalue, *pvalue;
6875 if (arg2) {
6876 pvalue = &value;
6877 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6878 || copy_from_user_timeval(&pvalue->it_value,
6879 arg2 + sizeof(struct target_timeval)))
6880 goto efault;
6881 } else {
6882 pvalue = NULL;
6884 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6885 if (!is_error(ret) && arg3) {
6886 if (copy_to_user_timeval(arg3,
6887 &ovalue.it_interval)
6888 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6889 &ovalue.it_value))
6890 goto efault;
6893 break;
6894 case TARGET_NR_getitimer:
6896 struct itimerval value;
6898 ret = get_errno(getitimer(arg1, &value));
6899 if (!is_error(ret) && arg2) {
6900 if (copy_to_user_timeval(arg2,
6901 &value.it_interval)
6902 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6903 &value.it_value))
6904 goto efault;
6907 break;
6908 case TARGET_NR_stat:
6909 if (!(p = lock_user_string(arg1)))
6910 goto efault;
6911 ret = get_errno(stat(path(p), &st));
6912 unlock_user(p, arg1, 0);
6913 goto do_stat;
6914 case TARGET_NR_lstat:
6915 if (!(p = lock_user_string(arg1)))
6916 goto efault;
6917 ret = get_errno(lstat(path(p), &st));
6918 unlock_user(p, arg1, 0);
6919 goto do_stat;
6920 case TARGET_NR_fstat:
6922 ret = get_errno(fstat(arg1, &st));
6923 do_stat:
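/* stat, lstat and fstat converge here: copy the host struct stat into the
 * target's struct target_stat field by field. */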
6924 if (!is_error(ret)) {
6925 struct target_stat *target_st;
6927 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6928 goto efault;
6929 memset(target_st, 0, sizeof(*target_st));
6930 __put_user(st.st_dev, &target_st->st_dev);
6931 __put_user(st.st_ino, &target_st->st_ino);
6932 __put_user(st.st_mode, &target_st->st_mode);
6933 __put_user(st.st_uid, &target_st->st_uid);
6934 __put_user(st.st_gid, &target_st->st_gid);
6935 __put_user(st.st_nlink, &target_st->st_nlink);
6936 __put_user(st.st_rdev, &target_st->st_rdev);
6937 __put_user(st.st_size, &target_st->st_size);
6938 __put_user(st.st_blksize, &target_st->st_blksize);
6939 __put_user(st.st_blocks, &target_st->st_blocks);
6940 __put_user(st.st_atime, &target_st->target_st_atime);
6941 __put_user(st.st_mtime, &target_st->target_st_mtime);
6942 __put_user(st.st_ctime, &target_st->target_st_ctime);
6943 unlock_user_struct(target_st, arg2, 1);
6946 break;
6947 #ifdef TARGET_NR_olduname
6948 case TARGET_NR_olduname:
6949 goto unimplemented;
6950 #endif
6951 #ifdef TARGET_NR_iopl
6952 case TARGET_NR_iopl:
6953 goto unimplemented;
6954 #endif
6955 case TARGET_NR_vhangup:
6956 ret = get_errno(vhangup());
6957 break;
6958 #ifdef TARGET_NR_idle
6959 case TARGET_NR_idle:
6960 goto unimplemented;
6961 #endif
6962 #ifdef TARGET_NR_syscall
6963 case TARGET_NR_syscall:
6964 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6965 arg6, arg7, arg8, 0);
6966 break;
6967 #endif
6968 case TARGET_NR_wait4:
6970 int status;
6971 abi_long status_ptr = arg2;
6972 struct rusage rusage, *rusage_ptr;
6973 abi_ulong target_rusage = arg4;
6974 if (target_rusage)
6975 rusage_ptr = &rusage;
6976 else
6977 rusage_ptr = NULL;
6978 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6979 if (!is_error(ret)) {
6980 if (status_ptr && ret) {
6981 status = host_to_target_waitstatus(status);
6982 if (put_user_s32(status, status_ptr))
6983 goto efault;
6985 if (target_rusage)
6986 host_to_target_rusage(target_rusage, &rusage);
6989 break;
6990 #ifdef TARGET_NR_swapoff
6991 case TARGET_NR_swapoff:
6992 if (!(p = lock_user_string(arg1)))
6993 goto efault;
6994 ret = get_errno(swapoff(p));
6995 unlock_user(p, arg1, 0);
6996 break;
6997 #endif
6998 case TARGET_NR_sysinfo:
7000 struct target_sysinfo *target_value;
7001 struct sysinfo value;
7002 ret = get_errno(sysinfo(&value));
7003 if (!is_error(ret) && arg1)
7005 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
7006 goto efault;
7007 __put_user(value.uptime, &target_value->uptime);
7008 __put_user(value.loads[0], &target_value->loads[0]);
7009 __put_user(value.loads[1], &target_value->loads[1]);
7010 __put_user(value.loads[2], &target_value->loads[2]);
7011 __put_user(value.totalram, &target_value->totalram);
7012 __put_user(value.freeram, &target_value->freeram);
7013 __put_user(value.sharedram, &target_value->sharedram);
7014 __put_user(value.bufferram, &target_value->bufferram);
7015 __put_user(value.totalswap, &target_value->totalswap);
7016 __put_user(value.freeswap, &target_value->freeswap);
7017 __put_user(value.procs, &target_value->procs);
7018 __put_user(value.totalhigh, &target_value->totalhigh);
7019 __put_user(value.freehigh, &target_value->freehigh);
7020 __put_user(value.mem_unit, &target_value->mem_unit);
7021 unlock_user_struct(target_value, arg1, 1);
7024 break;
7025 #ifdef TARGET_NR_ipc
7026 case TARGET_NR_ipc:
7027 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
7028 break;
7029 #endif
7030 #ifdef TARGET_NR_semget
7031 case TARGET_NR_semget:
7032 ret = get_errno(semget(arg1, arg2, arg3));
7033 break;
7034 #endif
7035 #ifdef TARGET_NR_semop
7036 case TARGET_NR_semop:
7037 ret = do_semop(arg1, arg2, arg3);
7038 break;
7039 #endif
7040 #ifdef TARGET_NR_semctl
7041 case TARGET_NR_semctl:
7042 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
7043 break;
7044 #endif
7045 #ifdef TARGET_NR_msgctl
7046 case TARGET_NR_msgctl:
7047 ret = do_msgctl(arg1, arg2, arg3);
7048 break;
7049 #endif
7050 #ifdef TARGET_NR_msgget
7051 case TARGET_NR_msgget:
7052 ret = get_errno(msgget(arg1, arg2));
7053 break;
7054 #endif
7055 #ifdef TARGET_NR_msgrcv
7056 case TARGET_NR_msgrcv:
7057 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
7058 break;
7059 #endif
7060 #ifdef TARGET_NR_msgsnd
7061 case TARGET_NR_msgsnd:
7062 ret = do_msgsnd(arg1, arg2, arg3, arg4);
7063 break;
7064 #endif
7065 #ifdef TARGET_NR_shmget
7066 case TARGET_NR_shmget:
7067 ret = get_errno(shmget(arg1, arg2, arg3));
7068 break;
7069 #endif
7070 #ifdef TARGET_NR_shmctl
7071 case TARGET_NR_shmctl:
7072 ret = do_shmctl(arg1, arg2, arg3);
7073 break;
7074 #endif
7075 #ifdef TARGET_NR_shmat
7076 case TARGET_NR_shmat:
7077 ret = do_shmat(arg1, arg2, arg3);
7078 break;
7079 #endif
7080 #ifdef TARGET_NR_shmdt
7081 case TARGET_NR_shmdt:
7082 ret = do_shmdt(arg1);
7083 break;
7084 #endif
7085 case TARGET_NR_fsync:
7086 ret = get_errno(fsync(arg1));
7087 break;
7088 case TARGET_NR_clone:
7089 /* Linux manages to have three different orderings for its
7090 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
7091 * match the kernel's CONFIG_CLONE_* settings.
7092 * Microblaze is further special in that it uses a sixth
7093 * implicit argument to clone for the TLS pointer. */
7095 #if defined(TARGET_MICROBLAZE)
7096 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
7097 #elif defined(TARGET_CLONE_BACKWARDS)
7098 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
7099 #elif defined(TARGET_CLONE_BACKWARDS2)
7100 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
7101 #else
7102 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
7103 #endif
7104 break;
7105 #ifdef __NR_exit_group
7106 /* new thread calls */
7107 case TARGET_NR_exit_group:
7108 #ifdef TARGET_GPROF
7109 _mcleanup();
7110 #endif
7111 gdb_exit(cpu_env, arg1);
7112 ret = get_errno(exit_group(arg1));
7113 break;
7114 #endif
7115 case TARGET_NR_setdomainname:
7116 if (!(p = lock_user_string(arg1)))
7117 goto efault;
7118 ret = get_errno(setdomainname(p, arg2));
7119 unlock_user(p, arg1, 0);
7120 break;
7121 case TARGET_NR_uname:
7122 /* no need to transcode because we use the linux syscall */
7124 struct new_utsname * buf;
7126 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
7127 goto efault;
7128 ret = get_errno(sys_uname(buf));
7129 if (!is_error(ret)) {
7130 /* Overwrite the native machine name with whatever is being
7131 emulated. */
7132 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
7133 /* Allow the user to override the reported release. */
7134 if (qemu_uname_release && *qemu_uname_release)
7135 strcpy (buf->release, qemu_uname_release);
7137 unlock_user_struct(buf, arg1, 1);
7139 break;
7140 #ifdef TARGET_I386
7141 case TARGET_NR_modify_ldt:
7142 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
7143 break;
7144 #if !defined(TARGET_X86_64)
7145 case TARGET_NR_vm86old:
7146 goto unimplemented;
7147 case TARGET_NR_vm86:
7148 ret = do_vm86(cpu_env, arg1, arg2);
7149 break;
7150 #endif
7151 #endif
7152 case TARGET_NR_adjtimex:
7153 goto unimplemented;
7154 #ifdef TARGET_NR_create_module
7155 case TARGET_NR_create_module:
7156 #endif
7157 case TARGET_NR_init_module:
7158 case TARGET_NR_delete_module:
7159 #ifdef TARGET_NR_get_kernel_syms
7160 case TARGET_NR_get_kernel_syms:
7161 #endif
7162 goto unimplemented;
7163 case TARGET_NR_quotactl:
7164 goto unimplemented;
7165 case TARGET_NR_getpgid:
7166 ret = get_errno(getpgid(arg1));
7167 break;
7168 case TARGET_NR_fchdir:
7169 ret = get_errno(fchdir(arg1));
7170 break;
7171 #ifdef TARGET_NR_bdflush /* not on x86_64 */
7172 case TARGET_NR_bdflush:
7173 goto unimplemented;
7174 #endif
7175 #ifdef TARGET_NR_sysfs
7176 case TARGET_NR_sysfs:
7177 goto unimplemented;
7178 #endif
7179 case TARGET_NR_personality:
7180 ret = get_errno(personality(arg1));
7181 break;
7182 #ifdef TARGET_NR_afs_syscall
7183 case TARGET_NR_afs_syscall:
7184 goto unimplemented;
7185 #endif
7186 #ifdef TARGET_NR__llseek /* Not on alpha */
7187 case TARGET_NR__llseek:
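/* Hosts without __NR_llseek emulate it with a 64-bit lseek built from the
 * high (arg2) and low (arg3) offset halves; either way the 64-bit result is
 * stored to the target pointer in arg4. */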
7189 int64_t res;
7190 #if !defined(__NR_llseek)
7191 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
7192 if (res == -1) {
7193 ret = get_errno(res);
7194 } else {
7195 ret = 0;
7197 #else
7198 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
7199 #endif
7200 if ((ret == 0) && put_user_s64(res, arg4)) {
7201 goto efault;
7204 break;
7205 #endif
7206 case TARGET_NR_getdents:
7207 #ifdef __NR_getdents
7208 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
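/* With a 32-bit target ABI on a 64-bit host the linux_dirent layouts
 * differ, so read into a bounce buffer and repack each record into the
 * target's dirent format. */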
7210 struct target_dirent *target_dirp;
7211 struct linux_dirent *dirp;
7212 abi_long count = arg3;
7214 dirp = malloc(count);
7215 if (!dirp) {
7216 ret = -TARGET_ENOMEM;
7217 goto fail;
7220 ret = get_errno(sys_getdents(arg1, dirp, count));
7221 if (!is_error(ret)) {
7222 struct linux_dirent *de;
7223 struct target_dirent *tde;
7224 int len = ret;
7225 int reclen, treclen;
7226 int count1, tnamelen;
7228 count1 = 0;
7229 de = dirp;
7230 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7231 goto efault;
7232 tde = target_dirp;
7233 while (len > 0) {
7234 reclen = de->d_reclen;
7235 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
7236 assert(tnamelen >= 0);
7237 treclen = tnamelen + offsetof(struct target_dirent, d_name);
7238 assert(count1 + treclen <= count);
7239 tde->d_reclen = tswap16(treclen);
7240 tde->d_ino = tswapal(de->d_ino);
7241 tde->d_off = tswapal(de->d_off);
7242 memcpy(tde->d_name, de->d_name, tnamelen);
7243 de = (struct linux_dirent *)((char *)de + reclen);
7244 len -= reclen;
7245 tde = (struct target_dirent *)((char *)tde + treclen);
7246 count1 += treclen;
7248 ret = count1;
7249 unlock_user(target_dirp, arg2, ret);
7251 free(dirp);
7253 #else
7255 struct linux_dirent *dirp;
7256 abi_long count = arg3;
7258 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7259 goto efault;
7260 ret = get_errno(sys_getdents(arg1, dirp, count));
7261 if (!is_error(ret)) {
7262 struct linux_dirent *de;
7263 int len = ret;
7264 int reclen;
7265 de = dirp;
7266 while (len > 0) {
7267 reclen = de->d_reclen;
7268 if (reclen > len)
7269 break;
7270 de->d_reclen = tswap16(reclen);
7271 tswapls(&de->d_ino);
7272 tswapls(&de->d_off);
7273 de = (struct linux_dirent *)((char *)de + reclen);
7274 len -= reclen;
7277 unlock_user(dirp, arg2, ret);
7279 #endif
7280 #else
7281 /* Implement getdents in terms of getdents64 */
7283 struct linux_dirent64 *dirp;
7284 abi_long count = arg3;
7286 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
7287 if (!dirp) {
7288 goto efault;
7290 ret = get_errno(sys_getdents64(arg1, dirp, count));
7291 if (!is_error(ret)) {
7292 /* Convert the dirent64 structs to target dirent. We do this
7293 * in-place, since we can guarantee that a target_dirent is no
7294 * larger than a dirent64; however this means we have to be
7295 * careful to read everything before writing in the new format. */
7297 struct linux_dirent64 *de;
7298 struct target_dirent *tde;
7299 int len = ret;
7300 int tlen = 0;
7302 de = dirp;
7303 tde = (struct target_dirent *)dirp;
7304 while (len > 0) {
7305 int namelen, treclen;
7306 int reclen = de->d_reclen;
7307 uint64_t ino = de->d_ino;
7308 int64_t off = de->d_off;
7309 uint8_t type = de->d_type;
7311 namelen = strlen(de->d_name);
7312 treclen = offsetof(struct target_dirent, d_name)
7313 + namelen + 2;
7314 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
7316 memmove(tde->d_name, de->d_name, namelen + 1);
7317 tde->d_ino = tswapal(ino);
7318 tde->d_off = tswapal(off);
7319 tde->d_reclen = tswap16(treclen);
7320 /* The target_dirent type is in what was formerly a padding
7321 * byte at the end of the structure: */
7323 *(((char *)tde) + treclen - 1) = type;
7325 de = (struct linux_dirent64 *)((char *)de + reclen);
7326 tde = (struct target_dirent *)((char *)tde + treclen);
7327 len -= reclen;
7328 tlen += treclen;
7330 ret = tlen;
7332 unlock_user(dirp, arg2, ret);
7334 #endif
7335 break;
7336 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
7337 case TARGET_NR_getdents64:
7339 struct linux_dirent64 *dirp;
7340 abi_long count = arg3;
7341 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
7342 goto efault;
7343 ret = get_errno(sys_getdents64(arg1, dirp, count));
7344 if (!is_error(ret)) {
7345 struct linux_dirent64 *de;
7346 int len = ret;
7347 int reclen;
7348 de = dirp;
7349 while (len > 0) {
7350 reclen = de->d_reclen;
7351 if (reclen > len)
7352 break;
7353 de->d_reclen = tswap16(reclen);
7354 tswap64s((uint64_t *)&de->d_ino);
7355 tswap64s((uint64_t *)&de->d_off);
7356 de = (struct linux_dirent64 *)((char *)de + reclen);
7357 len -= reclen;
7360 unlock_user(dirp, arg2, ret);
7362 break;
7363 #endif /* TARGET_NR_getdents64 */
7364 #if defined(TARGET_NR__newselect)
7365 case TARGET_NR__newselect:
7366 ret = do_select(arg1, arg2, arg3, arg4, arg5);
7367 break;
7368 #endif
7369 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
7370 # ifdef TARGET_NR_poll
7371 case TARGET_NR_poll:
7372 # endif
7373 # ifdef TARGET_NR_ppoll
7374 case TARGET_NR_ppoll:
7375 # endif
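/* Convert the target pollfd array to host format; for ppoll the timeout
 * (timespec) and signal mask are converted as well, and revents are copied
 * back to the target afterwards. */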
7377 struct target_pollfd *target_pfd;
7378 unsigned int nfds = arg2;
7379 int timeout = arg3;
7380 struct pollfd *pfd;
7381 unsigned int i;
7383 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
7384 if (!target_pfd)
7385 goto efault;
7387 pfd = alloca(sizeof(struct pollfd) * nfds);
7388 for(i = 0; i < nfds; i++) {
7389 pfd[i].fd = tswap32(target_pfd[i].fd);
7390 pfd[i].events = tswap16(target_pfd[i].events);
7393 # ifdef TARGET_NR_ppoll
7394 if (num == TARGET_NR_ppoll) {
7395 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
7396 target_sigset_t *target_set;
7397 sigset_t _set, *set = &_set;
7399 if (arg3) {
7400 if (target_to_host_timespec(timeout_ts, arg3)) {
7401 unlock_user(target_pfd, arg1, 0);
7402 goto efault;
7404 } else {
7405 timeout_ts = NULL;
7408 if (arg4) {
7409 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
7410 if (!target_set) {
7411 unlock_user(target_pfd, arg1, 0);
7412 goto efault;
7414 target_to_host_sigset(set, target_set);
7415 } else {
7416 set = NULL;
7419 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
7421 if (!is_error(ret) && arg3) {
7422 host_to_target_timespec(arg3, timeout_ts);
7424 if (arg4) {
7425 unlock_user(target_set, arg4, 0);
7427 } else
7428 # endif
7429 ret = get_errno(poll(pfd, nfds, timeout));
7431 if (!is_error(ret)) {
7432 for(i = 0; i < nfds; i++) {
7433 target_pfd[i].revents = tswap16(pfd[i].revents);
7436 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
7438 break;
7439 #endif
7440 case TARGET_NR_flock:
7441 /* NOTE: the flock constant seems to be the same for every
7442 Linux platform */
7443 ret = get_errno(flock(arg1, arg2));
7444 break;
7445 case TARGET_NR_readv:
7447 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
7448 if (vec != NULL) {
7449 ret = get_errno(readv(arg1, vec, arg3));
7450 unlock_iovec(vec, arg2, arg3, 1);
7451 } else {
7452 ret = -host_to_target_errno(errno);
7455 break;
7456 case TARGET_NR_writev:
7458 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
7459 if (vec != NULL) {
7460 ret = get_errno(writev(arg1, vec, arg3));
7461 unlock_iovec(vec, arg2, arg3, 0);
7462 } else {
7463 ret = -host_to_target_errno(errno);
7466 break;
7467 case TARGET_NR_getsid:
7468 ret = get_errno(getsid(arg1));
7469 break;
7470 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
7471 case TARGET_NR_fdatasync:
7472 ret = get_errno(fdatasync(arg1));
7473 break;
7474 #endif
7475 case TARGET_NR__sysctl:
7476 /* We don't implement this, but ENOTDIR is always a safe
7477 return value. */
7478 ret = -TARGET_ENOTDIR;
7479 break;
7480 case TARGET_NR_sched_getaffinity:
7482 unsigned int mask_size;
7483 unsigned long *mask;
7486 /* sched_getaffinity needs multiples of ulong, so we need to take
7487 * care of mismatches between target ulong and host ulong sizes. */
7489 if (arg2 & (sizeof(abi_ulong) - 1)) {
7490 ret = -TARGET_EINVAL;
7491 break;
7493 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7495 mask = alloca(mask_size);
7496 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
7498 if (!is_error(ret)) {
7499 if (copy_to_user(arg3, mask, ret)) {
7500 goto efault;
7504 break;
7505 case TARGET_NR_sched_setaffinity:
7507 unsigned int mask_size;
7508 unsigned long *mask;
7511 /* sched_setaffinity needs multiples of ulong, so we need to take
7512 * care of mismatches between target ulong and host ulong sizes. */
7514 if (arg2 & (sizeof(abi_ulong) - 1)) {
7515 ret = -TARGET_EINVAL;
7516 break;
7518 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
7520 mask = alloca(mask_size);
7521 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
7522 goto efault;
7524 memcpy(mask, p, arg2);
7525 unlock_user_struct(p, arg2, 0);
7527 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
7529 break;
7530 case TARGET_NR_sched_setparam:
7532 struct sched_param *target_schp;
7533 struct sched_param schp;
7535 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
7536 goto efault;
7537 schp.sched_priority = tswap32(target_schp->sched_priority);
7538 unlock_user_struct(target_schp, arg2, 0);
7539 ret = get_errno(sched_setparam(arg1, &schp));
7541 break;
7542 case TARGET_NR_sched_getparam:
7544 struct sched_param *target_schp;
7545 struct sched_param schp;
7546 ret = get_errno(sched_getparam(arg1, &schp));
7547 if (!is_error(ret)) {
7548 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
7549 goto efault;
7550 target_schp->sched_priority = tswap32(schp.sched_priority);
7551 unlock_user_struct(target_schp, arg2, 1);
7554 break;
7555 case TARGET_NR_sched_setscheduler:
7557 struct sched_param *target_schp;
7558 struct sched_param schp;
7559 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
7560 goto efault;
7561 schp.sched_priority = tswap32(target_schp->sched_priority);
7562 unlock_user_struct(target_schp, arg3, 0);
7563 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
7565 break;
7566 case TARGET_NR_sched_getscheduler:
7567 ret = get_errno(sched_getscheduler(arg1));
7568 break;
7569 case TARGET_NR_sched_yield:
7570 ret = get_errno(sched_yield());
7571 break;
7572 case TARGET_NR_sched_get_priority_max:
7573 ret = get_errno(sched_get_priority_max(arg1));
7574 break;
7575 case TARGET_NR_sched_get_priority_min:
7576 ret = get_errno(sched_get_priority_min(arg1));
7577 break;
7578 case TARGET_NR_sched_rr_get_interval:
7580 struct timespec ts;
7581 ret = get_errno(sched_rr_get_interval(arg1, &ts));
7582 if (!is_error(ret)) {
7583 host_to_target_timespec(arg2, &ts);
7586 break;
7587 case TARGET_NR_nanosleep:
7589 struct timespec req, rem;
7590 target_to_host_timespec(&req, arg1);
7591 ret = get_errno(nanosleep(&req, &rem));
7592 if (is_error(ret) && arg2) {
7593 host_to_target_timespec(arg2, &rem);
7596 break;
7597 #ifdef TARGET_NR_query_module
7598 case TARGET_NR_query_module:
7599 goto unimplemented;
7600 #endif
7601 #ifdef TARGET_NR_nfsservctl
7602 case TARGET_NR_nfsservctl:
7603 goto unimplemented;
7604 #endif
7605 case TARGET_NR_prctl:
7606 switch (arg1) {
7607 case PR_GET_PDEATHSIG:
7609 int deathsig;
7610 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
7611 if (!is_error(ret) && arg2
7612 && put_user_ual(deathsig, arg2)) {
7613 goto efault;
7615 break;
7617 #ifdef PR_GET_NAME
7618 case PR_GET_NAME:
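/* The thread name buffer is 16 bytes, matching the kernel's
 * TASK_COMM_LEN. */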
7620 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
7621 if (!name) {
7622 goto efault;
7624 ret = get_errno(prctl(arg1, (unsigned long)name,
7625 arg3, arg4, arg5));
7626 unlock_user(name, arg2, 16);
7627 break;
7629 case PR_SET_NAME:
7631 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
7632 if (!name) {
7633 goto efault;
7635 ret = get_errno(prctl(arg1, (unsigned long)name,
7636 arg3, arg4, arg5));
7637 unlock_user(name, arg2, 0);
7638 break;
7640 #endif
7641 default:
7642 /* Most prctl options have no pointer arguments */
7643 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
7644 break;
7646 break;
7647 #ifdef TARGET_NR_arch_prctl
7648 case TARGET_NR_arch_prctl:
7649 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
7650 ret = do_arch_prctl(cpu_env, arg1, arg2);
7651 break;
7652 #else
7653 goto unimplemented;
7654 #endif
7655 #endif
7656 #ifdef TARGET_NR_pread64
7657 case TARGET_NR_pread64:
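/* When the target ABI aligns 64-bit syscall arguments to register pairs
 * (regpairs_aligned), a padding slot precedes the offset, so shift the
 * argument halves down before target_offset64() recombines them. */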
7658 if (regpairs_aligned(cpu_env)) {
7659 arg4 = arg5;
7660 arg5 = arg6;
7662 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7663 goto efault;
7664 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
7665 unlock_user(p, arg2, ret);
7666 break;
7667 case TARGET_NR_pwrite64:
7668 if (regpairs_aligned(cpu_env)) {
7669 arg4 = arg5;
7670 arg5 = arg6;
7672 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7673 goto efault;
7674 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
7675 unlock_user(p, arg2, 0);
7676 break;
7677 #endif
7678 case TARGET_NR_getcwd:
7679 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
7680 goto efault;
7681 ret = get_errno(sys_getcwd1(p, arg2));
7682 unlock_user(p, arg1, ret);
7683 break;
7684 case TARGET_NR_capget:
7685 case TARGET_NR_capset:
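/* capget and capset share the conversion code: the cap header is always
 * converted, and capability version 2 and later use two
 * __user_cap_data_struct entries instead of one. */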
7687 struct target_user_cap_header *target_header;
7688 struct target_user_cap_data *target_data = NULL;
7689 struct __user_cap_header_struct header;
7690 struct __user_cap_data_struct data[2];
7691 struct __user_cap_data_struct *dataptr = NULL;
7692 int i, target_datalen;
7693 int data_items = 1;
7695 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
7696 goto efault;
7698 header.version = tswap32(target_header->version);
7699 header.pid = tswap32(target_header->pid);
7701 if (header.version != _LINUX_CAPABILITY_VERSION) {
7702 /* Version 2 and up takes pointer to two user_data structs */
7703 data_items = 2;
7706 target_datalen = sizeof(*target_data) * data_items;
7708 if (arg2) {
7709 if (num == TARGET_NR_capget) {
7710 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
7711 } else {
7712 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
7714 if (!target_data) {
7715 unlock_user_struct(target_header, arg1, 0);
7716 goto efault;
7719 if (num == TARGET_NR_capset) {
7720 for (i = 0; i < data_items; i++) {
7721 data[i].effective = tswap32(target_data[i].effective);
7722 data[i].permitted = tswap32(target_data[i].permitted);
7723 data[i].inheritable = tswap32(target_data[i].inheritable);
7727 dataptr = data;
7730 if (num == TARGET_NR_capget) {
7731 ret = get_errno(capget(&header, dataptr));
7732 } else {
7733 ret = get_errno(capset(&header, dataptr));
7736 /* The kernel always updates version for both capget and capset */
7737 target_header->version = tswap32(header.version);
7738 unlock_user_struct(target_header, arg1, 1);
7740 if (arg2) {
7741 if (num == TARGET_NR_capget) {
7742 for (i = 0; i < data_items; i++) {
7743 target_data[i].effective = tswap32(data[i].effective);
7744 target_data[i].permitted = tswap32(data[i].permitted);
7745 target_data[i].inheritable = tswap32(data[i].inheritable);
7747 unlock_user(target_data, arg2, target_datalen);
7748 } else {
7749 unlock_user(target_data, arg2, 0);
7752 break;
7754 case TARGET_NR_sigaltstack:
7755 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
7756 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
7757 defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
7758 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
7759 break;
7760 #else
7761 goto unimplemented;
7762 #endif
7764 #ifdef CONFIG_SENDFILE
7765 case TARGET_NR_sendfile:
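/* The target offset (if any) is read into a host off_t, passed by reference
 * to sendfile(), and the updated offset is written back; sendfile64 below
 * does the same with a 64-bit offset. */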
7767 off_t *offp = NULL;
7768 off_t off;
7769 if (arg3) {
7770 ret = get_user_sal(off, arg3);
7771 if (is_error(ret)) {
7772 break;
7774 offp = &off;
7776 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7777 if (!is_error(ret) && arg3) {
7778 abi_long ret2 = put_user_sal(off, arg3);
7779 if (is_error(ret2)) {
7780 ret = ret2;
7783 break;
7785 #ifdef TARGET_NR_sendfile64
7786 case TARGET_NR_sendfile64:
7788 off_t *offp = NULL;
7789 off_t off;
7790 if (arg3) {
7791 ret = get_user_s64(off, arg3);
7792 if (is_error(ret)) {
7793 break;
7795 offp = &off;
7797 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
7798 if (!is_error(ret) && arg3) {
7799 abi_long ret2 = put_user_s64(off, arg3);
7800 if (is_error(ret2)) {
7801 ret = ret2;
7804 break;
7806 #endif
7807 #else
7808 case TARGET_NR_sendfile:
7809 #ifdef TARGET_NR_sendfile64
7810 case TARGET_NR_sendfile64:
7811 #endif
7812 goto unimplemented;
7813 #endif
7815 #ifdef TARGET_NR_getpmsg
7816 case TARGET_NR_getpmsg:
7817 goto unimplemented;
7818 #endif
7819 #ifdef TARGET_NR_putpmsg
7820 case TARGET_NR_putpmsg:
7821 goto unimplemented;
7822 #endif
7823 #ifdef TARGET_NR_vfork
7824 case TARGET_NR_vfork:
7825 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
7826 0, 0, 0, 0));
7827 break;
7828 #endif
7829 #ifdef TARGET_NR_ugetrlimit
7830 case TARGET_NR_ugetrlimit:
7832 struct rlimit rlim;
7833 int resource = target_to_host_resource(arg1);
7834 ret = get_errno(getrlimit(resource, &rlim));
7835 if (!is_error(ret)) {
7836 struct target_rlimit *target_rlim;
7837 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
7838 goto efault;
7839 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
7840 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
7841 unlock_user_struct(target_rlim, arg2, 1);
7843 break;
7845 #endif
7846 #ifdef TARGET_NR_truncate64
7847 case TARGET_NR_truncate64:
7848 if (!(p = lock_user_string(arg1)))
7849 goto efault;
7850 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
7851 unlock_user(p, arg1, 0);
7852 break;
7853 #endif
7854 #ifdef TARGET_NR_ftruncate64
7855 case TARGET_NR_ftruncate64:
7856 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
7857 break;
7858 #endif
7859 #ifdef TARGET_NR_stat64
7860 case TARGET_NR_stat64:
7861 if (!(p = lock_user_string(arg1)))
7862 goto efault;
7863 ret = get_errno(stat(path(p), &st));
7864 unlock_user(p, arg1, 0);
7865 if (!is_error(ret))
7866 ret = host_to_target_stat64(cpu_env, arg2, &st);
7867 break;
7868 #endif
7869 #ifdef TARGET_NR_lstat64
7870 case TARGET_NR_lstat64:
7871 if (!(p = lock_user_string(arg1)))
7872 goto efault;
7873 ret = get_errno(lstat(path(p), &st));
7874 unlock_user(p, arg1, 0);
7875 if (!is_error(ret))
7876 ret = host_to_target_stat64(cpu_env, arg2, &st);
7877 break;
7878 #endif
7879 #ifdef TARGET_NR_fstat64
7880 case TARGET_NR_fstat64:
7881 ret = get_errno(fstat(arg1, &st));
7882 if (!is_error(ret))
7883 ret = host_to_target_stat64(cpu_env, arg2, &st);
7884 break;
7885 #endif
7886 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
7887 #ifdef TARGET_NR_fstatat64
7888 case TARGET_NR_fstatat64:
7889 #endif
7890 #ifdef TARGET_NR_newfstatat
7891 case TARGET_NR_newfstatat:
7892 #endif
7893 if (!(p = lock_user_string(arg2)))
7894 goto efault;
7895 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
7896 if (!is_error(ret))
7897 ret = host_to_target_stat64(cpu_env, arg3, &st);
7898 break;
7899 #endif
7900 case TARGET_NR_lchown:
7901 if (!(p = lock_user_string(arg1)))
7902 goto efault;
7903 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7904 unlock_user(p, arg1, 0);
7905 break;
7906 #ifdef TARGET_NR_getuid
7907 case TARGET_NR_getuid:
7908 ret = get_errno(high2lowuid(getuid()));
7909 break;
7910 #endif
7911 #ifdef TARGET_NR_getgid
7912 case TARGET_NR_getgid:
7913 ret = get_errno(high2lowgid(getgid()));
7914 break;
7915 #endif
7916 #ifdef TARGET_NR_geteuid
7917 case TARGET_NR_geteuid:
7918 ret = get_errno(high2lowuid(geteuid()));
7919 break;
7920 #endif
7921 #ifdef TARGET_NR_getegid
7922 case TARGET_NR_getegid:
7923 ret = get_errno(high2lowgid(getegid()));
7924 break;
7925 #endif
7926 case TARGET_NR_setreuid:
7927 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7928 break;
7929 case TARGET_NR_setregid:
7930 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7931 break;
7932 case TARGET_NR_getgroups:
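/* The group list is converted element by element between host gid_t and the
 * target's (possibly 16-bit) target_id, with byte swapping as needed. */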
7934 int gidsetsize = arg1;
7935 target_id *target_grouplist;
7936 gid_t *grouplist;
7937 int i;
7939 grouplist = alloca(gidsetsize * sizeof(gid_t));
7940 ret = get_errno(getgroups(gidsetsize, grouplist));
7941 if (gidsetsize == 0)
7942 break;
7943 if (!is_error(ret)) {
7944 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
7945 if (!target_grouplist)
7946 goto efault;
7947 for(i = 0;i < ret; i++)
7948 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7949 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
7952 break;
7953 case TARGET_NR_setgroups:
7955 int gidsetsize = arg1;
7956 target_id *target_grouplist;
7957 gid_t *grouplist = NULL;
7958 int i;
7959 if (gidsetsize) {
7960 grouplist = alloca(gidsetsize * sizeof(gid_t));
7961 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
7962 if (!target_grouplist) {
7963 ret = -TARGET_EFAULT;
7964 goto fail;
7966 for (i = 0; i < gidsetsize; i++) {
7967 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7969 unlock_user(target_grouplist, arg2, 0);
7971 ret = get_errno(setgroups(gidsetsize, grouplist));
7973 break;
7974 case TARGET_NR_fchown:
7975 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7976 break;
7977 #if defined(TARGET_NR_fchownat)
7978 case TARGET_NR_fchownat:
7979 if (!(p = lock_user_string(arg2)))
7980 goto efault;
7981 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
7982 low2highgid(arg4), arg5));
7983 unlock_user(p, arg2, 0);
7984 break;
7985 #endif
7986 #ifdef TARGET_NR_setresuid
7987 case TARGET_NR_setresuid:
7988 ret = get_errno(setresuid(low2highuid(arg1),
7989 low2highuid(arg2),
7990 low2highuid(arg3)));
7991 break;
7992 #endif
7993 #ifdef TARGET_NR_getresuid
7994 case TARGET_NR_getresuid:
7996 uid_t ruid, euid, suid;
7997 ret = get_errno(getresuid(&ruid, &euid, &suid));
7998 if (!is_error(ret)) {
7999 if (put_user_id(high2lowuid(ruid), arg1)
8000 || put_user_id(high2lowuid(euid), arg2)
8001 || put_user_id(high2lowuid(suid), arg3))
8002 goto efault;
8005 break;
8006 #endif
8007 #ifdef TARGET_NR_setresgid
8008 case TARGET_NR_setresgid:
8009 ret = get_errno(setresgid(low2highgid(arg1),
8010 low2highgid(arg2),
8011 low2highgid(arg3)));
8012 break;
8013 #endif
8014 #ifdef TARGET_NR_getresgid
8015 case TARGET_NR_getresgid:
8017 gid_t rgid, egid, sgid;
8018 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8019 if (!is_error(ret)) {
8020 if (put_user_id(high2lowgid(rgid), arg1)
8021 || put_user_id(high2lowgid(egid), arg2)
8022 || put_user_id(high2lowgid(sgid), arg3))
8023 goto efault;
8026 break;
8027 #endif
8028 case TARGET_NR_chown:
8029 if (!(p = lock_user_string(arg1)))
8030 goto efault;
8031 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
8032 unlock_user(p, arg1, 0);
8033 break;
8034 case TARGET_NR_setuid:
8035 ret = get_errno(setuid(low2highuid(arg1)));
8036 break;
8037 case TARGET_NR_setgid:
8038 ret = get_errno(setgid(low2highgid(arg1)));
8039 break;
8040 case TARGET_NR_setfsuid:
8041 ret = get_errno(setfsuid(arg1));
8042 break;
8043 case TARGET_NR_setfsgid:
8044 ret = get_errno(setfsgid(arg1));
8045 break;
8047 #ifdef TARGET_NR_lchown32
8048 case TARGET_NR_lchown32:
8049 if (!(p = lock_user_string(arg1)))
8050 goto efault;
8051 ret = get_errno(lchown(p, arg2, arg3));
8052 unlock_user(p, arg1, 0);
8053 break;
8054 #endif
8055 #ifdef TARGET_NR_getuid32
8056 case TARGET_NR_getuid32:
8057 ret = get_errno(getuid());
8058 break;
8059 #endif
8061 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
8062 /* Alpha specific */
8063 case TARGET_NR_getxuid:
8065 uid_t euid;
8066 euid = geteuid();
8067 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
8069 ret = get_errno(getuid());
8070 break;
8071 #endif
8072 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
8073 /* Alpha specific */
8074 case TARGET_NR_getxgid:
8076 gid_t egid;
8077 egid = getegid();
8078 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
8080 ret = get_errno(getgid());
8081 break;
8082 #endif
8083 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
8084 /* Alpha specific */
8085 case TARGET_NR_osf_getsysinfo:
8086 ret = -TARGET_EOPNOTSUPP;
8087 switch (arg1) {
8088 case TARGET_GSI_IEEE_FP_CONTROL:
8090 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
8092 /* Copied from linux ieee_fpcr_to_swcr. */
8093 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
8094 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
8095 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
8096 | SWCR_TRAP_ENABLE_DZE
8097 | SWCR_TRAP_ENABLE_OVF);
8098 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
8099 | SWCR_TRAP_ENABLE_INE);
8100 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
8101 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
8103 if (put_user_u64 (swcr, arg2))
8104 goto efault;
8105 ret = 0;
8107 break;
8109 /* case GSI_IEEE_STATE_AT_SIGNAL:
8110 -- Not implemented in linux kernel.
8111 case GSI_UACPROC:
8112 -- Retrieves current unaligned access state; not much used.
8113 case GSI_PROC_TYPE:
8114 -- Retrieves implver information; surely not used.
8115 case GSI_GET_HWRPB:
8116 -- Grabs a copy of the HWRPB; surely not used.
8117 */
8118 }
8119 break;
8120 #endif
8121 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
8122 /* Alpha specific */
8123 case TARGET_NR_osf_setsysinfo:
8124 ret = -TARGET_EOPNOTSUPP;
8125 switch (arg1) {
8126 case TARGET_SSI_IEEE_FP_CONTROL:
8128 uint64_t swcr, fpcr, orig_fpcr;
8130 if (get_user_u64 (swcr, arg2)) {
8131 goto efault;
8133 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8134 fpcr = orig_fpcr & FPCR_DYN_MASK;
8136 /* Copied from linux ieee_swcr_to_fpcr. */
8137 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
8138 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
8139 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
8140 | SWCR_TRAP_ENABLE_DZE
8141 | SWCR_TRAP_ENABLE_OVF)) << 48;
8142 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
8143 | SWCR_TRAP_ENABLE_INE)) << 57;
8144 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
8145 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
8147 cpu_alpha_store_fpcr(cpu_env, fpcr);
8148 ret = 0;
8150 break;
8152 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
8154 uint64_t exc, fpcr, orig_fpcr;
8155 int si_code;
8157 if (get_user_u64(exc, arg2)) {
8158 goto efault;
8161 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
8163 /* We only add to the exception status here. */
8164 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
8166 cpu_alpha_store_fpcr(cpu_env, fpcr);
8167 ret = 0;
8169 /* Old exceptions are not signaled. */
8170 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
8172 /* If any exceptions set by this call,
8173 and are unmasked, send a signal. */
8174 si_code = 0;
8175 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
8176 si_code = TARGET_FPE_FLTRES;
8178 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
8179 si_code = TARGET_FPE_FLTUND;
8181 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
8182 si_code = TARGET_FPE_FLTOVF;
8184 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
8185 si_code = TARGET_FPE_FLTDIV;
8187 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
8188 si_code = TARGET_FPE_FLTINV;
8190 if (si_code != 0) {
8191 target_siginfo_t info;
8192 info.si_signo = SIGFPE;
8193 info.si_errno = 0;
8194 info.si_code = si_code;
8195 info._sifields._sigfault._addr
8196 = ((CPUArchState *)cpu_env)->pc;
8197 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
8200 break;
8202 /* case SSI_NVPAIRS:
8203 -- Used with SSIN_UACPROC to enable unaligned accesses.
8204 case SSI_IEEE_STATE_AT_SIGNAL:
8205 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
8206 -- Not implemented in linux kernel
8207 */
8208 }
8209 break;
8210 #endif
8211 #ifdef TARGET_NR_osf_sigprocmask
8212 /* Alpha specific. */
8213 case TARGET_NR_osf_sigprocmask:
8215 abi_ulong mask;
8216 int how;
8217 sigset_t set, oldset;
8219 switch(arg1) {
8220 case TARGET_SIG_BLOCK:
8221 how = SIG_BLOCK;
8222 break;
8223 case TARGET_SIG_UNBLOCK:
8224 how = SIG_UNBLOCK;
8225 break;
8226 case TARGET_SIG_SETMASK:
8227 how = SIG_SETMASK;
8228 break;
8229 default:
8230 ret = -TARGET_EINVAL;
8231 goto fail;
8233 mask = arg2;
8234 target_to_host_old_sigset(&set, &mask);
8235 do_sigprocmask(how, &set, &oldset);
8236 host_to_target_old_sigset(&mask, &oldset);
8237 ret = mask;
8239 break;
8240 #endif
8242 #ifdef TARGET_NR_getgid32
8243 case TARGET_NR_getgid32:
8244 ret = get_errno(getgid());
8245 break;
8246 #endif
8247 #ifdef TARGET_NR_geteuid32
8248 case TARGET_NR_geteuid32:
8249 ret = get_errno(geteuid());
8250 break;
8251 #endif
8252 #ifdef TARGET_NR_getegid32
8253 case TARGET_NR_getegid32:
8254 ret = get_errno(getegid());
8255 break;
8256 #endif
8257 #ifdef TARGET_NR_setreuid32
8258 case TARGET_NR_setreuid32:
8259 ret = get_errno(setreuid(arg1, arg2));
8260 break;
8261 #endif
8262 #ifdef TARGET_NR_setregid32
8263 case TARGET_NR_setregid32:
8264 ret = get_errno(setregid(arg1, arg2));
8265 break;
8266 #endif
8267 #ifdef TARGET_NR_getgroups32
8268 case TARGET_NR_getgroups32:
8270 int gidsetsize = arg1;
8271 uint32_t *target_grouplist;
8272 gid_t *grouplist;
8273 int i;
8275 grouplist = alloca(gidsetsize * sizeof(gid_t));
8276 ret = get_errno(getgroups(gidsetsize, grouplist));
8277 if (gidsetsize == 0)
8278 break;
8279 if (!is_error(ret)) {
8280 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
8281 if (!target_grouplist) {
8282 ret = -TARGET_EFAULT;
8283 goto fail;
8285 for(i = 0;i < ret; i++)
8286 target_grouplist[i] = tswap32(grouplist[i]);
8287 unlock_user(target_grouplist, arg2, gidsetsize * 4);
8290 break;
8291 #endif
8292 #ifdef TARGET_NR_setgroups32
8293 case TARGET_NR_setgroups32:
8295 int gidsetsize = arg1;
8296 uint32_t *target_grouplist;
8297 gid_t *grouplist;
8298 int i;
8300 grouplist = alloca(gidsetsize * sizeof(gid_t));
8301 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
8302 if (!target_grouplist) {
8303 ret = -TARGET_EFAULT;
8304 goto fail;
8306 for(i = 0;i < gidsetsize; i++)
8307 grouplist[i] = tswap32(target_grouplist[i]);
8308 unlock_user(target_grouplist, arg2, 0);
8309 ret = get_errno(setgroups(gidsetsize, grouplist));
8311 break;
8312 #endif
8313 #ifdef TARGET_NR_fchown32
8314 case TARGET_NR_fchown32:
8315 ret = get_errno(fchown(arg1, arg2, arg3));
8316 break;
8317 #endif
8318 #ifdef TARGET_NR_setresuid32
8319 case TARGET_NR_setresuid32:
8320 ret = get_errno(setresuid(arg1, arg2, arg3));
8321 break;
8322 #endif
8323 #ifdef TARGET_NR_getresuid32
8324 case TARGET_NR_getresuid32:
8326 uid_t ruid, euid, suid;
8327 ret = get_errno(getresuid(&ruid, &euid, &suid));
8328 if (!is_error(ret)) {
8329 if (put_user_u32(ruid, arg1)
8330 || put_user_u32(euid, arg2)
8331 || put_user_u32(suid, arg3))
8332 goto efault;
8335 break;
8336 #endif
8337 #ifdef TARGET_NR_setresgid32
8338 case TARGET_NR_setresgid32:
8339 ret = get_errno(setresgid(arg1, arg2, arg3));
8340 break;
8341 #endif
8342 #ifdef TARGET_NR_getresgid32
8343 case TARGET_NR_getresgid32:
8345 gid_t rgid, egid, sgid;
8346 ret = get_errno(getresgid(&rgid, &egid, &sgid));
8347 if (!is_error(ret)) {
8348 if (put_user_u32(rgid, arg1)
8349 || put_user_u32(egid, arg2)
8350 || put_user_u32(sgid, arg3))
8351 goto efault;
8354 break;
8355 #endif
8356 #ifdef TARGET_NR_chown32
8357 case TARGET_NR_chown32:
8358 if (!(p = lock_user_string(arg1)))
8359 goto efault;
8360 ret = get_errno(chown(p, arg2, arg3));
8361 unlock_user(p, arg1, 0);
8362 break;
8363 #endif
8364 #ifdef TARGET_NR_setuid32
8365 case TARGET_NR_setuid32:
8366 ret = get_errno(setuid(arg1));
8367 break;
8368 #endif
8369 #ifdef TARGET_NR_setgid32
8370 case TARGET_NR_setgid32:
8371 ret = get_errno(setgid(arg1));
8372 break;
8373 #endif
8374 #ifdef TARGET_NR_setfsuid32
8375 case TARGET_NR_setfsuid32:
8376 ret = get_errno(setfsuid(arg1));
8377 break;
8378 #endif
8379 #ifdef TARGET_NR_setfsgid32
8380 case TARGET_NR_setfsgid32:
8381 ret = get_errno(setfsgid(arg1));
8382 break;
8383 #endif
8385 case TARGET_NR_pivot_root:
8386 goto unimplemented;
8387 #ifdef TARGET_NR_mincore
8388 case TARGET_NR_mincore:
8390 void *a;
8391 ret = -TARGET_EFAULT;
8392 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
8393 goto efault;
8394 if (!(p = lock_user_string(arg3)))
8395 goto mincore_fail;
8396 ret = get_errno(mincore(a, arg2, p));
8397 unlock_user(p, arg3, ret);
8398 mincore_fail:
8399 unlock_user(a, arg1, 0);
8401 break;
8402 #endif
8403 #ifdef TARGET_NR_arm_fadvise64_64
8404 case TARGET_NR_arm_fadvise64_64:
8405 {
8406 /*
8407 * arm_fadvise64_64 looks like fadvise64_64 but
8408 * with different argument order
8409 */
8410 abi_long temp;
8411 temp = arg3;
8412 arg3 = arg4;
8413 arg4 = temp;
8414 }
8415 #endif
8416 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
8417 #ifdef TARGET_NR_fadvise64_64
8418 case TARGET_NR_fadvise64_64:
8419 #endif
8420 #ifdef TARGET_NR_fadvise64
8421 case TARGET_NR_fadvise64:
8422 #endif
8423 #ifdef TARGET_S390X
8424 switch (arg4) {
8425 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
8426 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
8427 case 6: arg4 = POSIX_FADV_DONTNEED; break;
8428 case 7: arg4 = POSIX_FADV_NOREUSE; break;
8429 default: break;
8431 #endif
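/* For reference (illustrative, not part of this file): the s390x kernel
 * defines POSIX_FADV_DONTNEED as 6 and POSIX_FADV_NOREUSE as 7 instead of
 * the usual 4 and 5, so the guest values are remapped above before being
 * handed to the host posix_fadvise():
 *
 *   guest 4, 5  -> deliberately invalid host values (rejected as EINVAL)
 *   guest 6     -> host POSIX_FADV_DONTNEED
 *   guest 7     -> host POSIX_FADV_NOREUSE
 */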
8432 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
8433 break;
8434 #endif
8435 #ifdef TARGET_NR_madvise
8436 case TARGET_NR_madvise:
8437 /* A straight passthrough may not be safe because qemu sometimes
8438 turns private file-backed mappings into anonymous mappings.
8439 This will break MADV_DONTNEED.
8440 This is a hint, so ignoring and returning success is ok. */
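/* Illustrative example of the hazard described above: a guest doing
 *
 *     void *m = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *     madvise(m, len, MADV_DONTNEED);
 *
 * expects later reads to be refilled from the file, but if QEMU has turned
 * that mapping into an anonymous one, forwarding MADV_DONTNEED to the host
 * would zero-fill the pages instead; hence the hint is dropped here.
 */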
8441 ret = get_errno(0);
8442 break;
8443 #endif
8444 #if TARGET_ABI_BITS == 32
8445 case TARGET_NR_fcntl64:
8447 int cmd;
8448 struct flock64 fl;
8449 struct target_flock64 *target_fl;
8450 #ifdef TARGET_ARM
8451 struct target_eabi_flock64 *target_efl;
8452 #endif
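/* Why two guest layouts are handled below (sketch only, field order taken
 * from the accesses in this case): the ARM EABI aligns 64-bit members to
 * 8 bytes, so target_eabi_flock64 carries padding after the 16-bit
 * l_type/l_whence fields that the old-ABI target_flock64 (and the host
 * struct flock64) does not have, e.g. roughly:
 *
 *   target_flock64:      l_type, l_whence, l_start, l_len, l_pid
 *   target_eabi_flock64: l_type, l_whence, <pad to 8>, l_start, l_len, l_pid
 */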
8454 cmd = target_to_host_fcntl_cmd(arg2);
8455 if (cmd == -TARGET_EINVAL) {
8456 ret = cmd;
8457 break;
8460 switch(arg2) {
8461 case TARGET_F_GETLK64:
8462 #ifdef TARGET_ARM
8463 if (((CPUARMState *)cpu_env)->eabi) {
8464 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8465 goto efault;
8466 fl.l_type = tswap16(target_efl->l_type);
8467 fl.l_whence = tswap16(target_efl->l_whence);
8468 fl.l_start = tswap64(target_efl->l_start);
8469 fl.l_len = tswap64(target_efl->l_len);
8470 fl.l_pid = tswap32(target_efl->l_pid);
8471 unlock_user_struct(target_efl, arg3, 0);
8472 } else
8473 #endif
8475 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8476 goto efault;
8477 fl.l_type = tswap16(target_fl->l_type);
8478 fl.l_whence = tswap16(target_fl->l_whence);
8479 fl.l_start = tswap64(target_fl->l_start);
8480 fl.l_len = tswap64(target_fl->l_len);
8481 fl.l_pid = tswap32(target_fl->l_pid);
8482 unlock_user_struct(target_fl, arg3, 0);
8484 ret = get_errno(fcntl(arg1, cmd, &fl));
8485 if (ret == 0) {
8486 #ifdef TARGET_ARM
8487 if (((CPUARMState *)cpu_env)->eabi) {
8488 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
8489 goto efault;
8490 target_efl->l_type = tswap16(fl.l_type);
8491 target_efl->l_whence = tswap16(fl.l_whence);
8492 target_efl->l_start = tswap64(fl.l_start);
8493 target_efl->l_len = tswap64(fl.l_len);
8494 target_efl->l_pid = tswap32(fl.l_pid);
8495 unlock_user_struct(target_efl, arg3, 1);
8496 } else
8497 #endif
8499 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
8500 goto efault;
8501 target_fl->l_type = tswap16(fl.l_type);
8502 target_fl->l_whence = tswap16(fl.l_whence);
8503 target_fl->l_start = tswap64(fl.l_start);
8504 target_fl->l_len = tswap64(fl.l_len);
8505 target_fl->l_pid = tswap32(fl.l_pid);
8506 unlock_user_struct(target_fl, arg3, 1);
8509 break;
8511 case TARGET_F_SETLK64:
8512 case TARGET_F_SETLKW64:
8513 #ifdef TARGET_ARM
8514 if (((CPUARMState *)cpu_env)->eabi) {
8515 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
8516 goto efault;
8517 fl.l_type = tswap16(target_efl->l_type);
8518 fl.l_whence = tswap16(target_efl->l_whence);
8519 fl.l_start = tswap64(target_efl->l_start);
8520 fl.l_len = tswap64(target_efl->l_len);
8521 fl.l_pid = tswap32(target_efl->l_pid);
8522 unlock_user_struct(target_efl, arg3, 0);
8523 } else
8524 #endif
8526 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
8527 goto efault;
8528 fl.l_type = tswap16(target_fl->l_type);
8529 fl.l_whence = tswap16(target_fl->l_whence);
8530 fl.l_start = tswap64(target_fl->l_start);
8531 fl.l_len = tswap64(target_fl->l_len);
8532 fl.l_pid = tswap32(target_fl->l_pid);
8533 unlock_user_struct(target_fl, arg3, 0);
8535 ret = get_errno(fcntl(arg1, cmd, &fl));
8536 break;
8537 default:
8538 ret = do_fcntl(arg1, arg2, arg3);
8539 break;
8541 break;
8543 #endif
8544 #ifdef TARGET_NR_cacheflush
8545 case TARGET_NR_cacheflush:
8546 /* self-modifying code is handled automatically, so nothing needed */
8547 ret = 0;
8548 break;
8549 #endif
8550 #ifdef TARGET_NR_security
8551 case TARGET_NR_security:
8552 goto unimplemented;
8553 #endif
8554 #ifdef TARGET_NR_getpagesize
8555 case TARGET_NR_getpagesize:
8556 ret = TARGET_PAGE_SIZE;
8557 break;
8558 #endif
8559 case TARGET_NR_gettid:
8560 ret = get_errno(gettid());
8561 break;
8562 #ifdef TARGET_NR_readahead
8563 case TARGET_NR_readahead:
8564 #if TARGET_ABI_BITS == 32
8565 if (regpairs_aligned(cpu_env)) {
8566 arg2 = arg3;
8567 arg3 = arg4;
8568 arg4 = arg5;
8570 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
8571 #else
8572 ret = get_errno(readahead(arg1, arg2, arg3));
8573 #endif
8574 break;
8575 #endif
8576 #ifdef CONFIG_ATTR
8577 #ifdef TARGET_NR_setxattr
8578 case TARGET_NR_listxattr:
8579 case TARGET_NR_llistxattr:
8581 void *p, *b = 0;
8582 if (arg2) {
8583 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8584 if (!b) {
8585 ret = -TARGET_EFAULT;
8586 break;
8589 p = lock_user_string(arg1);
8590 if (p) {
8591 if (num == TARGET_NR_listxattr) {
8592 ret = get_errno(listxattr(p, b, arg3));
8593 } else {
8594 ret = get_errno(llistxattr(p, b, arg3));
8596 } else {
8597 ret = -TARGET_EFAULT;
8599 unlock_user(p, arg1, 0);
8600 unlock_user(b, arg2, arg3);
8601 break;
8603 case TARGET_NR_flistxattr:
8605 void *b = 0;
8606 if (arg2) {
8607 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8608 if (!b) {
8609 ret = -TARGET_EFAULT;
8610 break;
8613 ret = get_errno(flistxattr(arg1, b, arg3));
8614 unlock_user(b, arg2, arg3);
8615 break;
8617 case TARGET_NR_setxattr:
8618 case TARGET_NR_lsetxattr:
8620 void *p, *n, *v = 0;
8621 if (arg3) {
8622 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8623 if (!v) {
8624 ret = -TARGET_EFAULT;
8625 break;
8628 p = lock_user_string(arg1);
8629 n = lock_user_string(arg2);
8630 if (p && n) {
8631 if (num == TARGET_NR_setxattr) {
8632 ret = get_errno(setxattr(p, n, v, arg4, arg5));
8633 } else {
8634 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
8636 } else {
8637 ret = -TARGET_EFAULT;
8639 unlock_user(p, arg1, 0);
8640 unlock_user(n, arg2, 0);
8641 unlock_user(v, arg3, 0);
8643 break;
8644 case TARGET_NR_fsetxattr:
8646 void *n, *v = 0;
8647 if (arg3) {
8648 v = lock_user(VERIFY_READ, arg3, arg4, 1);
8649 if (!v) {
8650 ret = -TARGET_EFAULT;
8651 break;
8654 n = lock_user_string(arg2);
8655 if (n) {
8656 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
8657 } else {
8658 ret = -TARGET_EFAULT;
8660 unlock_user(n, arg2, 0);
8661 unlock_user(v, arg3, 0);
8663 break;
8664 case TARGET_NR_getxattr:
8665 case TARGET_NR_lgetxattr:
8667 void *p, *n, *v = 0;
8668 if (arg3) {
8669 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8670 if (!v) {
8671 ret = -TARGET_EFAULT;
8672 break;
8675 p = lock_user_string(arg1);
8676 n = lock_user_string(arg2);
8677 if (p && n) {
8678 if (num == TARGET_NR_getxattr) {
8679 ret = get_errno(getxattr(p, n, v, arg4));
8680 } else {
8681 ret = get_errno(lgetxattr(p, n, v, arg4));
8683 } else {
8684 ret = -TARGET_EFAULT;
8686 unlock_user(p, arg1, 0);
8687 unlock_user(n, arg2, 0);
8688 unlock_user(v, arg3, arg4);
8690 break;
8691 case TARGET_NR_fgetxattr:
8693 void *n, *v = 0;
8694 if (arg3) {
8695 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8696 if (!v) {
8697 ret = -TARGET_EFAULT;
8698 break;
8701 n = lock_user_string(arg2);
8702 if (n) {
8703 ret = get_errno(fgetxattr(arg1, n, v, arg4));
8704 } else {
8705 ret = -TARGET_EFAULT;
8707 unlock_user(n, arg2, 0);
8708 unlock_user(v, arg3, arg4);
8710 break;
8711 case TARGET_NR_removexattr:
8712 case TARGET_NR_lremovexattr:
8714 void *p, *n;
8715 p = lock_user_string(arg1);
8716 n = lock_user_string(arg2);
8717 if (p && n) {
8718 if (num == TARGET_NR_removexattr) {
8719 ret = get_errno(removexattr(p, n));
8720 } else {
8721 ret = get_errno(lremovexattr(p, n));
8723 } else {
8724 ret = -TARGET_EFAULT;
8726 unlock_user(p, arg1, 0);
8727 unlock_user(n, arg2, 0);
8729 break;
8730 case TARGET_NR_fremovexattr:
8732 void *n;
8733 n = lock_user_string(arg2);
8734 if (n) {
8735 ret = get_errno(fremovexattr(arg1, n));
8736 } else {
8737 ret = -TARGET_EFAULT;
8739 unlock_user(n, arg2, 0);
8741 break;
8742 #endif
8743 #endif /* CONFIG_ATTR */
8744 #ifdef TARGET_NR_set_thread_area
8745 case TARGET_NR_set_thread_area:
8746 #if defined(TARGET_MIPS)
8747 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
8748 ret = 0;
8749 break;
8750 #elif defined(TARGET_CRIS)
8751 if (arg1 & 0xff)
8752 ret = -TARGET_EINVAL;
8753 else {
8754 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
8755 ret = 0;
8757 break;
8758 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
8759 ret = do_set_thread_area(cpu_env, arg1);
8760 break;
8761 #elif defined(TARGET_M68K)
8763 TaskState *ts = cpu->opaque;
8764 ts->tp_value = arg1;
8765 ret = 0;
8766 break;
8768 #else
8769 goto unimplemented_nowarn;
8770 #endif
8771 #endif
8772 #ifdef TARGET_NR_get_thread_area
8773 case TARGET_NR_get_thread_area:
8774 #if defined(TARGET_I386) && defined(TARGET_ABI32)
8775 ret = do_get_thread_area(cpu_env, arg1);
8776 break;
8777 #elif defined(TARGET_M68K)
8779 TaskState *ts = cpu->opaque;
8780 ret = ts->tp_value;
8781 break;
8783 #else
8784 goto unimplemented_nowarn;
8785 #endif
8786 #endif
8787 #ifdef TARGET_NR_getdomainname
8788 case TARGET_NR_getdomainname:
8789 goto unimplemented_nowarn;
8790 #endif
8792 #ifdef TARGET_NR_clock_gettime
8793 case TARGET_NR_clock_gettime:
8795 struct timespec ts;
8796 ret = get_errno(clock_gettime(arg1, &ts));
8797 if (!is_error(ret)) {
8798 host_to_target_timespec(arg2, &ts);
8800 break;
8802 #endif
8803 #ifdef TARGET_NR_clock_getres
8804 case TARGET_NR_clock_getres:
8806 struct timespec ts;
8807 ret = get_errno(clock_getres(arg1, &ts));
8808 if (!is_error(ret)) {
8809 host_to_target_timespec(arg2, &ts);
8811 break;
8813 #endif
8814 #ifdef TARGET_NR_clock_nanosleep
8815 case TARGET_NR_clock_nanosleep:
8817 struct timespec ts;
8818 target_to_host_timespec(&ts, arg3);
8819 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
8820 if (arg4)
8821 host_to_target_timespec(arg4, &ts);
8822 break;
8824 #endif
8826 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
8827 case TARGET_NR_set_tid_address:
8828 ret = get_errno(set_tid_address((int *)g2h(arg1)));
8829 break;
8830 #endif
8832 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
8833 case TARGET_NR_tkill:
8834 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
8835 break;
8836 #endif
8838 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
8839 case TARGET_NR_tgkill:
8840 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
8841 target_to_host_signal(arg3)));
8842 break;
8843 #endif
8845 #ifdef TARGET_NR_set_robust_list
8846 case TARGET_NR_set_robust_list:
8847 case TARGET_NR_get_robust_list:
8848 /* The ABI for supporting robust futexes has userspace pass
8849 * the kernel a pointer to a linked list which is updated by
8850 * userspace after the syscall; the list is walked by the kernel
8851 * when the thread exits. Since the linked list in QEMU guest
8852 * memory isn't a valid linked list for the host and we have
8853 * no way to reliably intercept the thread-death event, we can't
8854 * support these. Silently return ENOSYS so that guest userspace
8855 * falls back to a non-robust futex implementation (which should
8856 * be OK except in the corner case of the guest crashing while
8857 * holding a mutex that is shared with another process via
8858 * shared memory).
8859 */
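/* For reference (declaration as in linux/futex.h, shown only to illustrate
 * why the guest-side pointers cannot simply be handed to the host kernel):
 *
 *   struct robust_list { struct robust_list *next; };
 *   struct robust_list_head {
 *       struct robust_list list;
 *       long futex_offset;
 *       struct robust_list *list_op_pending;
 *   };
 */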
8860 goto unimplemented_nowarn;
8861 #endif
8863 #if defined(TARGET_NR_utimensat)
8864 case TARGET_NR_utimensat:
8866 struct timespec *tsp, ts[2];
8867 if (!arg3) {
8868 tsp = NULL;
8869 } else {
8870 target_to_host_timespec(ts, arg3);
8871 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
8872 tsp = ts;
8874 if (!arg2)
8875 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
8876 else {
8877 if (!(p = lock_user_string(arg2))) {
8878 ret = -TARGET_EFAULT;
8879 goto fail;
8881 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
8882 unlock_user(p, arg2, 0);
8885 break;
8886 #endif
8887 case TARGET_NR_futex:
8888 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
8889 break;
8890 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
8891 case TARGET_NR_inotify_init:
8892 ret = get_errno(sys_inotify_init());
8893 break;
8894 #endif
8895 #ifdef CONFIG_INOTIFY1
8896 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
8897 case TARGET_NR_inotify_init1:
8898 ret = get_errno(sys_inotify_init1(arg1));
8899 break;
8900 #endif
8901 #endif
8902 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
8903 case TARGET_NR_inotify_add_watch:
8904 p = lock_user_string(arg2);
8905 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
8906 unlock_user(p, arg2, 0);
8907 break;
8908 #endif
8909 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
8910 case TARGET_NR_inotify_rm_watch:
8911 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
8912 break;
8913 #endif
8915 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
8916 case TARGET_NR_mq_open:
8918 struct mq_attr posix_mq_attr;
8920 p = lock_user_string(arg1 - 1);
8921 if (arg4 != 0)
8922 copy_from_user_mq_attr (&posix_mq_attr, arg4);
8923 ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
8924 unlock_user (p, arg1, 0);
8926 break;
8928 case TARGET_NR_mq_unlink:
8929 p = lock_user_string(arg1 - 1);
8930 ret = get_errno(mq_unlink(p));
8931 unlock_user (p, arg1, 0);
8932 break;
8934 case TARGET_NR_mq_timedsend:
8936 struct timespec ts;
8938 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8939 if (arg5 != 0) {
8940 target_to_host_timespec(&ts, arg5);
8941 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
8942 host_to_target_timespec(arg5, &ts);
8944 else
8945 ret = get_errno(mq_send(arg1, p, arg3, arg4));
8946 unlock_user (p, arg2, arg3);
8948 break;
8950 case TARGET_NR_mq_timedreceive:
8952 struct timespec ts;
8953 unsigned int prio;
8955 p = lock_user (VERIFY_READ, arg2, arg3, 1);
8956 if (arg5 != 0) {
8957 target_to_host_timespec(&ts, arg5);
8958 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
8959 host_to_target_timespec(arg5, &ts);
8961 else
8962 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
8963 unlock_user (p, arg2, arg3);
8964 if (arg4 != 0)
8965 put_user_u32(prio, arg4);
8967 break;
8969 /* Not implemented for now... */
8970 /* case TARGET_NR_mq_notify: */
8971 /* break; */
8973 case TARGET_NR_mq_getsetattr:
8975 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
8976 ret = 0;
8977 if (arg3 != 0) {
8978 ret = mq_getattr(arg1, &posix_mq_attr_out);
8979 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
8981 if (arg2 != 0) {
8982 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
8983 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
8987 break;
8988 #endif
8990 #ifdef CONFIG_SPLICE
8991 #ifdef TARGET_NR_tee
8992 case TARGET_NR_tee:
8994 ret = get_errno(tee(arg1,arg2,arg3,arg4));
8996 break;
8997 #endif
8998 #ifdef TARGET_NR_splice
8999 case TARGET_NR_splice:
9001 loff_t loff_in, loff_out;
9002 loff_t *ploff_in = NULL, *ploff_out = NULL;
9003 if(arg2) {
9004 get_user_u64(loff_in, arg2);
9005 ploff_in = &loff_in;
9007 if(arg4) {
9008 get_user_u64(loff_out, arg4);
9009 ploff_out = &loff_out;
9011 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
9013 break;
9014 #endif
9015 #ifdef TARGET_NR_vmsplice
9016 case TARGET_NR_vmsplice:
9018 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9019 if (vec != NULL) {
9020 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
9021 unlock_iovec(vec, arg2, arg3, 0);
9022 } else {
9023 ret = -host_to_target_errno(errno);
9026 break;
9027 #endif
9028 #endif /* CONFIG_SPLICE */
9029 #ifdef CONFIG_EVENTFD
9030 #if defined(TARGET_NR_eventfd)
9031 case TARGET_NR_eventfd:
9032 ret = get_errno(eventfd(arg1, 0));
9033 break;
9034 #endif
9035 #if defined(TARGET_NR_eventfd2)
9036 case TARGET_NR_eventfd2:
9038 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
9039 if (arg2 & TARGET_O_NONBLOCK) {
9040 host_flags |= O_NONBLOCK;
9042 if (arg2 & TARGET_O_CLOEXEC) {
9043 host_flags |= O_CLOEXEC;
9045 ret = get_errno(eventfd(arg1, host_flags));
9046 break;
9048 #endif
9049 #endif /* CONFIG_EVENTFD */
9050 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
9051 case TARGET_NR_fallocate:
9052 #if TARGET_ABI_BITS == 32
9053 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
9054 target_offset64(arg5, arg6)));
9055 #else
9056 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
9057 #endif
9058 break;
9059 #endif
9060 #if defined(CONFIG_SYNC_FILE_RANGE)
9061 #if defined(TARGET_NR_sync_file_range)
9062 case TARGET_NR_sync_file_range:
9063 #if TARGET_ABI_BITS == 32
9064 #if defined(TARGET_MIPS)
9065 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9066 target_offset64(arg5, arg6), arg7));
9067 #else
9068 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
9069 target_offset64(arg4, arg5), arg6));
9070 #endif /* !TARGET_MIPS */
9071 #else
9072 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
9073 #endif
9074 break;
9075 #endif
9076 #if defined(TARGET_NR_sync_file_range2)
9077 case TARGET_NR_sync_file_range2:
9078 /* This is like sync_file_range but the arguments are reordered */
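/* i.e. (assumed prototypes, for illustration only):
 *   sync_file_range (int fd, loff_t offset, loff_t nbytes, unsigned flags)
 *   sync_file_range2(int fd, unsigned flags, loff_t offset, loff_t nbytes)
 * so the flags arrive in arg2 and are passed last to the host call below.
 */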
9079 #if TARGET_ABI_BITS == 32
9080 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
9081 target_offset64(arg5, arg6), arg2));
9082 #else
9083 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
9084 #endif
9085 break;
9086 #endif
9087 #endif
9088 #if defined(CONFIG_EPOLL)
9089 #if defined(TARGET_NR_epoll_create)
9090 case TARGET_NR_epoll_create:
9091 ret = get_errno(epoll_create(arg1));
9092 break;
9093 #endif
9094 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
9095 case TARGET_NR_epoll_create1:
9096 ret = get_errno(epoll_create1(arg1));
9097 break;
9098 #endif
9099 #if defined(TARGET_NR_epoll_ctl)
9100 case TARGET_NR_epoll_ctl:
9102 struct epoll_event ep;
9103 struct epoll_event *epp = 0;
9104 if (arg4) {
9105 struct target_epoll_event *target_ep;
9106 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
9107 goto efault;
9109 ep.events = tswap32(target_ep->events);
9110 /* The epoll_data_t union is just opaque data to the kernel,
9111 * so we transfer all 64 bits across and need not worry what
9112 * actual data type it is.
9113 */
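/* For reference, epoll_data is the union below (as in <sys/epoll.h>), so
 * copying data.u64 preserves whichever member the guest actually stored:
 *
 *   typedef union epoll_data {
 *       void    *ptr;
 *       int      fd;
 *       uint32_t u32;
 *       uint64_t u64;
 *   } epoll_data_t;
 */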
9114 ep.data.u64 = tswap64(target_ep->data.u64);
9115 unlock_user_struct(target_ep, arg4, 0);
9116 epp = &ep;
9118 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
9119 break;
9121 #endif
9123 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
9124 #define IMPLEMENT_EPOLL_PWAIT
9125 #endif
9126 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
9127 #if defined(TARGET_NR_epoll_wait)
9128 case TARGET_NR_epoll_wait:
9129 #endif
9130 #if defined(IMPLEMENT_EPOLL_PWAIT)
9131 case TARGET_NR_epoll_pwait:
9132 #endif
9134 struct target_epoll_event *target_ep;
9135 struct epoll_event *ep;
9136 int epfd = arg1;
9137 int maxevents = arg3;
9138 int timeout = arg4;
9140 target_ep = lock_user(VERIFY_WRITE, arg2,
9141 maxevents * sizeof(struct target_epoll_event), 1);
9142 if (!target_ep) {
9143 goto efault;
9146 ep = alloca(maxevents * sizeof(struct epoll_event));
9148 switch (num) {
9149 #if defined(IMPLEMENT_EPOLL_PWAIT)
9150 case TARGET_NR_epoll_pwait:
9152 target_sigset_t *target_set;
9153 sigset_t _set, *set = &_set;
9155 if (arg5) {
9156 target_set = lock_user(VERIFY_READ, arg5,
9157 sizeof(target_sigset_t), 1);
9158 if (!target_set) {
9159 unlock_user(target_ep, arg2, 0);
9160 goto efault;
9162 target_to_host_sigset(set, target_set);
9163 unlock_user(target_set, arg5, 0);
9164 } else {
9165 set = NULL;
9168 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
9169 break;
9171 #endif
9172 #if defined(TARGET_NR_epoll_wait)
9173 case TARGET_NR_epoll_wait:
9174 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
9175 break;
9176 #endif
9177 default:
9178 ret = -TARGET_ENOSYS;
9180 if (!is_error(ret)) {
9181 int i;
9182 for (i = 0; i < ret; i++) {
9183 target_ep[i].events = tswap32(ep[i].events);
9184 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
9187 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
9188 break;
9190 #endif
9191 #endif
9192 #ifdef TARGET_NR_prlimit64
9193 case TARGET_NR_prlimit64:
9195 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
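/* Both rlimit structures converted below are just a pair of 64-bit values,
 * mirroring the kernel's struct rlimit64 (sketch of the assumed layout):
 *
 *   struct target_rlimit64 { uint64_t rlim_cur; uint64_t rlim_max; };
 *   struct host_rlimit64   { uint64_t rlim_cur; uint64_t rlim_max; };
 *
 * so the conversion is only a byte swap of the two fields.
 */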
9196 struct target_rlimit64 *target_rnew, *target_rold;
9197 struct host_rlimit64 rnew, rold, *rnewp = 0;
9198 if (arg3) {
9199 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
9200 goto efault;
9202 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
9203 rnew.rlim_max = tswap64(target_rnew->rlim_max);
9204 unlock_user_struct(target_rnew, arg3, 0);
9205 rnewp = &rnew;
9208 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
9209 if (!is_error(ret) && arg4) {
9210 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
9211 goto efault;
9213 target_rold->rlim_cur = tswap64(rold.rlim_cur);
9214 target_rold->rlim_max = tswap64(rold.rlim_max);
9215 unlock_user_struct(target_rold, arg4, 1);
9217 break;
9219 #endif
9220 #ifdef TARGET_NR_gethostname
9221 case TARGET_NR_gethostname:
9223 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9224 if (name) {
9225 ret = get_errno(gethostname(name, arg2));
9226 unlock_user(name, arg1, arg2);
9227 } else {
9228 ret = -TARGET_EFAULT;
9230 break;
9232 #endif
9233 #ifdef TARGET_NR_atomic_cmpxchg_32
9234 case TARGET_NR_atomic_cmpxchg_32:
9236 /* should use start_exclusive from main.c */
9237 abi_ulong mem_value;
9238 if (get_user_u32(mem_value, arg6)) {
9239 target_siginfo_t info;
9240 info.si_signo = SIGSEGV;
9241 info.si_errno = 0;
9242 info.si_code = TARGET_SEGV_MAPERR;
9243 info._sifields._sigfault._addr = arg6;
9244 queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
9245 ret = 0xdeadbeef;
9248 if (mem_value == arg2)
9249 put_user_u32(arg1, arg6);
9250 ret = mem_value;
9251 break;
9253 #endif
9254 #ifdef TARGET_NR_atomic_barrier
9255 case TARGET_NR_atomic_barrier:
9257 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
9258 ret = 0;
9259 break;
9261 #endif
9263 #ifdef TARGET_NR_timer_create
9264 case TARGET_NR_timer_create:
9266 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
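/* Note: the timer id returned to the guest is not the host timer_t but an
 * index into g_posix_timers encoded as 0xcafe0000 | timer_index (see the
 * store via ptarget_timer->ptr below); the other timer_* cases recover the
 * index with arg1 & 0xffff.
 */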
9268 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
9269 struct target_sigevent *ptarget_sevp;
9270 struct target_timer_t *ptarget_timer;
9272 int clkid = arg1;
9273 int timer_index = next_free_host_timer();
9275 if (timer_index < 0) {
9276 ret = -TARGET_EAGAIN;
9277 } else {
9278 timer_t *phtimer = g_posix_timers + timer_index;
9280 if (arg2) {
9281 if (!lock_user_struct(VERIFY_READ, ptarget_sevp, arg2, 1)) {
9282 goto efault;
9285 host_sevp.sigev_signo = tswap32(ptarget_sevp->sigev_signo);
9286 host_sevp.sigev_notify = tswap32(ptarget_sevp->sigev_notify);
9288 phost_sevp = &host_sevp;
9291 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
9292 if (ret) {
9293 phtimer = NULL;
9294 } else {
9295 if (!lock_user_struct(VERIFY_WRITE, ptarget_timer, arg3, 1)) {
9296 goto efault;
9298 ptarget_timer->ptr = tswap32(0xcafe0000 | timer_index);
9299 unlock_user_struct(ptarget_timer, arg3, 1);
9302 break;
9304 #endif
9306 #ifdef TARGET_NR_timer_settime
9307 case TARGET_NR_timer_settime:
9309 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
9310 * struct itimerspec * old_value */
9311 arg1 &= 0xffff;
9312 if (arg3 == 0 || arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9313 ret = -TARGET_EINVAL;
9314 } else {
9315 timer_t htimer = g_posix_timers[arg1];
9316 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
9318 target_to_host_itimerspec(&hspec_new, arg3);
9319 ret = get_errno(
9320 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
9321 if (arg4) host_to_target_itimerspec(arg4, &hspec_old);
9323 break;
9325 #endif
9327 #ifdef TARGET_NR_timer_gettime
9328 case TARGET_NR_timer_gettime:
9330 /* args: timer_t timerid, struct itimerspec *curr_value */
9331 arg1 &= 0xffff;
9332 if (!arg2) {
9333 return -TARGET_EFAULT;
9334 } else if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9335 ret = -TARGET_EINVAL;
9336 } else {
9337 timer_t htimer = g_posix_timers[arg1];
9338 struct itimerspec hspec;
9339 ret = get_errno(timer_gettime(htimer, &hspec));
9341 if (host_to_target_itimerspec(arg2, &hspec)) {
9342 ret = -TARGET_EFAULT;
9345 break;
9347 #endif
9349 #ifdef TARGET_NR_timer_getoverrun
9350 case TARGET_NR_timer_getoverrun:
9352 /* args: timer_t timerid */
9353 arg1 &= 0xffff;
9354 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9355 ret = -TARGET_EINVAL;
9356 } else {
9357 timer_t htimer = g_posix_timers[arg1];
9358 ret = get_errno(timer_getoverrun(htimer));
9360 break;
9362 #endif
9364 #ifdef TARGET_NR_timer_delete
9365 case TARGET_NR_timer_delete:
9367 /* args: timer_t timerid */
9368 arg1 &= 0xffff;
9369 if (arg1 < 0 || arg1 >= ARRAY_SIZE(g_posix_timers)) {
9370 ret = -TARGET_EINVAL;
9371 } else {
9372 timer_t htimer = g_posix_timers[arg1];
9373 ret = get_errno(timer_delete(htimer));
9374 g_posix_timers[arg1] = 0;
9376 break;
9378 #endif
9380 default:
9381 unimplemented:
9382 gemu_log("qemu: Unsupported syscall: %d\n", num);
9383 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
9384 unimplemented_nowarn:
9385 #endif
9386 ret = -TARGET_ENOSYS;
9387 break;
9389 fail:
9390 #ifdef DEBUG
9391 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
9392 #endif
9393 if(do_strace)
9394 print_syscall_ret(num, ret);
9395 return ret;
9396 efault:
9397 ret = -TARGET_EFAULT;
9398 goto fail;