linux-user: fix emulation of accept4/getpeername/getsockname/recvfrom syscalls
[qemu/ar7.git] / linux-user / syscall.c
blob 5f72209debc9ba83f1281ffeaecba0728a95ee90
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/uio.h>
42 #include <poll.h>
43 #include <sys/times.h>
44 #include <sys/shm.h>
45 #include <sys/sem.h>
46 #include <sys/statfs.h>
47 #include <utime.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #if defined(CONFIG_USBFS)
98 #include <linux/usbdevice_fs.h>
99 #include <linux/usb/ch9.h>
100 #endif
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #include "linux_loop.h"
110 #include "uname.h"
112 #include "qemu.h"
113 #include "fd-trans.h"
/* Host clone(2) flag policy: classify which CLONE_* combinations QEMU
 * can emulate (as pthread_create or fork), which it silently ignores,
 * and which it must reject. */
115 #ifndef CLONE_IO
116 #define CLONE_IO 0x80000000 /* Clone io context */
117 #endif
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
/* Re-define the _syscallN() generator macros to call the host's raw
 * syscall(2) entry point, bypassing any libc wrapper (some of these
 * syscalls have no wrapper, or a wrapper whose semantics differ from
 * the kernel's). The __NR_sys_* aliases keep generated names from
 * clashing with libc symbols. */
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167 * once. This exercises the codepaths for restart.
169 //#define DEBUG_ERESTARTSYS
171 //#include <linux/msdos_fs.h>
172 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
173 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
175 #undef _syscall0
176 #undef _syscall1
177 #undef _syscall2
178 #undef _syscall3
179 #undef _syscall4
180 #undef _syscall5
181 #undef _syscall6
183 #define _syscall0(type,name) \
184 static type name (void) \
186 return syscall(__NR_##name); \
189 #define _syscall1(type,name,type1,arg1) \
190 static type name (type1 arg1) \
192 return syscall(__NR_##name, arg1); \
195 #define _syscall2(type,name,type1,arg1,type2,arg2) \
196 static type name (type1 arg1,type2 arg2) \
198 return syscall(__NR_##name, arg1, arg2); \
201 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
202 static type name (type1 arg1,type2 arg2,type3 arg3) \
204 return syscall(__NR_##name, arg1, arg2, arg3); \
207 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
208 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
210 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
213 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
214 type5,arg5) \
215 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
217 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
221 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
222 type5,arg5,type6,arg6) \
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
224 type6 arg6) \
226 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
230 #define __NR_sys_uname __NR_uname
231 #define __NR_sys_getcwd1 __NR_getcwd
232 #define __NR_sys_getdents __NR_getdents
233 #define __NR_sys_getdents64 __NR_getdents64
234 #define __NR_sys_getpriority __NR_getpriority
235 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
236 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
/* Some 64-bit hosts only provide lseek; map _llseek onto it. */
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
245 #endif
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
250 #endif
/* Direct host syscall bindings used by the emulation below. Each one is
 * guarded so it is only generated when both the host kernel exposes the
 * syscall number and the target actually needs it. */
252 #ifdef __NR_gettid
253 _syscall0(int, gettid)
254 #else
255 /* This is a replacement for the host gettid() and must return a host
256 errno. */
257 static int gettid(void) {
258 return -ENOSYS;
260 #endif
262 /* For the 64-bit guest on 32-bit host case we must emulate
263 * getdents using getdents64, because otherwise the host
264 * might hand us back more dirent records than we can fit
265 * into the guest buffer after structure format conversion.
266 * Otherwise we emulate getdents with getdents if the host has it.
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
270 #endif
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
274 #endif
275 #if (defined(TARGET_NR_getdents) && \
276 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
279 #endif
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
282 loff_t *, res, uint, wh);
283 #endif
284 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
285 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
286 siginfo_t *, uinfo)
287 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group,int,error_code)
290 #endif
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address,int *,tidptr)
293 #endif
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
296 const struct timespec *,timeout,int *,uaddr2,int,val3)
297 #endif
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
300 unsigned long *, user_mask_ptr);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
303 unsigned long *, user_mask_ptr);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
306 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
307 void *, arg);
308 _syscall2(int, capget, struct __user_cap_header_struct *, header,
309 struct __user_cap_data_struct *, data);
310 _syscall2(int, capset, struct __user_cap_header_struct *, header,
311 struct __user_cap_data_struct *, data);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get, int, which, int, who)
314 #endif
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
317 #endif
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
320 #endif
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
324 unsigned long, idx1, unsigned long, idx2)
325 #endif
/* Bidirectional translation table for open(2)/fcntl(2) file status
 * flags between target and host encodings. Each row is
 * { target_mask, target_bits, host_mask, host_bits }. */
327 static bitmask_transtbl fcntl_flags_tbl[] = {
328 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
329 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
330 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
331 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
332 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
333 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
334 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
335 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
336 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
337 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
338 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
339 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
340 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
341 #if defined(O_DIRECT)
342 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
343 #endif
344 #if defined(O_NOATIME)
345 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
346 #endif
347 #if defined(O_CLOEXEC)
348 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
349 #endif
350 #if defined(O_PATH)
351 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
352 #endif
353 #if defined(O_TMPFILE)
354 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
355 #endif
356 /* Don't terminate the list prematurely on 64-bit host+guest. */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
359 #endif
360 { 0, 0, 0, 0 }
/* getcwd(2)-style helper: fill BUF with the current working directory
 * and return the string length *including* the NUL terminator, or -1
 * on failure (getcwd() has already set errno in that case). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *cwd = getcwd(buf, size);

    /* getcwd() sets errno */
    return cwd ? (int)strlen(cwd) + 1 : -1;
}
/* sys_utimensat(): bind directly to the host syscall when the host
 * kernel headers define __NR_utimensat; otherwise provide a stub that
 * always fails with ENOSYS, so the guest sees a consistent
 * "not implemented" error. */
372 #ifdef TARGET_NR_utimensat
373 #if defined(__NR_utimensat)
374 #define __NR_sys_utimensat __NR_utimensat
375 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
376 const struct timespec *,tsp,int,flags)
377 #else
378 static int sys_utimensat(int dirfd, const char *pathname,
379 const struct timespec times[2], int flags)
381 errno = ENOSYS;
382 return -1;
384 #endif
385 #endif /* TARGET_NR_utimensat */
/* sys_renameat2(): use the host renameat2 syscall when available. The
 * fallback can only honour flags == 0 (degrading to plain renameat);
 * any non-zero flags fail with ENOSYS. */
387 #ifdef TARGET_NR_renameat2
388 #if defined(__NR_renameat2)
389 #define __NR_sys_renameat2 __NR_renameat2
390 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
391 const char *, new, unsigned int, flags)
392 #else
393 static int sys_renameat2(int oldfd, const char *old,
394 int newfd, const char *new, int flags)
396 if (flags == 0) {
397 return renameat(oldfd, old, newfd, new);
399 errno = ENOSYS;
400 return -1;
402 #endif
403 #endif /* TARGET_NR_renameat2 */
/* inotify: thin wrappers over the host libc helpers. When
 * CONFIG_INOTIFY is absent the TARGET_NR_inotify_* macros are
 * undefined below, so the guest gets the generic "unimplemented
 * syscall" path instead (userspace usually copes). */
405 #ifdef CONFIG_INOTIFY
406 #include <sys/inotify.h>
408 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
409 static int sys_inotify_init(void)
411 return (inotify_init());
413 #endif
414 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
415 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
417 return (inotify_add_watch(fd, pathname, mask));
419 #endif
420 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
421 static int sys_inotify_rm_watch(int fd, int32_t wd)
423 return (inotify_rm_watch(fd, wd));
425 #endif
426 #ifdef CONFIG_INOTIFY1
427 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
428 static int sys_inotify_init1(int flags)
430 return (inotify_init1(flags));
432 #endif
433 #endif
434 #else
435 /* Userspace can usually survive runtime without inotify */
436 #undef TARGET_NR_inotify_init
437 #undef TARGET_NR_inotify_init1
438 #undef TARGET_NR_inotify_add_watch
439 #undef TARGET_NR_inotify_rm_watch
440 #endif /* CONFIG_INOTIFY */
/* prlimit64: declare a fixed-layout host_rlimit64 for the raw syscall,
 * because the glibc rlimit structure may not match the kernel ABI.
 * NOTE(review): __NR_prlimit64 is forced to -1 when the host lacks it,
 * so the syscall then fails at runtime rather than at compile time —
 * presumably with ENOSYS; confirm against syscall(2) behaviour. */
442 #if defined(TARGET_NR_prlimit64)
443 #ifndef __NR_prlimit64
444 # define __NR_prlimit64 -1
445 #endif
446 #define __NR_sys_prlimit64 __NR_prlimit64
447 /* The glibc rlimit structure may not be that used by the underlying syscall */
448 struct host_rlimit64 {
449 uint64_t rlim_cur;
450 uint64_t rlim_max;
452 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
453 const struct host_rlimit64 *, new_limit,
454 struct host_rlimit64 *, old_limit)
455 #endif
/* POSIX timer bookkeeping: a fixed pool of host timer slots; slot k is
 * considered free while g_posix_timers[k] == 0, and claiming a slot
 * stores a placeholder value of 1 until the real timer id is created.
 * NOTE(review): the test-and-claim below is not atomic — two guest
 * threads could race to the same slot (see FIXME). */
458 #if defined(TARGET_NR_timer_create)
459 /* Maximum of 32 active POSIX timers allowed at any one time. */
460 static timer_t g_posix_timers[32] = { 0, } ;
462 static inline int next_free_host_timer(void)
464 int k ;
465 /* FIXME: Does finding the next free slot require a lock? */
466 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
467 if (g_posix_timers[k] == 0) {
468 g_posix_timers[k] = (timer_t) 1;
469 return k;
/* No free slot: caller must report resource exhaustion. */
472 return -1;
474 #endif
/* regpairs_aligned(): per-target rule for whether 64-bit syscall
 * arguments are passed in an aligned (even/odd) register pair, so the
 * argument unmarshalling code knows whether to skip a register. */
476 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
477 #ifdef TARGET_ARM
478 static inline int regpairs_aligned(void *cpu_env, int num)
480 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
482 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
483 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
484 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
485 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
486 * of registers which translates to the same as ARM/MIPS, because we start with
487 * r3 as arg1 */
488 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
489 #elif defined(TARGET_SH4)
490 /* SH4 doesn't align register pairs, except for p{read,write}64 */
491 static inline int regpairs_aligned(void *cpu_env, int num)
493 switch (num) {
494 case TARGET_NR_pread64:
495 case TARGET_NR_pwrite64:
496 return 1;
498 default:
499 return 0;
502 #elif defined(TARGET_XTENSA)
503 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
504 #else
505 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
506 #endif
508 #define ERRNO_TABLE_SIZE 1200
510 /* target_to_host_errno_table[] is initialized from
511 * host_to_target_errno_table[] in syscall_init(). */
512 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
516 * This list is the union of errno values overridden in asm-<arch>/errno.h
517 * minus the errnos that are not actually generic to all archs.
519 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
520 [EAGAIN] = TARGET_EAGAIN,
521 [EIDRM] = TARGET_EIDRM,
522 [ECHRNG] = TARGET_ECHRNG,
523 [EL2NSYNC] = TARGET_EL2NSYNC,
524 [EL3HLT] = TARGET_EL3HLT,
525 [EL3RST] = TARGET_EL3RST,
526 [ELNRNG] = TARGET_ELNRNG,
527 [EUNATCH] = TARGET_EUNATCH,
528 [ENOCSI] = TARGET_ENOCSI,
529 [EL2HLT] = TARGET_EL2HLT,
530 [EDEADLK] = TARGET_EDEADLK,
531 [ENOLCK] = TARGET_ENOLCK,
532 [EBADE] = TARGET_EBADE,
533 [EBADR] = TARGET_EBADR,
534 [EXFULL] = TARGET_EXFULL,
535 [ENOANO] = TARGET_ENOANO,
536 [EBADRQC] = TARGET_EBADRQC,
537 [EBADSLT] = TARGET_EBADSLT,
538 [EBFONT] = TARGET_EBFONT,
539 [ENOSTR] = TARGET_ENOSTR,
540 [ENODATA] = TARGET_ENODATA,
541 [ETIME] = TARGET_ETIME,
542 [ENOSR] = TARGET_ENOSR,
543 [ENONET] = TARGET_ENONET,
544 [ENOPKG] = TARGET_ENOPKG,
545 [EREMOTE] = TARGET_EREMOTE,
546 [ENOLINK] = TARGET_ENOLINK,
547 [EADV] = TARGET_EADV,
548 [ESRMNT] = TARGET_ESRMNT,
549 [ECOMM] = TARGET_ECOMM,
550 [EPROTO] = TARGET_EPROTO,
551 [EDOTDOT] = TARGET_EDOTDOT,
552 [EMULTIHOP] = TARGET_EMULTIHOP,
553 [EBADMSG] = TARGET_EBADMSG,
554 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
555 [EOVERFLOW] = TARGET_EOVERFLOW,
556 [ENOTUNIQ] = TARGET_ENOTUNIQ,
557 [EBADFD] = TARGET_EBADFD,
558 [EREMCHG] = TARGET_EREMCHG,
559 [ELIBACC] = TARGET_ELIBACC,
560 [ELIBBAD] = TARGET_ELIBBAD,
561 [ELIBSCN] = TARGET_ELIBSCN,
562 [ELIBMAX] = TARGET_ELIBMAX,
563 [ELIBEXEC] = TARGET_ELIBEXEC,
564 [EILSEQ] = TARGET_EILSEQ,
565 [ENOSYS] = TARGET_ENOSYS,
566 [ELOOP] = TARGET_ELOOP,
567 [ERESTART] = TARGET_ERESTART,
568 [ESTRPIPE] = TARGET_ESTRPIPE,
569 [ENOTEMPTY] = TARGET_ENOTEMPTY,
570 [EUSERS] = TARGET_EUSERS,
571 [ENOTSOCK] = TARGET_ENOTSOCK,
572 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
573 [EMSGSIZE] = TARGET_EMSGSIZE,
574 [EPROTOTYPE] = TARGET_EPROTOTYPE,
575 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
576 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
577 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
578 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
579 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
580 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
581 [EADDRINUSE] = TARGET_EADDRINUSE,
582 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
583 [ENETDOWN] = TARGET_ENETDOWN,
584 [ENETUNREACH] = TARGET_ENETUNREACH,
585 [ENETRESET] = TARGET_ENETRESET,
586 [ECONNABORTED] = TARGET_ECONNABORTED,
587 [ECONNRESET] = TARGET_ECONNRESET,
588 [ENOBUFS] = TARGET_ENOBUFS,
589 [EISCONN] = TARGET_EISCONN,
590 [ENOTCONN] = TARGET_ENOTCONN,
591 [EUCLEAN] = TARGET_EUCLEAN,
592 [ENOTNAM] = TARGET_ENOTNAM,
593 [ENAVAIL] = TARGET_ENAVAIL,
594 [EISNAM] = TARGET_EISNAM,
595 [EREMOTEIO] = TARGET_EREMOTEIO,
596 [EDQUOT] = TARGET_EDQUOT,
597 [ESHUTDOWN] = TARGET_ESHUTDOWN,
598 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
599 [ETIMEDOUT] = TARGET_ETIMEDOUT,
600 [ECONNREFUSED] = TARGET_ECONNREFUSED,
601 [EHOSTDOWN] = TARGET_EHOSTDOWN,
602 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
603 [EALREADY] = TARGET_EALREADY,
604 [EINPROGRESS] = TARGET_EINPROGRESS,
605 [ESTALE] = TARGET_ESTALE,
606 [ECANCELED] = TARGET_ECANCELED,
607 [ENOMEDIUM] = TARGET_ENOMEDIUM,
608 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
609 #ifdef ENOKEY
610 [ENOKEY] = TARGET_ENOKEY,
611 #endif
612 #ifdef EKEYEXPIRED
613 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
614 #endif
615 #ifdef EKEYREVOKED
616 [EKEYREVOKED] = TARGET_EKEYREVOKED,
617 #endif
618 #ifdef EKEYREJECTED
619 [EKEYREJECTED] = TARGET_EKEYREJECTED,
620 #endif
621 #ifdef EOWNERDEAD
622 [EOWNERDEAD] = TARGET_EOWNERDEAD,
623 #endif
624 #ifdef ENOTRECOVERABLE
625 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
626 #endif
627 #ifdef ENOMSG
628 [ENOMSG] = TARGET_ENOMSG,
629 #endif
630 #ifdef ERKFILL
631 [ERFKILL] = TARGET_ERFKILL,
632 #endif
633 #ifdef EHWPOISON
634 [EHWPOISON] = TARGET_EHWPOISON,
635 #endif
638 static inline int host_to_target_errno(int err)
640 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
641 host_to_target_errno_table[err]) {
642 return host_to_target_errno_table[err];
644 return err;
647 static inline int target_to_host_errno(int err)
649 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
650 target_to_host_errno_table[err]) {
651 return target_to_host_errno_table[err];
653 return err;
656 static inline abi_long get_errno(abi_long ret)
658 if (ret == -1)
659 return -host_to_target_errno(errno);
660 else
661 return ret;
664 const char *target_strerror(int err)
666 if (err == TARGET_ERESTARTSYS) {
667 return "To be restarted";
669 if (err == TARGET_QEMU_ESIGRETURN) {
670 return "Successful exit from sigreturn";
673 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
674 return NULL;
676 return strerror(target_to_host_errno(err));
/* safe_syscallN(): generate wrappers that route through safe_syscall()
 * instead of syscall(). NOTE(review): safe_syscall() is defined
 * elsewhere (assembly helper); presumably it makes blocking syscalls
 * restartable with respect to guest signal delivery — confirm against
 * safe-syscall.inc.S before relying on this. */
679 #define safe_syscall0(type, name) \
680 static type safe_##name(void) \
682 return safe_syscall(__NR_##name); \
685 #define safe_syscall1(type, name, type1, arg1) \
686 static type safe_##name(type1 arg1) \
688 return safe_syscall(__NR_##name, arg1); \
691 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
692 static type safe_##name(type1 arg1, type2 arg2) \
694 return safe_syscall(__NR_##name, arg1, arg2); \
697 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
700 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
703 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
704 type4, arg4) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
707 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
710 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
711 type4, arg4, type5, arg5) \
712 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
713 type5 arg5) \
715 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
718 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
719 type4, arg4, type5, arg5, type6, arg6) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
721 type5 arg5, type6 arg6) \
723 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Blocking host syscalls used by the emulation are instantiated via the
 * safe_syscallN() generators above; each safe_xxx() has the same
 * signature as the raw syscall. */
726 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
727 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
728 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
729 int, flags, mode_t, mode)
730 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
731 struct rusage *, rusage)
732 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
733 int, options, struct rusage *, rusage)
734 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
735 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
736 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
737 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
738 struct timespec *, tsp, const sigset_t *, sigmask,
739 size_t, sigsetsize)
740 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
741 int, maxevents, int, timeout, const sigset_t *, sigmask,
742 size_t, sigsetsize)
743 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
744 const struct timespec *,timeout,int *,uaddr2,int,val3)
745 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
746 safe_syscall2(int, kill, pid_t, pid, int, sig)
747 safe_syscall2(int, tkill, int, tid, int, sig)
748 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
749 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
750 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
751 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
752 unsigned long, pos_l, unsigned long, pos_h)
753 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
754 unsigned long, pos_l, unsigned long, pos_h)
755 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
756 socklen_t, addrlen)
757 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
758 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
759 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
760 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
761 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
762 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
763 safe_syscall2(int, flock, int, fd, int, operation)
764 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
765 const struct timespec *, uts, size_t, sigsetsize)
766 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
767 int, flags)
768 safe_syscall2(int, nanosleep, const struct timespec *, req,
769 struct timespec *, rem)
770 #ifdef TARGET_NR_clock_nanosleep
771 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
772 const struct timespec *, req, struct timespec *, rem)
773 #endif
/* SysV IPC: hosts with dedicated msgsnd/msgrcv/semtimedop syscalls get
 * direct safe_ wrappers; otherwise everything is multiplexed through
 * the single ipc(2) syscall below. */
774 #ifdef __NR_msgsnd
775 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
776 int, flags)
777 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
778 long, msgtype, int, flags)
779 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
780 unsigned, nsops, const struct timespec *, timeout)
781 #else
782 /* This host kernel architecture uses a single ipc syscall; fake up
783 * wrappers for the sub-operations to hide this implementation detail.
784 * Annoyingly we can't include linux/ipc.h to get the constant definitions
785 * for the call parameter because some structs in there conflict with the
786 * sys/ipc.h ones. So we just define them here, and rely on them being
787 * the same for all host architectures.
789 #define Q_SEMTIMEDOP 4
790 #define Q_MSGSND 11
791 #define Q_MSGRCV 12
792 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
794 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
795 void *, ptr, long, fifth)
796 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
798 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
800 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
802 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
804 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
805 const struct timespec *timeout)
807 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
808 (long)timeout);
810 #endif
/* POSIX message queues plus variadic safe_ioctl/safe_fcntl shims. */
811 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
812 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
813 size_t, len, unsigned, prio, const struct timespec *, timeout)
814 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
815 size_t, len, unsigned *, prio, const struct timespec *, timeout)
816 #endif
817 /* We do ioctl like this rather than via safe_syscall3 to preserve the
818 * "third argument might be integer or pointer or not present" behaviour of
819 * the libc function.
821 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
822 /* Similarly for fcntl. Note that callers must always:
823 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
824 * use the flock64 struct rather than unsuffixed flock
825 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
827 #ifdef __NR_fcntl64
828 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #else
830 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
831 #endif
833 static inline int host_to_target_sock_type(int host_type)
835 int target_type;
837 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
838 case SOCK_DGRAM:
839 target_type = TARGET_SOCK_DGRAM;
840 break;
841 case SOCK_STREAM:
842 target_type = TARGET_SOCK_STREAM;
843 break;
844 default:
845 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
846 break;
849 #if defined(SOCK_CLOEXEC)
850 if (host_type & SOCK_CLOEXEC) {
851 target_type |= TARGET_SOCK_CLOEXEC;
853 #endif
855 #if defined(SOCK_NONBLOCK)
856 if (host_type & SOCK_NONBLOCK) {
857 target_type |= TARGET_SOCK_NONBLOCK;
859 #endif
861 return target_type;
/* Guest heap bookkeeping for brk(2) emulation:
 *   target_brk          - current guest program break
 *   target_original_brk - break at process start (lower bound)
 *   brk_page            - first host-page boundary above the break */
864 static abi_ulong target_brk;
865 static abi_ulong target_original_brk;
866 static abi_ulong brk_page;
/* Record the initial program break, rounded up to a host page. */
868 void target_set_brk(abi_ulong new_brk)
870 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
871 brk_page = HOST_PAGE_ALIGN(target_brk);
874 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
875 #define DEBUGF_BRK(message, args...)
877 /* do_brk() must return target values and target errnos. */
878 abi_long do_brk(abi_ulong new_brk)
880 abi_long mapped_addr;
881 abi_ulong new_alloc_size;
883 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
/* brk(0) is a query: return the current break unchanged. */
885 if (!new_brk) {
886 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
887 return target_brk;
/* Refuse to shrink below the initial break; like the kernel, report
 * the unchanged break rather than an error. */
889 if (new_brk < target_original_brk) {
890 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
891 target_brk);
892 return target_brk;
895 /* If the new brk is less than the highest page reserved to the
896 * target heap allocation, set it and we're almost done... */
897 if (new_brk <= brk_page) {
898 /* Heap contents are initialized to zero, as for anonymous
899 * mapped pages. */
900 if (new_brk > target_brk) {
901 memset(g2h(target_brk), 0, new_brk - target_brk);
903 target_brk = new_brk;
904 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
905 return target_brk;
908 /* We need to allocate more memory after the brk... Note that
909 * we don't use MAP_FIXED because that will map over the top of
910 * any existing mapping (like the one with the host libc or qemu
911 * itself); instead we treat "mapped but at wrong address" as
912 * a failure and unmap again.
914 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
915 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
916 PROT_READ|PROT_WRITE,
917 MAP_ANON|MAP_PRIVATE, 0, 0));
919 if (mapped_addr == brk_page) {
920 /* Heap contents are initialized to zero, as for anonymous
921 * mapped pages. Technically the new pages are already
922 * initialized to zero since they *are* anonymous mapped
923 * pages, however we have to take care with the contents that
924 * come from the remaining part of the previous page: it may
925 * contains garbage data due to a previous heap usage (grown
926 * then shrunken). */
927 memset(g2h(target_brk), 0, brk_page - target_brk);
929 target_brk = new_brk;
930 brk_page = HOST_PAGE_ALIGN(target_brk);
931 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
932 target_brk);
933 return target_brk;
934 } else if (mapped_addr != -1) {
935 /* Mapped but at wrong address, meaning there wasn't actually
936 * enough space for this brk.
938 target_munmap(mapped_addr, new_alloc_size);
939 mapped_addr = -1;
940 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
942 else {
943 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
946 #if defined(TARGET_ALPHA)
947 /* We (partially) emulate OSF/1 on Alpha, which requires we
948 return a proper errno, not an unchanged brk value. */
949 return -TARGET_ENOMEM;
950 #endif
951 /* For everything else, return the previous break. */
952 return target_brk;
955 static inline abi_long copy_from_user_fdset(fd_set *fds,
956 abi_ulong target_fds_addr,
957 int n)
959 int i, nw, j, k;
960 abi_ulong b, *target_fds;
962 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
963 if (!(target_fds = lock_user(VERIFY_READ,
964 target_fds_addr,
965 sizeof(abi_ulong) * nw,
966 1)))
967 return -TARGET_EFAULT;
969 FD_ZERO(fds);
970 k = 0;
971 for (i = 0; i < nw; i++) {
972 /* grab the abi_ulong */
973 __get_user(b, &target_fds[i]);
974 for (j = 0; j < TARGET_ABI_BITS; j++) {
975 /* check the bit inside the abi_ulong */
976 if ((b >> j) & 1)
977 FD_SET(k, fds);
978 k++;
982 unlock_user(target_fds, target_fds_addr, 0);
984 return 0;
987 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
988 abi_ulong target_fds_addr,
989 int n)
991 if (target_fds_addr) {
992 if (copy_from_user_fdset(fds, target_fds_addr, n))
993 return -TARGET_EFAULT;
994 *fds_ptr = fds;
995 } else {
996 *fds_ptr = NULL;
998 return 0;
1001 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1002 const fd_set *fds,
1003 int n)
1005 int i, nw, j, k;
1006 abi_long v;
1007 abi_ulong *target_fds;
1009 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1010 if (!(target_fds = lock_user(VERIFY_WRITE,
1011 target_fds_addr,
1012 sizeof(abi_ulong) * nw,
1013 0)))
1014 return -TARGET_EFAULT;
1016 k = 0;
1017 for (i = 0; i < nw; i++) {
1018 v = 0;
1019 for (j = 0; j < TARGET_ABI_BITS; j++) {
1020 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1021 k++;
1023 __put_user(v, &target_fds[i]);
1026 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1028 return 0;
1031 #if defined(__alpha__)
1032 #define HOST_HZ 1024
1033 #else
1034 #define HOST_HZ 100
1035 #endif
1037 static inline abi_long host_to_target_clock_t(long ticks)
1039 #if HOST_HZ == TARGET_HZ
1040 return ticks;
1041 #else
1042 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1043 #endif
1046 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1047 const struct rusage *rusage)
1049 struct target_rusage *target_rusage;
1051 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1052 return -TARGET_EFAULT;
1053 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1054 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1055 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1056 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1057 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1058 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1059 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1060 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1061 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1062 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1063 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1064 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1065 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1066 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1067 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1068 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1069 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1070 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1071 unlock_user_struct(target_rusage, target_addr, 1);
1073 return 0;
1076 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1078 abi_ulong target_rlim_swap;
1079 rlim_t result;
1081 target_rlim_swap = tswapal(target_rlim);
1082 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1083 return RLIM_INFINITY;
1085 result = target_rlim_swap;
1086 if (target_rlim_swap != (rlim_t)result)
1087 return RLIM_INFINITY;
1089 return result;
1092 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1094 abi_ulong target_rlim_swap;
1095 abi_ulong result;
1097 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1098 target_rlim_swap = TARGET_RLIM_INFINITY;
1099 else
1100 target_rlim_swap = rlim;
1101 result = tswapal(target_rlim_swap);
1103 return result;
1106 static inline int target_to_host_resource(int code)
1108 switch (code) {
1109 case TARGET_RLIMIT_AS:
1110 return RLIMIT_AS;
1111 case TARGET_RLIMIT_CORE:
1112 return RLIMIT_CORE;
1113 case TARGET_RLIMIT_CPU:
1114 return RLIMIT_CPU;
1115 case TARGET_RLIMIT_DATA:
1116 return RLIMIT_DATA;
1117 case TARGET_RLIMIT_FSIZE:
1118 return RLIMIT_FSIZE;
1119 case TARGET_RLIMIT_LOCKS:
1120 return RLIMIT_LOCKS;
1121 case TARGET_RLIMIT_MEMLOCK:
1122 return RLIMIT_MEMLOCK;
1123 case TARGET_RLIMIT_MSGQUEUE:
1124 return RLIMIT_MSGQUEUE;
1125 case TARGET_RLIMIT_NICE:
1126 return RLIMIT_NICE;
1127 case TARGET_RLIMIT_NOFILE:
1128 return RLIMIT_NOFILE;
1129 case TARGET_RLIMIT_NPROC:
1130 return RLIMIT_NPROC;
1131 case TARGET_RLIMIT_RSS:
1132 return RLIMIT_RSS;
1133 case TARGET_RLIMIT_RTPRIO:
1134 return RLIMIT_RTPRIO;
1135 case TARGET_RLIMIT_SIGPENDING:
1136 return RLIMIT_SIGPENDING;
1137 case TARGET_RLIMIT_STACK:
1138 return RLIMIT_STACK;
1139 default:
1140 return code;
1144 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1145 abi_ulong target_tv_addr)
1147 struct target_timeval *target_tv;
1149 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1150 return -TARGET_EFAULT;
1152 __get_user(tv->tv_sec, &target_tv->tv_sec);
1153 __get_user(tv->tv_usec, &target_tv->tv_usec);
1155 unlock_user_struct(target_tv, target_tv_addr, 0);
1157 return 0;
1160 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1161 const struct timeval *tv)
1163 struct target_timeval *target_tv;
1165 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1166 return -TARGET_EFAULT;
1168 __put_user(tv->tv_sec, &target_tv->tv_sec);
1169 __put_user(tv->tv_usec, &target_tv->tv_usec);
1171 unlock_user_struct(target_tv, target_tv_addr, 1);
1173 return 0;
1176 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1177 abi_ulong target_tz_addr)
1179 struct target_timezone *target_tz;
1181 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1182 return -TARGET_EFAULT;
1185 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1186 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1188 unlock_user_struct(target_tz, target_tz_addr, 0);
1190 return 0;
1193 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1194 #include <mqueue.h>
1196 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1197 abi_ulong target_mq_attr_addr)
1199 struct target_mq_attr *target_mq_attr;
1201 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1202 target_mq_attr_addr, 1))
1203 return -TARGET_EFAULT;
1205 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1206 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1207 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1208 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1210 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1212 return 0;
1215 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1216 const struct mq_attr *attr)
1218 struct target_mq_attr *target_mq_attr;
1220 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1221 target_mq_attr_addr, 0))
1222 return -TARGET_EFAULT;
1224 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1225 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1226 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1227 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1229 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1231 return 0;
1233 #endif
1235 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1236 /* do_select() must return target values and target errnos. */
1237 static abi_long do_select(int n,
1238 abi_ulong rfd_addr, abi_ulong wfd_addr,
1239 abi_ulong efd_addr, abi_ulong target_tv_addr)
1241 fd_set rfds, wfds, efds;
1242 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1243 struct timeval tv;
1244 struct timespec ts, *ts_ptr;
1245 abi_long ret;
1247 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1248 if (ret) {
1249 return ret;
1251 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1252 if (ret) {
1253 return ret;
1255 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1256 if (ret) {
1257 return ret;
1260 if (target_tv_addr) {
1261 if (copy_from_user_timeval(&tv, target_tv_addr))
1262 return -TARGET_EFAULT;
1263 ts.tv_sec = tv.tv_sec;
1264 ts.tv_nsec = tv.tv_usec * 1000;
1265 ts_ptr = &ts;
1266 } else {
1267 ts_ptr = NULL;
1270 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1271 ts_ptr, NULL));
1273 if (!is_error(ret)) {
1274 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1275 return -TARGET_EFAULT;
1276 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1277 return -TARGET_EFAULT;
1278 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1279 return -TARGET_EFAULT;
1281 if (target_tv_addr) {
1282 tv.tv_sec = ts.tv_sec;
1283 tv.tv_usec = ts.tv_nsec / 1000;
1284 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1285 return -TARGET_EFAULT;
1290 return ret;
1293 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1294 static abi_long do_old_select(abi_ulong arg1)
1296 struct target_sel_arg_struct *sel;
1297 abi_ulong inp, outp, exp, tvp;
1298 long nsel;
1300 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1301 return -TARGET_EFAULT;
1304 nsel = tswapal(sel->n);
1305 inp = tswapal(sel->inp);
1306 outp = tswapal(sel->outp);
1307 exp = tswapal(sel->exp);
1308 tvp = tswapal(sel->tvp);
1310 unlock_user_struct(sel, arg1, 0);
1312 return do_select(nsel, inp, outp, exp, tvp);
1314 #endif
1315 #endif
1317 static abi_long do_pipe2(int host_pipe[], int flags)
1319 #ifdef CONFIG_PIPE2
1320 return pipe2(host_pipe, flags);
1321 #else
1322 return -ENOSYS;
1323 #endif
1326 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1327 int flags, int is_pipe2)
1329 int host_pipe[2];
1330 abi_long ret;
1331 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1333 if (is_error(ret))
1334 return get_errno(ret);
1336 /* Several targets have special calling conventions for the original
1337 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1338 if (!is_pipe2) {
1339 #if defined(TARGET_ALPHA)
1340 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1341 return host_pipe[0];
1342 #elif defined(TARGET_MIPS)
1343 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1344 return host_pipe[0];
1345 #elif defined(TARGET_SH4)
1346 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1347 return host_pipe[0];
1348 #elif defined(TARGET_SPARC)
1349 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1350 return host_pipe[0];
1351 #endif
1354 if (put_user_s32(host_pipe[0], pipedes)
1355 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1356 return -TARGET_EFAULT;
1357 return get_errno(ret);
1360 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1361 abi_ulong target_addr,
1362 socklen_t len)
1364 struct target_ip_mreqn *target_smreqn;
1366 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1367 if (!target_smreqn)
1368 return -TARGET_EFAULT;
1369 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1370 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1371 if (len == sizeof(struct target_ip_mreqn))
1372 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1373 unlock_user(target_smreqn, target_addr, 0);
1375 return 0;
1378 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1379 abi_ulong target_addr,
1380 socklen_t len)
1382 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1383 sa_family_t sa_family;
1384 struct target_sockaddr *target_saddr;
1386 if (fd_trans_target_to_host_addr(fd)) {
1387 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1390 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1391 if (!target_saddr)
1392 return -TARGET_EFAULT;
1394 sa_family = tswap16(target_saddr->sa_family);
1396 /* Oops. The caller might send a incomplete sun_path; sun_path
1397 * must be terminated by \0 (see the manual page), but
1398 * unfortunately it is quite common to specify sockaddr_un
1399 * length as "strlen(x->sun_path)" while it should be
1400 * "strlen(...) + 1". We'll fix that here if needed.
1401 * Linux kernel has a similar feature.
1404 if (sa_family == AF_UNIX) {
1405 if (len < unix_maxlen && len > 0) {
1406 char *cp = (char*)target_saddr;
1408 if ( cp[len-1] && !cp[len] )
1409 len++;
1411 if (len > unix_maxlen)
1412 len = unix_maxlen;
1415 memcpy(addr, target_saddr, len);
1416 addr->sa_family = sa_family;
1417 if (sa_family == AF_NETLINK) {
1418 struct sockaddr_nl *nladdr;
1420 nladdr = (struct sockaddr_nl *)addr;
1421 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1422 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1423 } else if (sa_family == AF_PACKET) {
1424 struct target_sockaddr_ll *lladdr;
1426 lladdr = (struct target_sockaddr_ll *)addr;
1427 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1428 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1430 unlock_user(target_saddr, target_addr, 0);
1432 return 0;
1435 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1436 struct sockaddr *addr,
1437 socklen_t len)
1439 struct target_sockaddr *target_saddr;
1441 if (len == 0) {
1442 return 0;
1444 assert(addr);
1446 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1447 if (!target_saddr)
1448 return -TARGET_EFAULT;
1449 memcpy(target_saddr, addr, len);
1450 if (len >= offsetof(struct target_sockaddr, sa_family) +
1451 sizeof(target_saddr->sa_family)) {
1452 target_saddr->sa_family = tswap16(addr->sa_family);
1454 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1455 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1456 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1457 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1458 } else if (addr->sa_family == AF_PACKET) {
1459 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1460 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1461 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1462 } else if (addr->sa_family == AF_INET6 &&
1463 len >= sizeof(struct target_sockaddr_in6)) {
1464 struct target_sockaddr_in6 *target_in6 =
1465 (struct target_sockaddr_in6 *)target_saddr;
1466 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1468 unlock_user(target_saddr, target_addr, len);
1470 return 0;
1473 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1474 struct target_msghdr *target_msgh)
1476 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1477 abi_long msg_controllen;
1478 abi_ulong target_cmsg_addr;
1479 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1480 socklen_t space = 0;
1482 msg_controllen = tswapal(target_msgh->msg_controllen);
1483 if (msg_controllen < sizeof (struct target_cmsghdr))
1484 goto the_end;
1485 target_cmsg_addr = tswapal(target_msgh->msg_control);
1486 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1487 target_cmsg_start = target_cmsg;
1488 if (!target_cmsg)
1489 return -TARGET_EFAULT;
1491 while (cmsg && target_cmsg) {
1492 void *data = CMSG_DATA(cmsg);
1493 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1495 int len = tswapal(target_cmsg->cmsg_len)
1496 - sizeof(struct target_cmsghdr);
1498 space += CMSG_SPACE(len);
1499 if (space > msgh->msg_controllen) {
1500 space -= CMSG_SPACE(len);
1501 /* This is a QEMU bug, since we allocated the payload
1502 * area ourselves (unlike overflow in host-to-target
1503 * conversion, which is just the guest giving us a buffer
1504 * that's too small). It can't happen for the payload types
1505 * we currently support; if it becomes an issue in future
1506 * we would need to improve our allocation strategy to
1507 * something more intelligent than "twice the size of the
1508 * target buffer we're reading from".
1510 gemu_log("Host cmsg overflow\n");
1511 break;
1514 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1515 cmsg->cmsg_level = SOL_SOCKET;
1516 } else {
1517 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1519 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1520 cmsg->cmsg_len = CMSG_LEN(len);
1522 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1523 int *fd = (int *)data;
1524 int *target_fd = (int *)target_data;
1525 int i, numfds = len / sizeof(int);
1527 for (i = 0; i < numfds; i++) {
1528 __get_user(fd[i], target_fd + i);
1530 } else if (cmsg->cmsg_level == SOL_SOCKET
1531 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1532 struct ucred *cred = (struct ucred *)data;
1533 struct target_ucred *target_cred =
1534 (struct target_ucred *)target_data;
1536 __get_user(cred->pid, &target_cred->pid);
1537 __get_user(cred->uid, &target_cred->uid);
1538 __get_user(cred->gid, &target_cred->gid);
1539 } else {
1540 gemu_log("Unsupported ancillary data: %d/%d\n",
1541 cmsg->cmsg_level, cmsg->cmsg_type);
1542 memcpy(data, target_data, len);
1545 cmsg = CMSG_NXTHDR(msgh, cmsg);
1546 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1547 target_cmsg_start);
1549 unlock_user(target_cmsg, target_cmsg_addr, 0);
1550 the_end:
1551 msgh->msg_controllen = space;
1552 return 0;
1555 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1556 struct msghdr *msgh)
1558 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1559 abi_long msg_controllen;
1560 abi_ulong target_cmsg_addr;
1561 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1562 socklen_t space = 0;
1564 msg_controllen = tswapal(target_msgh->msg_controllen);
1565 if (msg_controllen < sizeof (struct target_cmsghdr))
1566 goto the_end;
1567 target_cmsg_addr = tswapal(target_msgh->msg_control);
1568 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1569 target_cmsg_start = target_cmsg;
1570 if (!target_cmsg)
1571 return -TARGET_EFAULT;
1573 while (cmsg && target_cmsg) {
1574 void *data = CMSG_DATA(cmsg);
1575 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1577 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1578 int tgt_len, tgt_space;
1580 /* We never copy a half-header but may copy half-data;
1581 * this is Linux's behaviour in put_cmsg(). Note that
1582 * truncation here is a guest problem (which we report
1583 * to the guest via the CTRUNC bit), unlike truncation
1584 * in target_to_host_cmsg, which is a QEMU bug.
1586 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1587 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1588 break;
1591 if (cmsg->cmsg_level == SOL_SOCKET) {
1592 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1593 } else {
1594 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1596 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1598 /* Payload types which need a different size of payload on
1599 * the target must adjust tgt_len here.
1601 tgt_len = len;
1602 switch (cmsg->cmsg_level) {
1603 case SOL_SOCKET:
1604 switch (cmsg->cmsg_type) {
1605 case SO_TIMESTAMP:
1606 tgt_len = sizeof(struct target_timeval);
1607 break;
1608 default:
1609 break;
1611 break;
1612 default:
1613 break;
1616 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1617 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1618 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1621 /* We must now copy-and-convert len bytes of payload
1622 * into tgt_len bytes of destination space. Bear in mind
1623 * that in both source and destination we may be dealing
1624 * with a truncated value!
1626 switch (cmsg->cmsg_level) {
1627 case SOL_SOCKET:
1628 switch (cmsg->cmsg_type) {
1629 case SCM_RIGHTS:
1631 int *fd = (int *)data;
1632 int *target_fd = (int *)target_data;
1633 int i, numfds = tgt_len / sizeof(int);
1635 for (i = 0; i < numfds; i++) {
1636 __put_user(fd[i], target_fd + i);
1638 break;
1640 case SO_TIMESTAMP:
1642 struct timeval *tv = (struct timeval *)data;
1643 struct target_timeval *target_tv =
1644 (struct target_timeval *)target_data;
1646 if (len != sizeof(struct timeval) ||
1647 tgt_len != sizeof(struct target_timeval)) {
1648 goto unimplemented;
1651 /* copy struct timeval to target */
1652 __put_user(tv->tv_sec, &target_tv->tv_sec);
1653 __put_user(tv->tv_usec, &target_tv->tv_usec);
1654 break;
1656 case SCM_CREDENTIALS:
1658 struct ucred *cred = (struct ucred *)data;
1659 struct target_ucred *target_cred =
1660 (struct target_ucred *)target_data;
1662 __put_user(cred->pid, &target_cred->pid);
1663 __put_user(cred->uid, &target_cred->uid);
1664 __put_user(cred->gid, &target_cred->gid);
1665 break;
1667 default:
1668 goto unimplemented;
1670 break;
1672 case SOL_IP:
1673 switch (cmsg->cmsg_type) {
1674 case IP_TTL:
1676 uint32_t *v = (uint32_t *)data;
1677 uint32_t *t_int = (uint32_t *)target_data;
1679 if (len != sizeof(uint32_t) ||
1680 tgt_len != sizeof(uint32_t)) {
1681 goto unimplemented;
1683 __put_user(*v, t_int);
1684 break;
1686 case IP_RECVERR:
1688 struct errhdr_t {
1689 struct sock_extended_err ee;
1690 struct sockaddr_in offender;
1692 struct errhdr_t *errh = (struct errhdr_t *)data;
1693 struct errhdr_t *target_errh =
1694 (struct errhdr_t *)target_data;
1696 if (len != sizeof(struct errhdr_t) ||
1697 tgt_len != sizeof(struct errhdr_t)) {
1698 goto unimplemented;
1700 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1701 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1702 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1703 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1704 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1705 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1706 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1707 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1708 (void *) &errh->offender, sizeof(errh->offender));
1709 break;
1711 default:
1712 goto unimplemented;
1714 break;
1716 case SOL_IPV6:
1717 switch (cmsg->cmsg_type) {
1718 case IPV6_HOPLIMIT:
1720 uint32_t *v = (uint32_t *)data;
1721 uint32_t *t_int = (uint32_t *)target_data;
1723 if (len != sizeof(uint32_t) ||
1724 tgt_len != sizeof(uint32_t)) {
1725 goto unimplemented;
1727 __put_user(*v, t_int);
1728 break;
1730 case IPV6_RECVERR:
1732 struct errhdr6_t {
1733 struct sock_extended_err ee;
1734 struct sockaddr_in6 offender;
1736 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1737 struct errhdr6_t *target_errh =
1738 (struct errhdr6_t *)target_data;
1740 if (len != sizeof(struct errhdr6_t) ||
1741 tgt_len != sizeof(struct errhdr6_t)) {
1742 goto unimplemented;
1744 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1745 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1746 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1747 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1748 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1749 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1750 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1751 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1752 (void *) &errh->offender, sizeof(errh->offender));
1753 break;
1755 default:
1756 goto unimplemented;
1758 break;
1760 default:
1761 unimplemented:
1762 gemu_log("Unsupported ancillary data: %d/%d\n",
1763 cmsg->cmsg_level, cmsg->cmsg_type);
1764 memcpy(target_data, data, MIN(len, tgt_len));
1765 if (tgt_len > len) {
1766 memset(target_data + len, 0, tgt_len - len);
1770 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1771 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1772 if (msg_controllen < tgt_space) {
1773 tgt_space = msg_controllen;
1775 msg_controllen -= tgt_space;
1776 space += tgt_space;
1777 cmsg = CMSG_NXTHDR(msgh, cmsg);
1778 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1779 target_cmsg_start);
1781 unlock_user(target_cmsg, target_cmsg_addr, space);
1782 the_end:
1783 target_msgh->msg_controllen = tswapal(space);
1784 return 0;
1787 /* do_setsockopt() Must return target values and target errnos. */
1788 static abi_long do_setsockopt(int sockfd, int level, int optname,
1789 abi_ulong optval_addr, socklen_t optlen)
1791 abi_long ret;
1792 int val;
1793 struct ip_mreqn *ip_mreq;
1794 struct ip_mreq_source *ip_mreq_source;
1796 switch(level) {
1797 case SOL_TCP:
1798 /* TCP options all take an 'int' value. */
1799 if (optlen < sizeof(uint32_t))
1800 return -TARGET_EINVAL;
1802 if (get_user_u32(val, optval_addr))
1803 return -TARGET_EFAULT;
1804 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1805 break;
1806 case SOL_IP:
1807 switch(optname) {
1808 case IP_TOS:
1809 case IP_TTL:
1810 case IP_HDRINCL:
1811 case IP_ROUTER_ALERT:
1812 case IP_RECVOPTS:
1813 case IP_RETOPTS:
1814 case IP_PKTINFO:
1815 case IP_MTU_DISCOVER:
1816 case IP_RECVERR:
1817 case IP_RECVTTL:
1818 case IP_RECVTOS:
1819 #ifdef IP_FREEBIND
1820 case IP_FREEBIND:
1821 #endif
1822 case IP_MULTICAST_TTL:
1823 case IP_MULTICAST_LOOP:
1824 val = 0;
1825 if (optlen >= sizeof(uint32_t)) {
1826 if (get_user_u32(val, optval_addr))
1827 return -TARGET_EFAULT;
1828 } else if (optlen >= 1) {
1829 if (get_user_u8(val, optval_addr))
1830 return -TARGET_EFAULT;
1832 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1833 break;
1834 case IP_ADD_MEMBERSHIP:
1835 case IP_DROP_MEMBERSHIP:
1836 if (optlen < sizeof (struct target_ip_mreq) ||
1837 optlen > sizeof (struct target_ip_mreqn))
1838 return -TARGET_EINVAL;
1840 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1841 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1842 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1843 break;
1845 case IP_BLOCK_SOURCE:
1846 case IP_UNBLOCK_SOURCE:
1847 case IP_ADD_SOURCE_MEMBERSHIP:
1848 case IP_DROP_SOURCE_MEMBERSHIP:
1849 if (optlen != sizeof (struct target_ip_mreq_source))
1850 return -TARGET_EINVAL;
1852 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1853 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1854 unlock_user (ip_mreq_source, optval_addr, 0);
1855 break;
1857 default:
1858 goto unimplemented;
1860 break;
1861 case SOL_IPV6:
1862 switch (optname) {
1863 case IPV6_MTU_DISCOVER:
1864 case IPV6_MTU:
1865 case IPV6_V6ONLY:
1866 case IPV6_RECVPKTINFO:
1867 case IPV6_UNICAST_HOPS:
1868 case IPV6_MULTICAST_HOPS:
1869 case IPV6_MULTICAST_LOOP:
1870 case IPV6_RECVERR:
1871 case IPV6_RECVHOPLIMIT:
1872 case IPV6_2292HOPLIMIT:
1873 case IPV6_CHECKSUM:
1874 val = 0;
1875 if (optlen < sizeof(uint32_t)) {
1876 return -TARGET_EINVAL;
1878 if (get_user_u32(val, optval_addr)) {
1879 return -TARGET_EFAULT;
1881 ret = get_errno(setsockopt(sockfd, level, optname,
1882 &val, sizeof(val)));
1883 break;
1884 case IPV6_PKTINFO:
1886 struct in6_pktinfo pki;
1888 if (optlen < sizeof(pki)) {
1889 return -TARGET_EINVAL;
1892 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1893 return -TARGET_EFAULT;
1896 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1898 ret = get_errno(setsockopt(sockfd, level, optname,
1899 &pki, sizeof(pki)));
1900 break;
1902 default:
1903 goto unimplemented;
1905 break;
1906 case SOL_ICMPV6:
1907 switch (optname) {
1908 case ICMPV6_FILTER:
1910 struct icmp6_filter icmp6f;
1912 if (optlen > sizeof(icmp6f)) {
1913 optlen = sizeof(icmp6f);
1916 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
1917 return -TARGET_EFAULT;
1920 for (val = 0; val < 8; val++) {
1921 icmp6f.data[val] = tswap32(icmp6f.data[val]);
1924 ret = get_errno(setsockopt(sockfd, level, optname,
1925 &icmp6f, optlen));
1926 break;
1928 default:
1929 goto unimplemented;
1931 break;
1932 case SOL_RAW:
1933 switch (optname) {
1934 case ICMP_FILTER:
1935 case IPV6_CHECKSUM:
1936 /* those take an u32 value */
1937 if (optlen < sizeof(uint32_t)) {
1938 return -TARGET_EINVAL;
1941 if (get_user_u32(val, optval_addr)) {
1942 return -TARGET_EFAULT;
1944 ret = get_errno(setsockopt(sockfd, level, optname,
1945 &val, sizeof(val)));
1946 break;
1948 default:
1949 goto unimplemented;
1951 break;
1952 case TARGET_SOL_SOCKET:
1953 switch (optname) {
1954 case TARGET_SO_RCVTIMEO:
1956 struct timeval tv;
1958 optname = SO_RCVTIMEO;
1960 set_timeout:
1961 if (optlen != sizeof(struct target_timeval)) {
1962 return -TARGET_EINVAL;
1965 if (copy_from_user_timeval(&tv, optval_addr)) {
1966 return -TARGET_EFAULT;
1969 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
1970 &tv, sizeof(tv)));
1971 return ret;
1973 case TARGET_SO_SNDTIMEO:
1974 optname = SO_SNDTIMEO;
1975 goto set_timeout;
1976 case TARGET_SO_ATTACH_FILTER:
1978 struct target_sock_fprog *tfprog;
1979 struct target_sock_filter *tfilter;
1980 struct sock_fprog fprog;
1981 struct sock_filter *filter;
1982 int i;
1984 if (optlen != sizeof(*tfprog)) {
1985 return -TARGET_EINVAL;
1987 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
1988 return -TARGET_EFAULT;
1990 if (!lock_user_struct(VERIFY_READ, tfilter,
1991 tswapal(tfprog->filter), 0)) {
1992 unlock_user_struct(tfprog, optval_addr, 1);
1993 return -TARGET_EFAULT;
1996 fprog.len = tswap16(tfprog->len);
1997 filter = g_try_new(struct sock_filter, fprog.len);
1998 if (filter == NULL) {
1999 unlock_user_struct(tfilter, tfprog->filter, 1);
2000 unlock_user_struct(tfprog, optval_addr, 1);
2001 return -TARGET_ENOMEM;
2003 for (i = 0; i < fprog.len; i++) {
2004 filter[i].code = tswap16(tfilter[i].code);
2005 filter[i].jt = tfilter[i].jt;
2006 filter[i].jf = tfilter[i].jf;
2007 filter[i].k = tswap32(tfilter[i].k);
2009 fprog.filter = filter;
2011 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2012 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2013 g_free(filter);
2015 unlock_user_struct(tfilter, tfprog->filter, 1);
2016 unlock_user_struct(tfprog, optval_addr, 1);
2017 return ret;
2019 case TARGET_SO_BINDTODEVICE:
2021 char *dev_ifname, *addr_ifname;
2023 if (optlen > IFNAMSIZ - 1) {
2024 optlen = IFNAMSIZ - 1;
2026 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2027 if (!dev_ifname) {
2028 return -TARGET_EFAULT;
2030 optname = SO_BINDTODEVICE;
2031 addr_ifname = alloca(IFNAMSIZ);
2032 memcpy(addr_ifname, dev_ifname, optlen);
2033 addr_ifname[optlen] = 0;
2034 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2035 addr_ifname, optlen));
2036 unlock_user (dev_ifname, optval_addr, 0);
2037 return ret;
2039 case TARGET_SO_LINGER:
2041 struct linger lg;
2042 struct target_linger *tlg;
2044 if (optlen != sizeof(struct target_linger)) {
2045 return -TARGET_EINVAL;
2047 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2048 return -TARGET_EFAULT;
2050 __get_user(lg.l_onoff, &tlg->l_onoff);
2051 __get_user(lg.l_linger, &tlg->l_linger);
2052 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2053 &lg, sizeof(lg)));
2054 unlock_user_struct(tlg, optval_addr, 0);
2055 return ret;
2057 /* Options with 'int' argument. */
2058 case TARGET_SO_DEBUG:
2059 optname = SO_DEBUG;
2060 break;
2061 case TARGET_SO_REUSEADDR:
2062 optname = SO_REUSEADDR;
2063 break;
2064 #ifdef SO_REUSEPORT
2065 case TARGET_SO_REUSEPORT:
2066 optname = SO_REUSEPORT;
2067 break;
2068 #endif
2069 case TARGET_SO_TYPE:
2070 optname = SO_TYPE;
2071 break;
2072 case TARGET_SO_ERROR:
2073 optname = SO_ERROR;
2074 break;
2075 case TARGET_SO_DONTROUTE:
2076 optname = SO_DONTROUTE;
2077 break;
2078 case TARGET_SO_BROADCAST:
2079 optname = SO_BROADCAST;
2080 break;
2081 case TARGET_SO_SNDBUF:
2082 optname = SO_SNDBUF;
2083 break;
2084 case TARGET_SO_SNDBUFFORCE:
2085 optname = SO_SNDBUFFORCE;
2086 break;
2087 case TARGET_SO_RCVBUF:
2088 optname = SO_RCVBUF;
2089 break;
2090 case TARGET_SO_RCVBUFFORCE:
2091 optname = SO_RCVBUFFORCE;
2092 break;
2093 case TARGET_SO_KEEPALIVE:
2094 optname = SO_KEEPALIVE;
2095 break;
2096 case TARGET_SO_OOBINLINE:
2097 optname = SO_OOBINLINE;
2098 break;
2099 case TARGET_SO_NO_CHECK:
2100 optname = SO_NO_CHECK;
2101 break;
2102 case TARGET_SO_PRIORITY:
2103 optname = SO_PRIORITY;
2104 break;
2105 #ifdef SO_BSDCOMPAT
2106 case TARGET_SO_BSDCOMPAT:
2107 optname = SO_BSDCOMPAT;
2108 break;
2109 #endif
2110 case TARGET_SO_PASSCRED:
2111 optname = SO_PASSCRED;
2112 break;
2113 case TARGET_SO_PASSSEC:
2114 optname = SO_PASSSEC;
2115 break;
2116 case TARGET_SO_TIMESTAMP:
2117 optname = SO_TIMESTAMP;
2118 break;
2119 case TARGET_SO_RCVLOWAT:
2120 optname = SO_RCVLOWAT;
2121 break;
2122 default:
2123 goto unimplemented;
2125 if (optlen < sizeof(uint32_t))
2126 return -TARGET_EINVAL;
2128 if (get_user_u32(val, optval_addr))
2129 return -TARGET_EFAULT;
2130 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2131 break;
2132 default:
2133 unimplemented:
2134 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2135 ret = -TARGET_ENOPROTOOPT;
2137 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt(2) into the host equivalent:
 * TARGET_SOL_SOCKET/TARGET_SO_* constants are mapped to the host's,
 * the result value is copied back to guest memory at optval_addr and
 * the result length to the guest u32 at optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* struct ucred needs field-by-field translation because the
             * guest's target_ucred layout/endianness may differ. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_LINGER:
        {
            /* struct linger likewise gets per-field byte swapping. */
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            /* Unknown options are passed through unchanged and left to
             * the host kernel to accept or reject. */
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): 'lv = sizeof(lv)' looks like it was meant to be
         * sizeof(val); both are 4 bytes here, so behavior is unchanged,
         * but confirm before relying on it. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* SOCK_* values differ between host and some targets. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Small positive values may be returned as a single byte
                 * when the caller supplied a short buffer. */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* 'thigh' is shifted up in two half-width steps: a single shift by
     * TARGET_LONG_BITS would be undefined behaviour when the shift count
     * equals the width of the promoted operand (64-bit targets). */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    /* Same two-step trick for extracting the host high word; on a 64-bit
     * host this leaves *hhigh == 0, which is the desired result. */
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
/* Lock a guest iovec array into host memory and build the host struct
 * iovec array for it.  Returns NULL with errno set on failure; a
 * zero-count request returns NULL with errno = 0 (valid empty vector).
 * The result must be released with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        /* Distinguish "empty vector" from the error returns below. */
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                /* Clamp so the summed length never exceeds max_len. */
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked before the failing entry. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/* Release an iovec obtained from lock_iovec().  When 'copy' is nonzero
 * the host buffer contents are written back to guest memory (the
 * receive path); otherwise they are discarded (the send path).
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    /* Re-read the guest iovec to recover each buffer's guest address. */
    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec() stopped at the first negative length. */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/* Convert a guest socket type (base SOCK_* value plus the CLOEXEC /
 * NONBLOCK flag bits) into the host's encoding, in place.
 * Returns 0 on success or -TARGET_EINVAL if a requested flag cannot be
 * represented on this host at socket-creation time (NONBLOCK may still
 * be emulated later via sock_flags_fixup()).
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Remaining SOCK_* values are assumed to match the host's. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation.
 * Used when the host lacks SOCK_NONBLOCK: sets O_NONBLOCK via fcntl()
 * instead.  Returns 'fd' on success; on failure the fd is closed and
 * -TARGET_EINVAL is returned, so no descriptor leaks.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos.
 * Creates the host socket and, for netlink and packet sockets,
 * registers an fd translator so later data transfers are byteswapped
 * for the guest.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only the netlink protocols we can translate are allowed through. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* Packet-socket protocol numbers are in network byte order. */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* Apply NONBLOCK emulation if the host couldn't do it natively. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the PF_NETLINK check above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
2635 /* do_bind() Must return target values and target errnos. */
2636 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2637 socklen_t addrlen)
2639 void *addr;
2640 abi_long ret;
2642 if ((int)addrlen < 0) {
2643 return -TARGET_EINVAL;
2646 addr = alloca(addrlen+1);
2648 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2649 if (ret)
2650 return ret;
2652 return get_errno(bind(sockfd, addr, addrlen));
2655 /* do_connect() Must return target values and target errnos. */
2656 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2657 socklen_t addrlen)
2659 void *addr;
2660 abi_long ret;
2662 if ((int)addrlen < 0) {
2663 return -TARGET_EINVAL;
2666 addr = alloca(addrlen+1);
2668 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2669 if (ret)
2670 return ret;
2672 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 * Shared implementation of sendmsg/recvmsg for an already-locked guest
 * msghdr: converts msg_name, control data and the iovec, performs the
 * host syscall, and on receive copies results back to the guest.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the guest control length: host cmsg headers may be larger
     * than the target's, so the translated data can grow. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* Translator may rewrite the payload in place, so work on a
             * private copy of the first iovec buffer. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 marks the deliberately-bad name set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                /* On success the byte count, not the cmsg status, is
                 * what the guest receives. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
2782 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
2783 int flags, int send)
2785 abi_long ret;
2786 struct target_msghdr *msgp;
2788 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
2789 msgp,
2790 target_msg,
2791 send ? 1 : 0)) {
2792 return -TARGET_EFAULT;
2794 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
2795 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
2796 return ret;
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked(),
 * matching the kernel's semantics: returns the number of datagrams
 * processed if any succeeded, otherwise the first error.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Write back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos.
 * Note the separate 'ret_addrlen': the kernel may report an address
 * length larger than the caller's buffer; we copy back at most the
 * buffer size but report the full kernel length, as Linux does.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        /* Caller doesn't want the peer address at all. */
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos.
 * As with do_accept4(), the kernel-reported length may exceed the
 * caller's buffer: copy back MIN(addrlen, ret_addrlen) bytes but
 * report the full length.
 */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos.
 * Same length-handling contract as do_getpeername(): truncate the
 * copied address to the guest buffer but report the kernel's length.
 */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
2947 /* do_socketpair() Must return target values and target errnos. */
2948 static abi_long do_socketpair(int domain, int type, int protocol,
2949 abi_ulong target_tab_addr)
2951 int tab[2];
2952 abi_long ret;
2954 target_to_host_sock_type(&type);
2956 ret = get_errno(socketpair(domain, type, protocol, tab));
2957 if (!is_error(ret)) {
2958 if (put_user_s32(tab[0], target_tab_addr)
2959 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
2960 ret = -TARGET_EFAULT;
2962 return ret;
/* do_sendto() Must return target values and target errnos.
 * If the fd has a target-to-host data translator, the payload is
 * copied to a private buffer and translated before sending; the
 * original locked guest buffer is restored before unlocking.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Keep the locked guest pointer in copy_msg so it can be
         * restored (and properly unlocked) on all exit paths. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 byte so AF_UNIX paths can be NUL-terminated. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Free the translated copy and unlock the original buffer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos.
 * Uses a separate 'ret_addrlen' so the kernel-reported source-address
 * length is passed to the guest unmodified while the copy into guest
 * memory is clamped to the guest's buffer size.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Translate received payload in place for the guest. */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: unlock with write-back of the received bytes. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Failure: unlock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 * Demultiplexer for the legacy socketcall(2) syscall: reads the
 * per-operation argument block from guest memory and dispatches to the
 * individual do_* helpers above.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
#define N_SHM_REGIONS 32

/* Fixed-size table of guest shared-memory attachments, used to track
 * shmat() mappings so shmdt() can find their address and size. */
static struct shm_region {
    abi_ulong start;  /* guest virtual address of the attached segment */
    abi_ulong size;   /* size of the segment in bytes */
    bool in_use;      /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/* Guest-layout semid64_ds, used when the target does not supply its own
 * definition.  The __unused padding mirrors the kernel's asm-generic
 * layout for 32-bit ABIs, where each time field is padded to 64 bits. */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
/* Copy the sem_perm member of a guest semid64_ds (at target_addr) into
 * a host struct ipc_perm, byte-swapping each field.  Returns 0 or
 * -TARGET_EFAULT.  Some targets declare 16-bit mode/__seq fields, hence
 * the per-architecture swap width below.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_ipc_perm(): write a host struct ipc_perm
 * into the sem_perm member of the guest semid64_ds at target_addr,
 * byte-swapping each field with the same per-architecture widths.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest semid64_ds at target_addr into a host semid_ds.
 * Delegates the permission member to target_to_host_ipc_perm(), which
 * re-locks the same guest struct.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3257 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3258 struct semid_ds *host_sd)
3260 struct target_semid64_ds *target_sd;
3262 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3263 return -TARGET_EFAULT;
3264 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3265 return -TARGET_EFAULT;
3266 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3267 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3268 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3269 unlock_user_struct(target_sd, target_addr, 1);
3270 return 0;
/* Guest-ABI layout of struct seminfo (semctl IPC_INFO/SEM_INFO result);
 * all fields are plain 32-bit ints on every target. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3286 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3287 struct seminfo *host_seminfo)
3289 struct target_seminfo *target_seminfo;
3290 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3291 return -TARGET_EFAULT;
3292 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3293 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3294 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3295 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3296 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3297 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3298 __put_user(host_seminfo->semume, &target_seminfo->semume);
3299 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3300 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3301 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3302 unlock_user_struct(target_seminfo, target_addr, 1);
3303 return 0;
/* Host-side semctl() argument union (glibc does not export one). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};
3313 union target_semun {
3314 int val;
3315 abi_ulong buf;
3316 abi_ulong array;
3317 abi_ulong __buf;
3320 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3321 abi_ulong target_addr)
3323 int nsems;
3324 unsigned short *array;
3325 union semun semun;
3326 struct semid_ds semid_ds;
3327 int i, ret;
3329 semun.buf = &semid_ds;
3331 ret = semctl(semid, 0, IPC_STAT, semun);
3332 if (ret == -1)
3333 return get_errno(ret);
3335 nsems = semid_ds.sem_nsems;
3337 *host_array = g_try_new(unsigned short, nsems);
3338 if (!*host_array) {
3339 return -TARGET_ENOMEM;
3341 array = lock_user(VERIFY_READ, target_addr,
3342 nsems*sizeof(unsigned short), 1);
3343 if (!array) {
3344 g_free(*host_array);
3345 return -TARGET_EFAULT;
3348 for(i=0; i<nsems; i++) {
3349 __get_user((*host_array)[i], &array[i]);
3351 unlock_user(array, target_addr, 0);
3353 return 0;
3356 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3357 unsigned short **host_array)
3359 int nsems;
3360 unsigned short *array;
3361 union semun semun;
3362 struct semid_ds semid_ds;
3363 int i, ret;
3365 semun.buf = &semid_ds;
3367 ret = semctl(semid, 0, IPC_STAT, semun);
3368 if (ret == -1)
3369 return get_errno(ret);
3371 nsems = semid_ds.sem_nsems;
3373 array = lock_user(VERIFY_WRITE, target_addr,
3374 nsems*sizeof(unsigned short), 0);
3375 if (!array)
3376 return -TARGET_EFAULT;
3378 for(i=0; i<nsems; i++) {
3379 __put_user((*host_array)[i], &array[i]);
3381 g_free(*host_array);
3382 unlock_user(array, target_addr, 1);
3384 return 0;
3387 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3388 abi_ulong target_arg)
3390 union target_semun target_su = { .buf = target_arg };
3391 union semun arg;
3392 struct semid_ds dsarg;
3393 unsigned short *array = NULL;
3394 struct seminfo seminfo;
3395 abi_long ret = -TARGET_EINVAL;
3396 abi_long err;
3397 cmd &= 0xff;
3399 switch( cmd ) {
3400 case GETVAL:
3401 case SETVAL:
3402 /* In 64 bit cross-endian situations, we will erroneously pick up
3403 * the wrong half of the union for the "val" element. To rectify
3404 * this, the entire 8-byte structure is byteswapped, followed by
3405 * a swap of the 4 byte val field. In other cases, the data is
3406 * already in proper host byte order. */
3407 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3408 target_su.buf = tswapal(target_su.buf);
3409 arg.val = tswap32(target_su.val);
3410 } else {
3411 arg.val = target_su.val;
3413 ret = get_errno(semctl(semid, semnum, cmd, arg));
3414 break;
3415 case GETALL:
3416 case SETALL:
3417 err = target_to_host_semarray(semid, &array, target_su.array);
3418 if (err)
3419 return err;
3420 arg.array = array;
3421 ret = get_errno(semctl(semid, semnum, cmd, arg));
3422 err = host_to_target_semarray(semid, target_su.array, &array);
3423 if (err)
3424 return err;
3425 break;
3426 case IPC_STAT:
3427 case IPC_SET:
3428 case SEM_STAT:
3429 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3430 if (err)
3431 return err;
3432 arg.buf = &dsarg;
3433 ret = get_errno(semctl(semid, semnum, cmd, arg));
3434 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3435 if (err)
3436 return err;
3437 break;
3438 case IPC_INFO:
3439 case SEM_INFO:
3440 arg.__buf = &seminfo;
3441 ret = get_errno(semctl(semid, semnum, cmd, arg));
3442 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3443 if (err)
3444 return err;
3445 break;
3446 case IPC_RMID:
3447 case GETPID:
3448 case GETNCNT:
3449 case GETZCNT:
3450 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3451 break;
3454 return ret;
/* Guest-ABI layout of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
3463 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3464 abi_ulong target_addr,
3465 unsigned nsops)
3467 struct target_sembuf *target_sembuf;
3468 int i;
3470 target_sembuf = lock_user(VERIFY_READ, target_addr,
3471 nsops*sizeof(struct target_sembuf), 1);
3472 if (!target_sembuf)
3473 return -TARGET_EFAULT;
3475 for(i=0; i<nsops; i++) {
3476 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3477 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3478 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3481 unlock_user(target_sembuf, target_addr, 0);
3483 return 0;
3486 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3488 struct sembuf sops[nsops];
3490 if (target_to_host_sembuf(sops, ptr, nsops))
3491 return -TARGET_EFAULT;
3493 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3496 struct target_msqid_ds
3498 struct target_ipc_perm msg_perm;
3499 abi_ulong msg_stime;
3500 #if TARGET_ABI_BITS == 32
3501 abi_ulong __unused1;
3502 #endif
3503 abi_ulong msg_rtime;
3504 #if TARGET_ABI_BITS == 32
3505 abi_ulong __unused2;
3506 #endif
3507 abi_ulong msg_ctime;
3508 #if TARGET_ABI_BITS == 32
3509 abi_ulong __unused3;
3510 #endif
3511 abi_ulong __msg_cbytes;
3512 abi_ulong msg_qnum;
3513 abi_ulong msg_qbytes;
3514 abi_ulong msg_lspid;
3515 abi_ulong msg_lrpid;
3516 abi_ulong __unused4;
3517 abi_ulong __unused5;
3520 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3521 abi_ulong target_addr)
3523 struct target_msqid_ds *target_md;
3525 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3526 return -TARGET_EFAULT;
3527 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3528 return -TARGET_EFAULT;
3529 host_md->msg_stime = tswapal(target_md->msg_stime);
3530 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3531 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3532 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3533 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3534 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3535 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3536 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3537 unlock_user_struct(target_md, target_addr, 0);
3538 return 0;
3541 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3542 struct msqid_ds *host_md)
3544 struct target_msqid_ds *target_md;
3546 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3547 return -TARGET_EFAULT;
3548 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3549 return -TARGET_EFAULT;
3550 target_md->msg_stime = tswapal(host_md->msg_stime);
3551 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3552 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3553 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3554 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3555 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3556 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3557 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3558 unlock_user_struct(target_md, target_addr, 1);
3559 return 0;
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO result). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
3573 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3574 struct msginfo *host_msginfo)
3576 struct target_msginfo *target_msginfo;
3577 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3578 return -TARGET_EFAULT;
3579 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3580 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3581 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3582 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3583 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3584 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3585 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3586 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3587 unlock_user_struct(target_msginfo, target_addr, 1);
3588 return 0;
3591 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3593 struct msqid_ds dsarg;
3594 struct msginfo msginfo;
3595 abi_long ret = -TARGET_EINVAL;
3597 cmd &= 0xff;
3599 switch (cmd) {
3600 case IPC_STAT:
3601 case IPC_SET:
3602 case MSG_STAT:
3603 if (target_to_host_msqid_ds(&dsarg,ptr))
3604 return -TARGET_EFAULT;
3605 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3606 if (host_to_target_msqid_ds(ptr,&dsarg))
3607 return -TARGET_EFAULT;
3608 break;
3609 case IPC_RMID:
3610 ret = get_errno(msgctl(msgid, cmd, NULL));
3611 break;
3612 case IPC_INFO:
3613 case MSG_INFO:
3614 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3615 if (host_to_target_msginfo(ptr, &msginfo))
3616 return -TARGET_EFAULT;
3617 break;
3620 return ret;
3623 struct target_msgbuf {
3624 abi_long mtype;
3625 char mtext[1];
3628 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3629 ssize_t msgsz, int msgflg)
3631 struct target_msgbuf *target_mb;
3632 struct msgbuf *host_mb;
3633 abi_long ret = 0;
3635 if (msgsz < 0) {
3636 return -TARGET_EINVAL;
3639 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3640 return -TARGET_EFAULT;
3641 host_mb = g_try_malloc(msgsz + sizeof(long));
3642 if (!host_mb) {
3643 unlock_user_struct(target_mb, msgp, 0);
3644 return -TARGET_ENOMEM;
3646 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3647 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3648 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3649 g_free(host_mb);
3650 unlock_user_struct(target_mb, msgp, 0);
3652 return ret;
3655 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3656 ssize_t msgsz, abi_long msgtyp,
3657 int msgflg)
3659 struct target_msgbuf *target_mb;
3660 char *target_mtext;
3661 struct msgbuf *host_mb;
3662 abi_long ret = 0;
3664 if (msgsz < 0) {
3665 return -TARGET_EINVAL;
3668 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3669 return -TARGET_EFAULT;
3671 host_mb = g_try_malloc(msgsz + sizeof(long));
3672 if (!host_mb) {
3673 ret = -TARGET_ENOMEM;
3674 goto end;
3676 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
3678 if (ret > 0) {
3679 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
3680 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
3681 if (!target_mtext) {
3682 ret = -TARGET_EFAULT;
3683 goto end;
3685 memcpy(target_mb->mtext, host_mb->mtext, ret);
3686 unlock_user(target_mtext, target_mtext_addr, ret);
3689 target_mb->mtype = tswapal(host_mb->mtype);
3691 end:
3692 if (target_mb)
3693 unlock_user_struct(target_mb, msgp, 1);
3694 g_free(host_mb);
3695 return ret;
3698 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
3699 abi_ulong target_addr)
3701 struct target_shmid_ds *target_sd;
3703 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3704 return -TARGET_EFAULT;
3705 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
3706 return -TARGET_EFAULT;
3707 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3708 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
3709 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3710 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3711 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3712 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3713 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3714 unlock_user_struct(target_sd, target_addr, 0);
3715 return 0;
3718 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
3719 struct shmid_ds *host_sd)
3721 struct target_shmid_ds *target_sd;
3723 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3724 return -TARGET_EFAULT;
3725 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
3726 return -TARGET_EFAULT;
3727 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
3728 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
3729 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
3730 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
3731 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
3732 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
3733 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
3734 unlock_user_struct(target_sd, target_addr, 1);
3735 return 0;
3738 struct target_shminfo {
3739 abi_ulong shmmax;
3740 abi_ulong shmmin;
3741 abi_ulong shmmni;
3742 abi_ulong shmseg;
3743 abi_ulong shmall;
3746 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
3747 struct shminfo *host_shminfo)
3749 struct target_shminfo *target_shminfo;
3750 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
3751 return -TARGET_EFAULT;
3752 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
3753 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
3754 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
3755 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
3756 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
3757 unlock_user_struct(target_shminfo, target_addr, 1);
3758 return 0;
3761 struct target_shm_info {
3762 int used_ids;
3763 abi_ulong shm_tot;
3764 abi_ulong shm_rss;
3765 abi_ulong shm_swp;
3766 abi_ulong swap_attempts;
3767 abi_ulong swap_successes;
3770 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
3771 struct shm_info *host_shm_info)
3773 struct target_shm_info *target_shm_info;
3774 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
3775 return -TARGET_EFAULT;
3776 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
3777 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
3778 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
3779 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
3780 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
3781 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
3782 unlock_user_struct(target_shm_info, target_addr, 1);
3783 return 0;
3786 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
3788 struct shmid_ds dsarg;
3789 struct shminfo shminfo;
3790 struct shm_info shm_info;
3791 abi_long ret = -TARGET_EINVAL;
3793 cmd &= 0xff;
3795 switch(cmd) {
3796 case IPC_STAT:
3797 case IPC_SET:
3798 case SHM_STAT:
3799 if (target_to_host_shmid_ds(&dsarg, buf))
3800 return -TARGET_EFAULT;
3801 ret = get_errno(shmctl(shmid, cmd, &dsarg));
3802 if (host_to_target_shmid_ds(buf, &dsarg))
3803 return -TARGET_EFAULT;
3804 break;
3805 case IPC_INFO:
3806 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
3807 if (host_to_target_shminfo(buf, &shminfo))
3808 return -TARGET_EFAULT;
3809 break;
3810 case SHM_INFO:
3811 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
3812 if (host_to_target_shm_info(buf, &shm_info))
3813 return -TARGET_EFAULT;
3814 break;
3815 case IPC_RMID:
3816 case SHM_LOCK:
3817 case SHM_UNLOCK:
3818 ret = get_errno(shmctl(shmid, cmd, NULL));
3819 break;
3822 return ret;
3825 #ifndef TARGET_FORCE_SHMLBA
3826 /* For most architectures, SHMLBA is the same as the page size;
3827 * some architectures have larger values, in which case they should
3828 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
3829 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
3830 * and defining its own value for SHMLBA.
3832 * The kernel also permits SHMLBA to be set by the architecture to a
3833 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
3834 * this means that addresses are rounded to the large size if
3835 * SHM_RND is set but addresses not aligned to that size are not rejected
3836 * as long as they are at least page-aligned. Since the only architecture
3837 * which uses this is ia64 this code doesn't provide for that oddity.
3839 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
3841 return TARGET_PAGE_SIZE;
3843 #endif
3845 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
3846 int shmid, abi_ulong shmaddr, int shmflg)
3848 abi_long raddr;
3849 void *host_raddr;
3850 struct shmid_ds shm_info;
3851 int i,ret;
3852 abi_ulong shmlba;
3854 /* find out the length of the shared memory segment */
3855 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
3856 if (is_error(ret)) {
3857 /* can't get length, bail out */
3858 return ret;
3861 shmlba = target_shmlba(cpu_env);
3863 if (shmaddr & (shmlba - 1)) {
3864 if (shmflg & SHM_RND) {
3865 shmaddr &= ~(shmlba - 1);
3866 } else {
3867 return -TARGET_EINVAL;
3870 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
3871 return -TARGET_EINVAL;
3874 mmap_lock();
3876 if (shmaddr)
3877 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
3878 else {
3879 abi_ulong mmap_start;
3881 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
3883 if (mmap_start == -1) {
3884 errno = ENOMEM;
3885 host_raddr = (void *)-1;
3886 } else
3887 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
3890 if (host_raddr == (void *)-1) {
3891 mmap_unlock();
3892 return get_errno((long)host_raddr);
3894 raddr=h2g((unsigned long)host_raddr);
3896 page_set_flags(raddr, raddr + shm_info.shm_segsz,
3897 PAGE_VALID | PAGE_READ |
3898 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
3900 for (i = 0; i < N_SHM_REGIONS; i++) {
3901 if (!shm_regions[i].in_use) {
3902 shm_regions[i].in_use = true;
3903 shm_regions[i].start = raddr;
3904 shm_regions[i].size = shm_info.shm_segsz;
3905 break;
3909 mmap_unlock();
3910 return raddr;
3914 static inline abi_long do_shmdt(abi_ulong shmaddr)
3916 int i;
3917 abi_long rv;
3919 mmap_lock();
3921 for (i = 0; i < N_SHM_REGIONS; ++i) {
3922 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
3923 shm_regions[i].in_use = false;
3924 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3925 break;
3928 rv = get_errno(shmdt(g2h(shmaddr)));
3930 mmap_unlock();
3932 return rv;
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 * This is the multiplexed SysV IPC entry point used by targets that have a
 * single ipc syscall: the high 16 bits of 'call' carry an ABI version and
 * the low 16 bits select the operation. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old ABI: msgp and msgtyp are passed indirectly */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* the attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4042 /* kernel structure types definitions */
4044 #define STRUCT(name, ...) STRUCT_ ## name,
4045 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4046 enum {
4047 #include "syscall_types.h"
4048 STRUCT_MAX
4050 #undef STRUCT
4051 #undef STRUCT_SPECIAL
4053 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4054 #define STRUCT_SPECIAL(name)
4055 #include "syscall_types.h"
4056 #undef STRUCT
4057 #undef STRUCT_SPECIAL
4059 typedef struct IOCTLEntry IOCTLEntry;
4061 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4062 int fd, int cmd, abi_long arg);
4064 struct IOCTLEntry {
4065 int target_cmd;
4066 unsigned int host_cmd;
4067 const char *name;
4068 int access;
4069 do_ioctl_fn *do_ioctl;
4070 const argtype arg_type[5];
4073 #define IOC_R 0x0001
4074 #define IOC_W 0x0002
4075 #define IOC_RW (IOC_R | IOC_W)
4077 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4168 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4169 int fd, int cmd, abi_long arg)
4171 const argtype *arg_type = ie->arg_type;
4172 int target_size;
4173 void *argptr;
4174 int ret;
4175 struct ifconf *host_ifconf;
4176 uint32_t outbufsz;
4177 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4178 int target_ifreq_size;
4179 int nb_ifreq;
4180 int free_buf = 0;
4181 int i;
4182 int target_ifc_len;
4183 abi_long target_ifc_buf;
4184 int host_ifc_len;
4185 char *host_ifc_buf;
4187 assert(arg_type[0] == TYPE_PTR);
4188 assert(ie->access == IOC_RW);
4190 arg_type++;
4191 target_size = thunk_type_size(arg_type, 0);
4193 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4194 if (!argptr)
4195 return -TARGET_EFAULT;
4196 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4197 unlock_user(argptr, arg, 0);
4199 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4200 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4201 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4203 if (target_ifc_buf != 0) {
4204 target_ifc_len = host_ifconf->ifc_len;
4205 nb_ifreq = target_ifc_len / target_ifreq_size;
4206 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4208 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4209 if (outbufsz > MAX_STRUCT_SIZE) {
4211 * We can't fit all the extents into the fixed size buffer.
4212 * Allocate one that is large enough and use it instead.
4214 host_ifconf = malloc(outbufsz);
4215 if (!host_ifconf) {
4216 return -TARGET_ENOMEM;
4218 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4219 free_buf = 1;
4221 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4223 host_ifconf->ifc_len = host_ifc_len;
4224 } else {
4225 host_ifc_buf = NULL;
4227 host_ifconf->ifc_buf = host_ifc_buf;
4229 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4230 if (!is_error(ret)) {
4231 /* convert host ifc_len to target ifc_len */
4233 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4234 target_ifc_len = nb_ifreq * target_ifreq_size;
4235 host_ifconf->ifc_len = target_ifc_len;
4237 /* restore target ifc_buf */
4239 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4241 /* copy struct ifconf to target user */
4243 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4244 if (!argptr)
4245 return -TARGET_EFAULT;
4246 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4247 unlock_user(argptr, arg, target_size);
4249 if (target_ifc_buf != 0) {
4250 /* copy ifreq[] to target user */
4251 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4252 for (i = 0; i < nb_ifreq ; i++) {
4253 thunk_convert(argptr + i * target_ifreq_size,
4254 host_ifc_buf + i * sizeof(struct ifreq),
4255 ifreq_arg_type, THUNK_TARGET);
4257 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4261 if (free_buf) {
4262 free(host_ifconf);
4265 return ret;
4268 #if defined(CONFIG_USBFS)
4269 #if HOST_LONG_BITS > 64
4270 #error USBDEVFS thunks do not support >64 bit hosts yet.
4271 #endif
4272 struct live_urb {
4273 uint64_t target_urb_adr;
4274 uint64_t target_buf_adr;
4275 char *target_buf_ptr;
4276 struct usbdevfs_urb host_urb;
4279 static GHashTable *usbdevfs_urb_hashtable(void)
4281 static GHashTable *urb_hashtable;
4283 if (!urb_hashtable) {
4284 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4286 return urb_hashtable;
4289 static void urb_hashtable_insert(struct live_urb *urb)
4291 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4292 g_hash_table_insert(urb_hashtable, urb, urb);
4295 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4297 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4298 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4301 static void urb_hashtable_remove(struct live_urb *urb)
4303 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4304 g_hash_table_remove(urb_hashtable, urb);
4307 static abi_long
4308 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4309 int fd, int cmd, abi_long arg)
4311 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4312 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4313 struct live_urb *lurb;
4314 void *argptr;
4315 uint64_t hurb;
4316 int target_size;
4317 uintptr_t target_urb_adr;
4318 abi_long ret;
4320 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4322 memset(buf_temp, 0, sizeof(uint64_t));
4323 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4324 if (is_error(ret)) {
4325 return ret;
4328 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4329 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4330 if (!lurb->target_urb_adr) {
4331 return -TARGET_EFAULT;
4333 urb_hashtable_remove(lurb);
4334 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4335 lurb->host_urb.buffer_length);
4336 lurb->target_buf_ptr = NULL;
4338 /* restore the guest buffer pointer */
4339 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4341 /* update the guest urb struct */
4342 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4343 if (!argptr) {
4344 g_free(lurb);
4345 return -TARGET_EFAULT;
4347 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4348 unlock_user(argptr, lurb->target_urb_adr, target_size);
4350 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4351 /* write back the urb handle */
4352 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4353 if (!argptr) {
4354 g_free(lurb);
4355 return -TARGET_EFAULT;
4358 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4359 target_urb_adr = lurb->target_urb_adr;
4360 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4361 unlock_user(argptr, arg, target_size);
4363 g_free(lurb);
4364 return ret;
4367 static abi_long
4368 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4369 uint8_t *buf_temp __attribute__((unused)),
4370 int fd, int cmd, abi_long arg)
4372 struct live_urb *lurb;
4374 /* map target address back to host URB with metadata. */
4375 lurb = urb_hashtable_lookup(arg);
4376 if (!lurb) {
4377 return -TARGET_EFAULT;
4379 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
/*
 * Emulate USBDEVFS_SUBMITURB.
 *
 * Converts the guest usbdevfs_urb into a host-side copy (wrapped in a
 * struct live_urb that also records the guest addresses), locks the
 * guest data buffer for the duration of the transfer, and submits it.
 * On success the live_urb is entered into the hashtable so that
 * reapurb/discardurb can find it again; on failure everything is
 * unwound here.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* remember the guest addresses so reapurb can write results back */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* submit failed: drop the buffer lock and the wrapper */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* submit succeeded: keep it findable until reaped/discarded */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
4442 #endif /* CONFIG_USBFS */
/*
 * Emulate device-mapper ioctls (DM_*).
 *
 * dm ioctls use a single struct dm_ioctl header followed by a
 * variable-size payload whose layout depends on the command.  The
 * header is thunk-converted as usual; the payload is marshalled by
 * hand, per-command, in both directions.  buf_temp is too small for
 * the payload so a temporary buffer of data_size*2 is used instead.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* convert the fixed-size dm_ioctl header from the guest */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    /* data_start is an offset from the start of the ioctl struct */
    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* convert the command-specific input payload, guest -> host */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* payload is a 64-bit sector number followed by the message */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* payload is a sequence of dm_target_spec + parameter string */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            /* 'next' is relative in guest layout; recompute for host */
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* convert the command-specific output payload, host -> guest */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* chain of dm_name_list entries linked by 'next' offsets */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    /* recompute offset for the (packed) guest layout */
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* array of dm_target_spec entries plus trailing strings */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* a count followed by an array of 64-bit device numbers */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* chain of dm_target_versions entries linked by 'next' */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);
    }

    /* write the (possibly updated) dm_ioctl header back to the guest */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

out:
    g_free(big_buf);
    return ret;
}
/*
 * Emulate BLKPG: the argument is a struct blkpg_ioctl_arg whose 'data'
 * member points at a struct blkpg_partition.  Both levels have to be
 * fetched and converted, and the inner pointer swizzled to a host-side
 * copy before calling the host ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
4727 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
4728 int fd, int cmd, abi_long arg)
4730 const argtype *arg_type = ie->arg_type;
4731 const StructEntry *se;
4732 const argtype *field_types;
4733 const int *dst_offsets, *src_offsets;
4734 int target_size;
4735 void *argptr;
4736 abi_ulong *target_rt_dev_ptr;
4737 unsigned long *host_rt_dev_ptr;
4738 abi_long ret;
4739 int i;
4741 assert(ie->access == IOC_W);
4742 assert(*arg_type == TYPE_PTR);
4743 arg_type++;
4744 assert(*arg_type == TYPE_STRUCT);
4745 target_size = thunk_type_size(arg_type, 0);
4746 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4747 if (!argptr) {
4748 return -TARGET_EFAULT;
4750 arg_type++;
4751 assert(*arg_type == (int)STRUCT_rtentry);
4752 se = struct_entries + *arg_type++;
4753 assert(se->convert[0] == NULL);
4754 /* convert struct here to be able to catch rt_dev string */
4755 field_types = se->field_types;
4756 dst_offsets = se->field_offsets[THUNK_HOST];
4757 src_offsets = se->field_offsets[THUNK_TARGET];
4758 for (i = 0; i < se->nb_fields; i++) {
4759 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
4760 assert(*field_types == TYPE_PTRVOID);
4761 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
4762 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
4763 if (*target_rt_dev_ptr != 0) {
4764 *host_rt_dev_ptr = (unsigned long)lock_user_string(
4765 tswapal(*target_rt_dev_ptr));
4766 if (!*host_rt_dev_ptr) {
4767 unlock_user(argptr, arg, 0);
4768 return -TARGET_EFAULT;
4770 } else {
4771 *host_rt_dev_ptr = 0;
4773 field_types++;
4774 continue;
4776 field_types = thunk_convert(buf_temp + dst_offsets[i],
4777 argptr + src_offsets[i],
4778 field_types, THUNK_HOST);
4780 unlock_user(argptr, arg, 0);
4782 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4783 if (*host_rt_dev_ptr != 0) {
4784 unlock_user((void *)*host_rt_dev_ptr,
4785 *target_rt_dev_ptr, 0);
4787 return ret;
4790 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
4791 int fd, int cmd, abi_long arg)
4793 int sig = target_to_host_signal(arg);
4794 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
#ifdef TIOCGPTPEER
/*
 * Emulate TIOCGPTPEER: the argument is a set of open(2) flags, which
 * must be translated from guest to host bit values before the ioctl.
 */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);

    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
/*
 * Master ioctl translation table, expanded from ioctls.h.  Each entry
 * maps a target ioctl number to the host number, an access mode, an
 * optional custom handler (IOCTL_SPECIAL), and the thunk argument
 * description.  IOCTL_IGNORE entries have host_cmd == 0 and are
 * rejected with ENOSYS by do_ioctl().  The list is terminated by an
 * all-zero sentinel entry.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* linear search of the table; terminated by the all-zero sentinel */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* entry has a dedicated handler (IOCTL_SPECIAL) */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* pass the value straight through, untranslated */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* pointer to a struct: thunk-convert per access direction */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes; copy result out to the guest */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads; copy argument in from the guest */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* kernel reads and writes; convert both ways */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag (input mode) target<->host bit translation table */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
/*
 * termios c_oflag (output mode) translation table.  Multi-bit delay
 * fields (NLDLY, CRDLY, ...) use one entry per value with the field
 * width as the mask.
 */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
/*
 * termios c_cflag (control mode) translation table.  The CBAUD field
 * is translated one baud-rate value at a time; CSIZE likewise per
 * character-size value.
 */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
/* termios c_lflag (local mode) target<->host bit translation table */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
5005 static void target_to_host_termios (void *dst, const void *src)
5007 struct host_termios *host = dst;
5008 const struct target_termios *target = src;
5010 host->c_iflag =
5011 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5012 host->c_oflag =
5013 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5014 host->c_cflag =
5015 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5016 host->c_lflag =
5017 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5018 host->c_line = target->c_line;
5020 memset(host->c_cc, 0, sizeof(host->c_cc));
5021 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5022 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5023 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5024 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5025 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5026 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5027 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5028 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5029 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5030 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5031 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5032 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5033 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5034 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5035 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5036 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5037 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5040 static void host_to_target_termios (void *dst, const void *src)
5042 struct target_termios *target = dst;
5043 const struct host_termios *host = src;
5045 target->c_iflag =
5046 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5047 target->c_oflag =
5048 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5049 target->c_cflag =
5050 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5051 target->c_lflag =
5052 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5053 target->c_line = host->c_line;
5055 memset(target->c_cc, 0, sizeof(target->c_cc));
5056 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5057 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5058 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5059 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5060 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5061 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5062 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5063 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5064 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5065 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5066 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5067 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5068 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5069 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5070 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5071 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5072 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/*
 * Thunk descriptor for struct termios: conversion is done by the two
 * custom functions above instead of the generic field-by-field thunk.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
/* mmap(2) flags target<->host translation table */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
5104 #if defined(TARGET_I386)
5106 /* NOTE: there is really one LDT for all the threads */
5107 static uint8_t *ldt_table;
5109 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5111 int size;
5112 void *p;
5114 if (!ldt_table)
5115 return 0;
5116 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5117 if (size > bytecount)
5118 size = bytecount;
5119 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5120 if (!p)
5121 return -TARGET_EFAULT;
5122 /* ??? Should this by byteswapped? */
5123 memcpy(p, ldt_table, size);
5124 unlock_user(p, ptr, size);
5125 return size;
/* XXX: add locking support */
/*
 * modify_ldt(1/0x11): install one LDT descriptor from a guest
 * struct user_desc (target_modify_ldt_ldt_s).  'oldmode' selects the
 * legacy modify_ldt(1) semantics (no 'useable' bit, no code-segment
 * entries).  The descriptor words are packed exactly as the Linux
 * kernel does it.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* unpack the flag bits (same layout as struct user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 would be a conforming code segment */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the two 32-bit descriptor words (kernel LDT_entry_a/b) */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5220 /* specific and weird i386 syscalls */
5221 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5222 unsigned long bytecount)
5224 abi_long ret;
5226 switch (func) {
5227 case 0:
5228 ret = read_ldt(ptr, bytecount);
5229 break;
5230 case 1:
5231 ret = write_ldt(env, ptr, bytecount, 1);
5232 break;
5233 case 0x11:
5234 ret = write_ldt(env, ptr, bytecount, 0);
5235 break;
5236 default:
5237 ret = -TARGET_ENOSYS;
5238 break;
5240 return ret;
5243 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor into the GDT.
 * If entry_number is -1, pick the first free TLS slot and write the
 * chosen index back to the guest struct.  Descriptor packing mirrors
 * the Linux kernel (and write_ldt above).
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate any free TLS slot for me" */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* unpack the flag bits (same layout as struct user_desc) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* pack the two 32-bit descriptor words (kernel LDT_entry_a/b) */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * Emulate get_thread_area(2): read a TLS descriptor out of the GDT
 * and unpack the two descriptor words back into the guest's
 * struct user_desc fields (inverse of do_set_thread_area).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* unpack the descriptor bits (inverse of the packing above) */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5375 #endif /* TARGET_I386 && TARGET_ABI32 */
5377 #ifndef TARGET_ABI32
/*
 * Emulate arch_prctl(2) (x86-64 only): set or query the FS/GS segment
 * base.  'addr' is the new base for the SET operations, or a guest
 * pointer receiving the current base for the GET operations.
 * Returns 0, -TARGET_EFAULT or -TARGET_EINVAL.
 */
5378 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5380 abi_long ret = 0;
5381 abi_ulong val;
5382 int idx;
5384 switch(code) {
5385 case TARGET_ARCH_SET_GS:
5386 case TARGET_ARCH_SET_FS:
5387 if (code == TARGET_ARCH_SET_GS)
5388 idx = R_GS;
5389 else
5390 idx = R_FS;
/* Load a null selector, then install the requested base directly. */
5391 cpu_x86_load_seg(env, idx, 0);
5392 env->segs[idx].base = addr;
5393 break;
5394 case TARGET_ARCH_GET_GS:
5395 case TARGET_ARCH_GET_FS:
5396 if (code == TARGET_ARCH_GET_GS)
5397 idx = R_GS;
5398 else
5399 idx = R_FS;
5400 val = env->segs[idx].base;
5401 if (put_user(val, addr, abi_ulong))
5402 ret = -TARGET_EFAULT;
5403 break;
5404 default:
5405 ret = -TARGET_EINVAL;
5406 break;
5408 return ret;
5410 #endif
5412 #endif /* defined(TARGET_I386) */
5414 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation so that clone_func() children cannot start
 * running guest code until the parent has finished setting up TLS. */
5417 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake record passed from do_fork() to the new thread; the parent
 * waits on 'cond' until the child has published its tid. */
5418 typedef struct {
5419 CPUArchState *env;
5420 pthread_mutex_t mutex;
5421 pthread_cond_t cond;
5422 pthread_t thread;
5423 uint32_t tid;
5424 abi_ulong child_tidptr;
5425 abi_ulong parent_tidptr;
5426 sigset_t sigmask;
5427 } new_thread_info;
/*
 * Entry point of a new guest thread created by do_fork(CLONE_VM).
 * Registers the thread with RCU/TCG, publishes its tid through the
 * handshake info, then enters the guest CPU loop.  Never returns.
 */
5429 static void *clone_func(void *arg)
5431 new_thread_info *info = arg;
5432 CPUArchState *env;
5433 CPUState *cpu;
5434 TaskState *ts;
5436 rcu_register_thread();
5437 tcg_register_thread();
5438 env = info->env;
5439 cpu = ENV_GET_CPU(env);
5440 thread_cpu = cpu;
5441 ts = (TaskState *)cpu->opaque;
5442 info->tid = gettid();
5443 task_settid(ts);
/* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID from the child side. */
5444 if (info->child_tidptr)
5445 put_user_u32(info->tid, info->child_tidptr)
5446 if (info->parent_tidptr)
5447 put_user_u32(info->tid, info->parent_tidptr);
5448 /* Enable signals. */
5449 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5450 /* Signal to the parent that we're ready. */
5451 pthread_mutex_lock(&info->mutex);
5452 pthread_cond_broadcast(&info->cond);
5453 pthread_mutex_unlock(&info->mutex);
5454 /* Wait until the parent has finished initializing the tls state. */
5455 pthread_mutex_lock(&clone_lock);
5456 pthread_mutex_unlock(&clone_lock);
5457 cpu_loop(env);
5458 /* never exits */
5459 return NULL;
5462 /* do_fork() Must return host values and target errnos (unlike most
5463 do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become host
 * pthreads sharing this process; everything else becomes a host fork().
 * Returns the new tid/pid on success, -1 or a -TARGET_* errno on failure.
 */
5464 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5465 abi_ulong parent_tidptr, target_ulong newtls,
5466 abi_ulong child_tidptr)
5468 CPUState *cpu = ENV_GET_CPU(env);
5469 int ret;
5470 TaskState *ts;
5471 CPUState *new_cpu;
5472 CPUArchState *new_env;
5473 sigset_t sigmask;
/* Drop flags we deliberately ignore (e.g. CLONE_DETACHED). */
5475 flags &= ~CLONE_IGNORED_FLAGS;
5477 /* Emulate vfork() with fork() */
5478 if (flags & CLONE_VFORK)
5479 flags &= ~(CLONE_VFORK | CLONE_VM);
5481 if (flags & CLONE_VM) {
5482 TaskState *parent_ts = (TaskState *)cpu->opaque;
5483 new_thread_info info;
5484 pthread_attr_t attr;
/* Threads must request the exact flag combination we can emulate. */
5486 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5487 (flags & CLONE_INVALID_THREAD_FLAGS)) {
5488 return -TARGET_EINVAL;
5491 ts = g_new0(TaskState, 1);
5492 init_task_state(ts);
5494 /* Grab a mutex so that thread setup appears atomic. */
5495 pthread_mutex_lock(&clone_lock);
5497 /* we create a new CPU instance. */
5498 new_env = cpu_copy(env);
5499 /* Init regs that differ from the parent. */
5500 cpu_clone_regs(new_env, newsp);
5501 new_cpu = ENV_GET_CPU(new_env);
5502 new_cpu->opaque = ts;
5503 ts->bprm = parent_ts->bprm;
5504 ts->info = parent_ts->info;
5505 ts->signal_mask = parent_ts->signal_mask;
5507 if (flags & CLONE_CHILD_CLEARTID) {
5508 ts->child_tidptr = child_tidptr;
5511 if (flags & CLONE_SETTLS) {
5512 cpu_set_tls (new_env, newtls);
/* Set up the parent/child handshake; the mutex is held across
 * pthread_create so the child cannot signal before we wait. */
5515 memset(&info, 0, sizeof(info));
5516 pthread_mutex_init(&info.mutex, NULL);
5517 pthread_mutex_lock(&info.mutex);
5518 pthread_cond_init(&info.cond, NULL);
5519 info.env = new_env;
5520 if (flags & CLONE_CHILD_SETTID) {
5521 info.child_tidptr = child_tidptr;
5523 if (flags & CLONE_PARENT_SETTID) {
5524 info.parent_tidptr = parent_tidptr;
/* NOTE(review): the first two 'ret' assignments are overwritten and
 * never checked; only pthread_create's result is used below. */
5527 ret = pthread_attr_init(&attr);
5528 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5529 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5530 /* It is not safe to deliver signals until the child has finished
5531 initializing, so temporarily block all signals. */
5532 sigfillset(&sigmask);
5533 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5535 /* If this is our first additional thread, we need to ensure we
5536 * generate code for parallel execution and flush old translations.
5538 if (!parallel_cpus) {
5539 parallel_cpus = true;
5540 tb_flush(cpu);
5543 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5544 /* TODO: Free new CPU state if thread creation failed. */
5546 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5547 pthread_attr_destroy(&attr);
5548 if (ret == 0) {
5549 /* Wait for the child to initialize. */
5550 pthread_cond_wait(&info.cond, &info.mutex);
5551 ret = info.tid;
5552 } else {
5553 ret = -1;
5555 pthread_mutex_unlock(&info.mutex);
5556 pthread_cond_destroy(&info.cond);
5557 pthread_mutex_destroy(&info.mutex);
5558 pthread_mutex_unlock(&clone_lock);
5559 } else {
5560 /* if no CLONE_VM, we consider it is a fork */
5561 if (flags & CLONE_INVALID_FORK_FLAGS) {
5562 return -TARGET_EINVAL;
5565 /* We can't support custom termination signals */
5566 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5567 return -TARGET_EINVAL;
5570 if (block_signals()) {
5571 return -TARGET_ERESTARTSYS;
5574 fork_start();
5575 ret = fork();
5576 if (ret == 0) {
5577 /* Child Process. */
5578 cpu_clone_regs(env, newsp);
5579 fork_end(1);
5580 /* There is a race condition here. The parent process could
5581 theoretically read the TID in the child process before the child
5582 tid is set. This would require using either ptrace
5583 (not implemented) or having *_tidptr to point at a shared memory
5584 mapping. We can't repeat the spinlock hack used above because
5585 the child process gets its own copy of the lock. */
5586 if (flags & CLONE_CHILD_SETTID)
5587 put_user_u32(gettid(), child_tidptr);
5588 if (flags & CLONE_PARENT_SETTID)
5589 put_user_u32(gettid(), parent_tidptr);
5590 ts = (TaskState *)cpu->opaque;
5591 if (flags & CLONE_SETTLS)
5592 cpu_set_tls (env, newtls);
5593 if (flags & CLONE_CHILD_CLEARTID)
5594 ts->child_tidptr = child_tidptr;
5595 } else {
5596 fork_end(0);
5599 return ret;
5602 /* warning : doesn't handle linux specific flags... */
/*
 * Translate a target fcntl command number into the host's numbering.
 * Returns the host command, or -TARGET_EINVAL for unknown commands.
 */
5603 static int target_to_host_fcntl_cmd(int cmd)
5605 int ret;
5607 switch(cmd) {
/* These commands share the same value on all Linux ports. */
5608 case TARGET_F_DUPFD:
5609 case TARGET_F_GETFD:
5610 case TARGET_F_SETFD:
5611 case TARGET_F_GETFL:
5612 case TARGET_F_SETFL:
5613 ret = cmd;
5614 break;
/* Record locking always uses the 64-bit host variants, since do_fcntl
 * converts every guest flock layout into a host struct flock64. */
5615 case TARGET_F_GETLK:
5616 ret = F_GETLK64;
5617 break;
5618 case TARGET_F_SETLK:
5619 ret = F_SETLK64;
5620 break;
5621 case TARGET_F_SETLKW:
5622 ret = F_SETLKW64;
5623 break;
5624 case TARGET_F_GETOWN:
5625 ret = F_GETOWN;
5626 break;
5627 case TARGET_F_SETOWN:
5628 ret = F_SETOWN;
5629 break;
5630 case TARGET_F_GETSIG:
5631 ret = F_GETSIG;
5632 break;
5633 case TARGET_F_SETSIG:
5634 ret = F_SETSIG;
5635 break;
5636 #if TARGET_ABI_BITS == 32
5637 case TARGET_F_GETLK64:
5638 ret = F_GETLK64;
5639 break;
5640 case TARGET_F_SETLK64:
5641 ret = F_SETLK64;
5642 break;
5643 case TARGET_F_SETLKW64:
5644 ret = F_SETLKW64;
5645 break;
5646 #endif
5647 case TARGET_F_SETLEASE:
5648 ret = F_SETLEASE;
5649 break;
5650 case TARGET_F_GETLEASE:
5651 ret = F_GETLEASE;
5652 break;
5653 #ifdef F_DUPFD_CLOEXEC
5654 case TARGET_F_DUPFD_CLOEXEC:
5655 ret = F_DUPFD_CLOEXEC;
5656 break;
5657 #endif
5658 case TARGET_F_NOTIFY:
5659 ret = F_NOTIFY;
5660 break;
5661 #ifdef F_GETOWN_EX
5662 case TARGET_F_GETOWN_EX:
5663 ret = F_GETOWN_EX;
5664 break;
5665 #endif
5666 #ifdef F_SETOWN_EX
5667 case TARGET_F_SETOWN_EX:
5668 ret = F_SETOWN_EX;
5669 break;
5670 #endif
5671 #ifdef F_SETPIPE_SZ
5672 case TARGET_F_SETPIPE_SZ:
5673 ret = F_SETPIPE_SZ;
5674 break;
5675 case TARGET_F_GETPIPE_SZ:
5676 ret = F_GETPIPE_SZ;
5677 break;
5678 #endif
5679 default:
5680 ret = -TARGET_EINVAL;
5681 break;
5684 #if defined(__powerpc64__)
5685 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
5686 * is not supported by kernel. The glibc fcntl call actually adjusts
5687 * them to 5, 6 and 7 before making the syscall(). Since we make the
5688 * syscall directly, adjust to what is supported by the kernel.
5690 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
5691 ret -= F_GETLK64 - 5;
5693 #endif
5695 return ret;
/* Shared case table for converting flock lock types in either direction;
 * TRANSTBL_CONVERT is defined differently by each user below. */
5698 #define FLOCK_TRANSTBL \
5699 switch (type) { \
5700 TRANSTBL_CONVERT(F_RDLCK); \
5701 TRANSTBL_CONVERT(F_WRLCK); \
5702 TRANSTBL_CONVERT(F_UNLCK); \
5703 TRANSTBL_CONVERT(F_EXLCK); \
5704 TRANSTBL_CONVERT(F_SHLCK); \
/* Map a target lock type to the host value; -TARGET_EINVAL if unknown. */
5707 static int target_to_host_flock(int type)
5709 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
5710 FLOCK_TRANSTBL
5711 #undef TRANSTBL_CONVERT
5712 return -TARGET_EINVAL;
/* Map a host lock type to the target value; unknown values pass through. */
5715 static int host_to_target_flock(int type)
5717 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
5718 FLOCK_TRANSTBL
5719 #undef TRANSTBL_CONVERT
5720 /* if we don't know how to convert the value coming
5721 * from the host we copy to the target field as-is
5723 return type;
5726 static inline abi_long copy_from_user_flock(struct flock64 *fl,
5727 abi_ulong target_flock_addr)
5729 struct target_flock *target_fl;
5730 int l_type;
5732 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5733 return -TARGET_EFAULT;
5736 __get_user(l_type, &target_fl->l_type);
5737 l_type = target_to_host_flock(l_type);
5738 if (l_type < 0) {
5739 return l_type;
5741 fl->l_type = l_type;
5742 __get_user(fl->l_whence, &target_fl->l_whence);
5743 __get_user(fl->l_start, &target_fl->l_start);
5744 __get_user(fl->l_len, &target_fl->l_len);
5745 __get_user(fl->l_pid, &target_fl->l_pid);
5746 unlock_user_struct(target_fl, target_flock_addr, 0);
5747 return 0;
/*
 * Convert a host struct flock64 back into a target "struct flock" in
 * guest memory.  Returns 0 on success or -TARGET_EFAULT.
 */
5750 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
5751 const struct flock64 *fl)
5753 struct target_flock *target_fl;
5754 short l_type;
5756 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5757 return -TARGET_EFAULT;
5760 l_type = host_to_target_flock(fl->l_type);
5761 __put_user(l_type, &target_fl->l_type);
5762 __put_user(fl->l_whence, &target_fl->l_whence);
5763 __put_user(fl->l_start, &target_fl->l_start);
5764 __put_user(fl->l_len, &target_fl->l_len);
5765 __put_user(fl->l_pid, &target_fl->l_pid);
/* '1' flushes the converted struct back to guest memory. */
5766 unlock_user_struct(target_fl, target_flock_addr, 1);
5767 return 0;
/* Function-pointer types used by do_fcntl to select the flock layout
 * (EABI, OABI, or 64-bit) appropriate for the guest. */
5770 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
5771 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
5773 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
5774 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
5775 abi_ulong target_flock_addr)
5777 struct target_oabi_flock64 *target_fl;
5778 int l_type;
5780 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5781 return -TARGET_EFAULT;
5784 __get_user(l_type, &target_fl->l_type);
5785 l_type = target_to_host_flock(l_type);
5786 if (l_type < 0) {
5787 return l_type;
5789 fl->l_type = l_type;
5790 __get_user(fl->l_whence, &target_fl->l_whence);
5791 __get_user(fl->l_start, &target_fl->l_start);
5792 __get_user(fl->l_len, &target_fl->l_len);
5793 __get_user(fl->l_pid, &target_fl->l_pid);
5794 unlock_user_struct(target_fl, target_flock_addr, 0);
5795 return 0;
/*
 * Convert a host struct flock64 back into an ARM-OABI target
 * "struct flock64" in guest memory.  Returns 0 or -TARGET_EFAULT.
 */
5798 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
5799 const struct flock64 *fl)
5801 struct target_oabi_flock64 *target_fl;
5802 short l_type;
5804 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5805 return -TARGET_EFAULT;
5808 l_type = host_to_target_flock(fl->l_type);
5809 __put_user(l_type, &target_fl->l_type);
5810 __put_user(fl->l_whence, &target_fl->l_whence);
5811 __put_user(fl->l_start, &target_fl->l_start);
5812 __put_user(fl->l_len, &target_fl->l_len);
5813 __put_user(fl->l_pid, &target_fl->l_pid);
/* '1' flushes the converted struct back to guest memory. */
5814 unlock_user_struct(target_fl, target_flock_addr, 1);
5815 return 0;
5817 #endif
5819 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
5820 abi_ulong target_flock_addr)
5822 struct target_flock64 *target_fl;
5823 int l_type;
5825 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
5826 return -TARGET_EFAULT;
5829 __get_user(l_type, &target_fl->l_type);
5830 l_type = target_to_host_flock(l_type);
5831 if (l_type < 0) {
5832 return l_type;
5834 fl->l_type = l_type;
5835 __get_user(fl->l_whence, &target_fl->l_whence);
5836 __get_user(fl->l_start, &target_fl->l_start);
5837 __get_user(fl->l_len, &target_fl->l_len);
5838 __get_user(fl->l_pid, &target_fl->l_pid);
5839 unlock_user_struct(target_fl, target_flock_addr, 0);
5840 return 0;
/*
 * Convert a host struct flock64 back into a target "struct flock64" in
 * guest memory.  Returns 0 on success or -TARGET_EFAULT.
 */
5843 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
5844 const struct flock64 *fl)
5846 struct target_flock64 *target_fl;
5847 short l_type;
5849 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
5850 return -TARGET_EFAULT;
5853 l_type = host_to_target_flock(fl->l_type);
5854 __put_user(l_type, &target_fl->l_type);
5855 __put_user(fl->l_whence, &target_fl->l_whence);
5856 __put_user(fl->l_start, &target_fl->l_start);
5857 __put_user(fl->l_len, &target_fl->l_len);
5858 __put_user(fl->l_pid, &target_fl->l_pid);
/* '1' flushes the converted struct back to guest memory. */
5859 unlock_user_struct(target_fl, target_flock_addr, 1);
5860 return 0;
/*
 * Emulate fcntl(2): translate the command and its argument between
 * target and host representations, perform the host call, and convert
 * the result back.  Returns a host value or a -TARGET_* errno.
 */
5863 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
5865 struct flock64 fl64;
5866 #ifdef F_GETOWN_EX
5867 struct f_owner_ex fox;
5868 struct target_f_owner_ex *target_fox;
5869 #endif
5870 abi_long ret;
5871 int host_cmd = target_to_host_fcntl_cmd(cmd);
5873 if (host_cmd == -TARGET_EINVAL)
5874 return host_cmd;
5876 switch(cmd) {
/* Record locks: convert guest flock in, call host, convert result out. */
5877 case TARGET_F_GETLK:
5878 ret = copy_from_user_flock(&fl64, arg);
5879 if (ret) {
5880 return ret;
5882 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5883 if (ret == 0) {
5884 ret = copy_to_user_flock(arg, &fl64);
5886 break;
5888 case TARGET_F_SETLK:
5889 case TARGET_F_SETLKW:
5890 ret = copy_from_user_flock(&fl64, arg);
5891 if (ret) {
5892 return ret;
5894 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5895 break;
5897 case TARGET_F_GETLK64:
5898 ret = copy_from_user_flock64(&fl64, arg);
5899 if (ret) {
5900 return ret;
5902 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5903 if (ret == 0) {
5904 ret = copy_to_user_flock64(arg, &fl64);
5906 break;
5907 case TARGET_F_SETLK64:
5908 case TARGET_F_SETLKW64:
5909 ret = copy_from_user_flock64(&fl64, arg);
5910 if (ret) {
5911 return ret;
5913 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
5914 break;
/* File status flags need bitmask translation in both directions. */
5916 case TARGET_F_GETFL:
5917 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5918 if (ret >= 0) {
5919 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
5921 break;
5923 case TARGET_F_SETFL:
5924 ret = get_errno(safe_fcntl(fd, host_cmd,
5925 target_to_host_bitmask(arg,
5926 fcntl_flags_tbl)));
5927 break;
5929 #ifdef F_GETOWN_EX
5930 case TARGET_F_GETOWN_EX:
5931 ret = get_errno(safe_fcntl(fd, host_cmd, &fox))
5932 if (ret >= 0) {
5933 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
5934 return -TARGET_EFAULT;
5935 target_fox->type = tswap32(fox.type);
5936 target_fox->pid = tswap32(fox.pid);
5937 unlock_user_struct(target_fox, arg, 1);
5939 break;
5940 #endif
5942 #ifdef F_SETOWN_EX
5943 case TARGET_F_SETOWN_EX:
5944 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
5945 return -TARGET_EFAULT;
5946 fox.type = tswap32(target_fox->type);
5947 fox.pid = tswap32(target_fox->pid);
5948 unlock_user_struct(target_fox, arg, 0);
5949 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
5950 break;
5951 #endif
/* Plain integer-argument commands pass straight through. */
5953 case TARGET_F_SETOWN:
5954 case TARGET_F_GETOWN:
5955 case TARGET_F_SETSIG:
5956 case TARGET_F_GETSIG:
5957 case TARGET_F_SETLEASE:
5958 case TARGET_F_GETLEASE:
5959 case TARGET_F_SETPIPE_SZ:
5960 case TARGET_F_GETPIPE_SZ:
5961 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
5962 break;
5964 default:
5965 ret = get_errno(safe_fcntl(fd, cmd, arg));
5966 break;
5968 return ret;
5971 #ifdef USE_UID16
/* Narrow a 32-bit uid to the 16-bit UID16 range: values that do not fit
 * are reported as the Linux overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
/* Narrow a 32-bit gid to the 16-bit UID16 range: values that do not fit
 * are reported as the Linux overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
/* Widen a 16-bit uid: the UID16 sentinel 0xffff ("no change" / -1)
 * becomes a full-width -1; everything else passes through unchanged. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
/* Widen a 16-bit gid: the UID16 sentinel 0xffff ("no change" / -1)
 * becomes a full-width -1; everything else passes through unchanged. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a uid/gid for the guest: UID16 ids are 16 bits wide. */
6004 static inline int tswapid(int id)
6006 return tswap16(id);
/* Store a 16-bit id into guest memory (UID16 variant). */
6009 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6011 #else /* !USE_UID16 */
/* Without USE_UID16 the target uses full 32-bit ids, so the narrowing
 * and widening helpers are identity functions. */
6012 static inline int high2lowuid(int uid)
6014 return uid;
6016 static inline int high2lowgid(int gid)
6018 return gid;
6020 static inline int low2highuid(int uid)
6022 return uid;
6024 static inline int low2highgid(int gid)
6026 return gid;
/* Byte-swap a 32-bit uid/gid for the guest. */
6028 static inline int tswapid(int id)
6030 return tswap32(id);
/* Store a 32-bit id into guest memory. */
6033 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6035 #endif /* USE_UID16 */
6037 /* We must do direct syscalls for setting UID/GID, because we want to
6038 * implement the Linux system call semantics of "change only for this thread",
6039 * not the libc/POSIX semantics of "change for all threads in process".
6040 * (See http://ewontfix.com/17/ for more details.)
6041 * We use the 32-bit version of the syscalls if present; if it is not
6042 * then either the host architecture supports 32-bit UIDs natively with
6043 * the standard syscall, or the 16-bit UID is the best we can do.
6045 #ifdef __NR_setuid32
6046 #define __NR_sys_setuid __NR_setuid32
6047 #else
6048 #define __NR_sys_setuid __NR_setuid
6049 #endif
6050 #ifdef __NR_setgid32
6051 #define __NR_sys_setgid __NR_setgid32
6052 #else
6053 #define __NR_sys_setgid __NR_setgid
6054 #endif
6055 #ifdef __NR_setresuid32
6056 #define __NR_sys_setresuid __NR_setresuid32
6057 #else
6058 #define __NR_sys_setresuid __NR_setresuid
6059 #endif
6060 #ifdef __NR_setresgid32
6061 #define __NR_sys_setresgid __NR_setresgid32
6062 #else
6063 #define __NR_sys_setresgid __NR_setresgid
6064 #endif
/* Generate raw-syscall wrappers (bypassing the libc versions, which
 * would apply the change to every thread in the process). */
6066 _syscall1(int, sys_setuid, uid_t, uid)
6067 _syscall1(int, sys_setgid, gid_t, gid)
6068 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6069 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialization for the syscall layer: registers the struct
 * conversion thunks, builds the errno translation table, and patches
 * the size field of ioctl numbers whose payload size is target-dependent.
 */
6071 void syscall_init(void)
6073 IOCTLEntry *ie;
6074 const argtype *arg_type;
6075 int size;
6076 int i;
6078 thunk_init(STRUCT_MAX);
6080 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6081 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6082 #include "syscall_types.h"
6083 #undef STRUCT
6084 #undef STRUCT_SPECIAL
6086 /* Build target_to_host_errno_table[] table from
6087 * host_to_target_errno_table[]. */
6088 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6089 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6092 /* we patch the ioctl size if necessary. We rely on the fact that
6093 no ioctl has all the bits at '1' in the size field */
6094 ie = ioctl_entries;
6095 while (ie->target_cmd != 0) {
6096 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6097 TARGET_IOC_SIZEMASK) {
6098 arg_type = ie->arg_type;
6099 if (arg_type[0] != TYPE_PTR) {
6100 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6101 ie->target_cmd);
6102 exit(1);
6104 arg_type++;
/* Substitute the real target payload size into the ioctl number. */
6105 size = thunk_type_size(arg_type, 0);
6106 ie->target_cmd = (ie->target_cmd &
6107 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6108 (size << TARGET_IOC_SIZESHIFT);
6111 /* automatic consistency check if same arch */
6112 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6113 (defined(__x86_64__) && defined(TARGET_X86_64))
6114 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6115 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6116 ie->name, ie->target_cmd, ie->host_cmd);
6118 #endif
6119 ie++;
6123 #if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset passed in two 32-bit registers;
 * the word order depends on target endianness. */
6124 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6126 #ifdef TARGET_WORDS_BIGENDIAN
6127 return ((uint64_t)word0 << 32) | word1;
6128 #else
6129 return ((uint64_t)word1 << 32) | word0;
6130 #endif
6132 #else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused. */
6133 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6135 return word0;
6137 #endif /* TARGET_ABI_BITS != 32 */
6139 #ifdef TARGET_NR_truncate64
/*
 * truncate64(2): the 64-bit length arrives split across two registers
 * (arg2/arg3), shifted up by one slot on ABIs that require register-pair
 * alignment for 64-bit arguments.
 */
6140 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6141 abi_long arg2,
6142 abi_long arg3,
6143 abi_long arg4)
6145 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6146 arg2 = arg3;
6147 arg3 = arg4;
6149 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6151 #endif
6153 #ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same register-pair handling as target_truncate64. */
6154 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6155 abi_long arg2,
6156 abi_long arg3,
6157 abi_long arg4)
6159 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6160 arg2 = arg3;
6161 arg3 = arg4;
6163 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6165 #endif
/* Read a target struct timespec from guest memory into *host_ts.
 * Returns 0 or -TARGET_EFAULT. */
6167 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6168 abi_ulong target_addr)
6170 struct target_timespec *target_ts;
6172 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6173 return -TARGET_EFAULT;
6174 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6175 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6176 unlock_user_struct(target_ts, target_addr, 0);
6177 return 0;
/* Write *host_ts out as a target struct timespec in guest memory.
 * Returns 0 or -TARGET_EFAULT. */
6180 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6181 struct timespec *host_ts)
6183 struct target_timespec *target_ts;
6185 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6186 return -TARGET_EFAULT;
6187 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6188 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6189 unlock_user_struct(target_ts, target_addr, 1);
6190 return 0;
6193 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6194 abi_ulong target_addr)
6196 struct target_itimerspec *target_itspec;
6198 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6199 return -TARGET_EFAULT;
6202 host_itspec->it_interval.tv_sec =
6203 tswapal(target_itspec->it_interval.tv_sec);
6204 host_itspec->it_interval.tv_nsec =
6205 tswapal(target_itspec->it_interval.tv_nsec);
6206 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6207 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6209 unlock_user_struct(target_itspec, target_addr, 1);
6210 return 0;
6213 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6214 struct itimerspec *host_its)
6216 struct target_itimerspec *target_itspec;
6218 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6219 return -TARGET_EFAULT;
6222 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6223 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6225 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6226 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6228 unlock_user_struct(target_itspec, target_addr, 0);
6229 return 0;
/* Read a target struct timex (adjtimex parameters) from guest memory
 * into *host_tx, field by field.  Returns 0 or -TARGET_EFAULT. */
6232 static inline abi_long target_to_host_timex(struct timex *host_tx,
6233 abi_long target_addr)
6235 struct target_timex *target_tx;
6237 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6238 return -TARGET_EFAULT;
6241 __get_user(host_tx->modes, &target_tx->modes);
6242 __get_user(host_tx->offset, &target_tx->offset);
6243 __get_user(host_tx->freq, &target_tx->freq);
6244 __get_user(host_tx->maxerror, &target_tx->maxerror);
6245 __get_user(host_tx->esterror, &target_tx->esterror);
6246 __get_user(host_tx->status, &target_tx->status);
6247 __get_user(host_tx->constant, &target_tx->constant);
6248 __get_user(host_tx->precision, &target_tx->precision);
6249 __get_user(host_tx->tolerance, &target_tx->tolerance);
6250 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6251 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6252 __get_user(host_tx->tick, &target_tx->tick);
6253 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6254 __get_user(host_tx->jitter, &target_tx->jitter);
6255 __get_user(host_tx->shift, &target_tx->shift);
6256 __get_user(host_tx->stabil, &target_tx->stabil);
6257 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6258 __get_user(host_tx->calcnt, &target_tx->calcnt);
6259 __get_user(host_tx->errcnt, &target_tx->errcnt);
6260 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6261 __get_user(host_tx->tai, &target_tx->tai);
6263 unlock_user_struct(target_tx, target_addr, 0);
6264 return 0;
/* Write *host_tx back to guest memory as a target struct timex.
 * Returns 0 or -TARGET_EFAULT. */
6267 static inline abi_long host_to_target_timex(abi_long target_addr,
6268 struct timex *host_tx)
6270 struct target_timex *target_tx;
6272 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6273 return -TARGET_EFAULT;
6276 __put_user(host_tx->modes, &target_tx->modes);
6277 __put_user(host_tx->offset, &target_tx->offset);
6278 __put_user(host_tx->freq, &target_tx->freq);
6279 __put_user(host_tx->maxerror, &target_tx->maxerror);
6280 __put_user(host_tx->esterror, &target_tx->esterror);
6281 __put_user(host_tx->status, &target_tx->status);
6282 __put_user(host_tx->constant, &target_tx->constant);
6283 __put_user(host_tx->precision, &target_tx->precision);
6284 __put_user(host_tx->tolerance, &target_tx->tolerance);
6285 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6286 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6287 __put_user(host_tx->tick, &target_tx->tick);
6288 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6289 __put_user(host_tx->jitter, &target_tx->jitter);
6290 __put_user(host_tx->shift, &target_tx->shift);
6291 __put_user(host_tx->stabil, &target_tx->stabil);
6292 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6293 __put_user(host_tx->calcnt, &target_tx->calcnt);
6294 __put_user(host_tx->errcnt, &target_tx->errcnt);
6295 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6296 __put_user(host_tx->tai, &target_tx->tai);
6298 unlock_user_struct(target_tx, target_addr, 1);
6299 return 0;
/* Read a target struct sigevent from guest memory into *host_sevp,
 * translating the signal number.  Returns 0 or -TARGET_EFAULT. */
6303 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6304 abi_ulong target_addr)
6306 struct target_sigevent *target_sevp;
6308 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6309 return -TARGET_EFAULT;
6312 /* This union is awkward on 64 bit systems because it has a 32 bit
6313 * integer and a pointer in it; we follow the conversion approach
6314 * used for handling sigval types in signal.c so the guest should get
6315 * the correct value back even if we did a 64 bit byteswap and it's
6316 * using the 32 bit integer.
6318 host_sevp->sigev_value.sival_ptr =
6319 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6320 host_sevp->sigev_signo =
6321 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6322 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6323 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
/* NOTE(review): this is a read-only conversion but unlocks with copy-back
 * flag 1; flag 0 would be the conventional choice — confirm intent. */
6325 unlock_user_struct(target_sevp, target_addr, 1);
6326 return 0;
6329 #if defined(TARGET_NR_mlockall)
6330 static inline int target_to_host_mlockall_arg(int arg)
6332 int result = 0;
6334 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6335 result |= MCL_CURRENT;
6337 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6338 result |= MCL_FUTURE;
6340 return result;
6342 #endif
6344 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6345 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6346 defined(TARGET_NR_newfstatat))
/*
 * Write a host struct stat out to guest memory in the target's stat64
 * layout (or plain stat where the target has no stat64); ARM EABI guests
 * use their own padded layout.  Returns 0 or -TARGET_EFAULT.
 */
6347 static inline abi_long host_to_target_stat64(void *cpu_env,
6348 abi_ulong target_addr,
6349 struct stat *host_st)
6351 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6352 if (((CPUARMState *)cpu_env)->eabi) {
6353 struct target_eabi_stat64 *target_st;
6355 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6356 return -TARGET_EFAULT;
6357 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6358 __put_user(host_st->st_dev, &target_st->st_dev);
6359 __put_user(host_st->st_ino, &target_st->st_ino);
6360 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
/* Some targets carry the inode in a second, differently-sized field. */
6361 __put_user(host_st->st_ino, &target_st->__st_ino);
6362 #endif
6363 __put_user(host_st->st_mode, &target_st->st_mode);
6364 __put_user(host_st->st_nlink, &target_st->st_nlink);
6365 __put_user(host_st->st_uid, &target_st->st_uid);
6366 __put_user(host_st->st_gid, &target_st->st_gid);
6367 __put_user(host_st->st_rdev, &target_st->st_rdev);
6368 __put_user(host_st->st_size, &target_st->st_size);
6369 __put_user(host_st->st_blksize, &target_st->st_blksize);
6370 __put_user(host_st->st_blocks, &target_st->st_blocks);
6371 __put_user(host_st->st_atime, &target_st->target_st_atime);
6372 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6373 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6374 unlock_user_struct(target_st, target_addr, 1);
6375 } else
6376 #endif
6378 #if defined(TARGET_HAS_STRUCT_STAT64)
6379 struct target_stat64 *target_st;
6380 #else
6381 struct target_stat *target_st;
6382 #endif
6384 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6385 return -TARGET_EFAULT;
6386 memset(target_st, 0, sizeof(*target_st));
6387 __put_user(host_st->st_dev, &target_st->st_dev);
6388 __put_user(host_st->st_ino, &target_st->st_ino);
6389 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6390 __put_user(host_st->st_ino, &target_st->__st_ino);
6391 #endif
6392 __put_user(host_st->st_mode, &target_st->st_mode);
6393 __put_user(host_st->st_nlink, &target_st->st_nlink);
6394 __put_user(host_st->st_uid, &target_st->st_uid);
6395 __put_user(host_st->st_gid, &target_st->st_gid);
6396 __put_user(host_st->st_rdev, &target_st->st_rdev);
6397 /* XXX: better use of kernel struct */
6398 __put_user(host_st->st_size, &target_st->st_size);
6399 __put_user(host_st->st_blksize, &target_st->st_blksize);
6400 __put_user(host_st->st_blocks, &target_st->st_blocks);
6401 __put_user(host_st->st_atime, &target_st->target_st_atime);
6402 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6403 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6404 unlock_user_struct(target_st, target_addr, 1);
6407 return 0;
6409 #endif
6411 /* ??? Using host futex calls even when target atomic operations
6412 are not really atomic probably breaks things. However implementing
6413 futexes locally would make futexes shared between multiple processes
6414 tricky. However they're probably useless because guest atomic
6415 operations won't work either. */
/*
 * Emulate futex(2) by forwarding to the host futex on the g2h-translated
 * address.  Only the argument conversion differs per operation.
 */
6416 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6417 target_ulong uaddr2, int val3)
6419 struct timespec ts, *pts;
6420 int base_op;
6422 /* ??? We assume FUTEX_* constants are the same on both host
6423 and target. */
6424 #ifdef FUTEX_CMD_MASK
6425 base_op = op & FUTEX_CMD_MASK;
6426 #else
6427 base_op = op;
6428 #endif
6429 switch (base_op) {
6430 case FUTEX_WAIT:
6431 case FUTEX_WAIT_BITSET:
6432 if (timeout) {
6433 pts = &ts;
6434 target_to_host_timespec(pts, timeout);
6435 } else {
6436 pts = NULL;
/* 'val' is compared against guest memory, so swap it to host order. */
6438 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6439 pts, NULL, val3));
6440 case FUTEX_WAKE:
6441 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6442 case FUTEX_FD:
6443 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6444 case FUTEX_REQUEUE:
6445 case FUTEX_CMP_REQUEUE:
6446 case FUTEX_WAKE_OP:
6447 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6448 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6449 But the prototype takes a `struct timespec *'; insert casts
6450 to satisfy the compiler. We do not need to tswap TIMEOUT
6451 since it's not compared to guest memory. */
6452 pts = (struct timespec *)(uintptr_t) timeout;
6453 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6454 g2h(uaddr2),
6455 (base_op == FUTEX_CMP_REQUEUE
6456 ? tswap32(val3)
6457 : val3)));
6458 default:
6459 return -TARGET_ENOSYS;
6462 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* The guest file_handle starts with handle_bytes: the size of the
       opaque handle buffer that follows it. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size comes from the guest unvalidated; a huge value
       can make total_size wrap -- consider bounding it (e.g. to
       MAX_HANDLE_SZ) before use.  TODO confirm upstream behavior. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Work on a zeroed host-side copy so the kernel fills native-endian
       fields that we can swap into guest memory afterwards. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Copy back unconditionally: on EOVERFLOW the kernel still updates
       handle_bytes, which the guest needs to retry with a bigger buffer. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
6514 #endif
6516 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest file_handle and gives
       the length of the opaque handle data that follows. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled and unbounded here, so
       total_size may wrap -- verify against kernel MAX_HANDLE_SZ. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Hand the kernel a host copy: only handle_type needs byte-swapping,
       the handle body itself is opaque (see name_to_handle_at(2)). */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
6548 #endif
6550 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* signalfd4 accepts only SFD_NONBLOCK|SFD_CLOEXEC, which share
       values with the O_* flags on the target. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from a signalfd return host-layout siginfo; register a
           translator so the guest sees its own layout. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
6579 #endif
6581 /* Map host to target signal numbers for the wait family of syscalls.
6582 Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    /* Killed by a signal: the terminating signal sits in the low
       7 bits; translate it and keep the remaining bits untouched. */
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }

    /* Stopped by a signal: the stop signal occupies bits 8-15. */
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }

    /* Normal exit / continued: no signal number embedded, pass through. */
    return status;
}
6595 static int open_self_cmdline(void *cpu_env, int fd)
6597 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6598 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
6599 int i;
6601 for (i = 0; i < bprm->argc; i++) {
6602 size_t len = strlen(bprm->argv[i]) + 1;
6604 if (write(fd, bprm->argv[i], len) != len) {
6605 return -1;
6609 return 0;
6612 static int open_self_maps(void *cpu_env, int fd)
6614 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
6615 TaskState *ts = cpu->opaque;
6616 FILE *fp;
6617 char *line = NULL;
6618 size_t len = 0;
6619 ssize_t read;
6621 fp = fopen("/proc/self/maps", "r");
6622 if (fp == NULL) {
6623 return -1;
6626 while ((read = getline(&line, &len, fp)) != -1) {
6627 int fields, dev_maj, dev_min, inode;
6628 uint64_t min, max, offset;
6629 char flag_r, flag_w, flag_x, flag_p;
6630 char path[512] = "";
6631 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
6632 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
6633 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
6635 if ((fields < 10) || (fields > 11)) {
6636 continue;
6638 if (h2g_valid(min)) {
6639 int flags = page_get_flags(h2g(min));
6640 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
6641 if (page_check_range(h2g(min), max - min, flags) == -1) {
6642 continue;
6644 if (h2g(min) == ts->info->stack_limit) {
6645 pstrcpy(path, sizeof(path), " [stack]");
6647 dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
6648 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
6649 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
6650 flag_x, flag_p, offset, dev_maj, dev_min, inode,
6651 path[0] ? " " : "", path);
6655 free(line);
6656 fclose(fp);
6658 return 0;
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    /* Emit the 44 space-separated fields of /proc/self/stat.  Only
       pid (field 0), comm (1) and startstack (27) carry real values;
       everything else is faked as 0. */
    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        /* NOTE(review): a short write counts as failure; partial writes
           are not retried here. */
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                /* Error or zero-length write: give up silently; the
                   caller still gets a (possibly truncated) file. */
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Rewind so the guest reads the fake file from the start. */
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so this
           unlock passes a shorter length than was locked -- harmless
           for a read-only lock, but worth confirming. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if FILENAME names /proc/self/ENTRY or /proc/<our-pid>/ENTRY,
 * 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric pid: only our own pid counts as "myself". */
        char pid_prefix[80];
        size_t plen;

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        plen = strlen(pid_prefix);
        if (strncmp(rest, pid_prefix, plen) != 0) {
            return 0;
        }
        rest += plen;
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
6752 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used for absolute /proc entries (e.g. the
 * byte-swapped /proc/net/route fake). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
6758 static int open_net_route(void *cpu_env, int fd)
6760 FILE *fp;
6761 char *line = NULL;
6762 size_t len = 0;
6763 ssize_t read;
6765 fp = fopen("/proc/net/route", "r");
6766 if (fp == NULL) {
6767 return -1;
6770 /* read header */
6772 read = getline(&line, &len, fp);
6773 dprintf(fd, "%s", line);
6775 /* read routes */
6777 while ((read = getline(&line, &len, fp)) != -1) {
6778 char iface[16];
6779 uint32_t dest, gw, mask;
6780 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
6781 int fields;
6783 fields = sscanf(line,
6784 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6785 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
6786 &mask, &mtu, &window, &irtt);
6787 if (fields != 11) {
6788 continue;
6790 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
6791 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
6792 metric, tswap32(mask), mtu, window, irtt);
6795 free(line);
6796 fclose(fp);
6798 return 0;
6800 #endif
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    /* Entries under /proc that we emulate instead of opening the host
       file: FILL writes the fake contents into a temp fd, CMP decides
       whether PATHNAME matches FILENAME. */
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: reuse the fd the kernel handed us for the guest
       binary when available, otherwise reopen the saved exec path. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the open fd keeps the file alive and
           nothing else should ever see it in the filesystem. */
        unlink(filename);

        /* Preserve the fill callback's errno across close(). */
        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
6862 #define TIMER_MAGIC 0x0caf0000
6863 #define TIMER_MAGIC_MASK 0xffff0000
6865 /* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Guest-visible timer IDs carry a magic tag in their top 16 bits;
       anything without it cannot have come from timer_create. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* Strip the tag, leaving the index into g_posix_timers. */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
/* Copy a guest CPU-affinity bitmap at TARGET_ADDR (TARGET_SIZE bytes)
   into HOST_MASK (HOST_SIZE bytes), translating bit-by-bit between the
   possibly different word widths of abi_ulong and unsigned long.
   Returns 0 on success or -TARGET_EFAULT on a bad guest pointer. */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Callers guarantee the host buffer can hold every guest bit. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;   /* global bit index */
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/* Inverse of target_to_host_cpu_mask: copy HOST_MASK into the guest
   bitmap at TARGET_ADDR, translating between word widths bit-by-bit.
   Returns 0 on success or -TARGET_EFAULT on a bad guest pointer. */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Callers guarantee the host buffer covers every guest bit. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;   /* global bit index */
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
6950 /* This is an internal helper for do_syscall so that it is easier
6951 * to have a single return point, so that actions, such as logging
6952 * of syscall results, can be performed.
6953 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
6955 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
6956 abi_long arg2, abi_long arg3, abi_long arg4,
6957 abi_long arg5, abi_long arg6, abi_long arg7,
6958 abi_long arg8)
6960 CPUState *cpu = ENV_GET_CPU(cpu_env);
6961 abi_long ret;
6962 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
6963 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
6964 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
6965 struct stat st;
6966 #endif
6967 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
6968 || defined(TARGET_NR_fstatfs)
6969 struct statfs stfs;
6970 #endif
6971 void *p;
6973 switch(num) {
6974 case TARGET_NR_exit:
6975 /* In old applications this may be used to implement _exit(2).
6976 However in threaded applictions it is used for thread termination,
6977 and _exit_group is used for application termination.
6978 Do thread termination if we have more then one thread. */
6980 if (block_signals()) {
6981 return -TARGET_ERESTARTSYS;
6984 cpu_list_lock();
6986 if (CPU_NEXT(first_cpu)) {
6987 TaskState *ts;
6989 /* Remove the CPU from the list. */
6990 QTAILQ_REMOVE_RCU(&cpus, cpu, node);
6992 cpu_list_unlock();
6994 ts = cpu->opaque;
6995 if (ts->child_tidptr) {
6996 put_user_u32(0, ts->child_tidptr);
6997 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
6998 NULL, NULL, 0);
7000 thread_cpu = NULL;
7001 object_unref(OBJECT(cpu));
7002 g_free(ts);
7003 rcu_unregister_thread();
7004 pthread_exit(NULL);
7007 cpu_list_unlock();
7008 preexit_cleanup(cpu_env, arg1);
7009 _exit(arg1);
7010 return 0; /* avoid warning */
7011 case TARGET_NR_read:
7012 if (arg3 == 0) {
7013 return 0;
7014 } else {
7015 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7016 return -TARGET_EFAULT;
7017 ret = get_errno(safe_read(arg1, p, arg3));
7018 if (ret >= 0 &&
7019 fd_trans_host_to_target_data(arg1)) {
7020 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7022 unlock_user(p, arg2, ret);
7024 return ret;
7025 case TARGET_NR_write:
7026 if (arg2 == 0 && arg3 == 0) {
7027 return get_errno(safe_write(arg1, 0, 0));
7029 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7030 return -TARGET_EFAULT;
7031 if (fd_trans_target_to_host_data(arg1)) {
7032 void *copy = g_malloc(arg3);
7033 memcpy(copy, p, arg3);
7034 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7035 if (ret >= 0) {
7036 ret = get_errno(safe_write(arg1, copy, ret));
7038 g_free(copy);
7039 } else {
7040 ret = get_errno(safe_write(arg1, p, arg3));
7042 unlock_user(p, arg2, 0);
7043 return ret;
7045 #ifdef TARGET_NR_open
7046 case TARGET_NR_open:
7047 if (!(p = lock_user_string(arg1)))
7048 return -TARGET_EFAULT;
7049 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7050 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7051 arg3));
7052 fd_trans_unregister(ret);
7053 unlock_user(p, arg1, 0);
7054 return ret;
7055 #endif
7056 case TARGET_NR_openat:
7057 if (!(p = lock_user_string(arg2)))
7058 return -TARGET_EFAULT;
7059 ret = get_errno(do_openat(cpu_env, arg1, p,
7060 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7061 arg4));
7062 fd_trans_unregister(ret);
7063 unlock_user(p, arg2, 0);
7064 return ret;
7065 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7066 case TARGET_NR_name_to_handle_at:
7067 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7068 return ret;
7069 #endif
7070 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7071 case TARGET_NR_open_by_handle_at:
7072 ret = do_open_by_handle_at(arg1, arg2, arg3);
7073 fd_trans_unregister(ret);
7074 return ret;
7075 #endif
7076 case TARGET_NR_close:
7077 fd_trans_unregister(arg1);
7078 return get_errno(close(arg1));
7080 case TARGET_NR_brk:
7081 return do_brk(arg1);
7082 #ifdef TARGET_NR_fork
7083 case TARGET_NR_fork:
7084 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7085 #endif
7086 #ifdef TARGET_NR_waitpid
7087 case TARGET_NR_waitpid:
7089 int status;
7090 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7091 if (!is_error(ret) && arg2 && ret
7092 && put_user_s32(host_to_target_waitstatus(status), arg2))
7093 return -TARGET_EFAULT;
7095 return ret;
7096 #endif
7097 #ifdef TARGET_NR_waitid
7098 case TARGET_NR_waitid:
7100 siginfo_t info;
7101 info.si_pid = 0;
7102 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7103 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7104 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7105 return -TARGET_EFAULT;
7106 host_to_target_siginfo(p, &info);
7107 unlock_user(p, arg3, sizeof(target_siginfo_t));
7110 return ret;
7111 #endif
7112 #ifdef TARGET_NR_creat /* not on alpha */
7113 case TARGET_NR_creat:
7114 if (!(p = lock_user_string(arg1)))
7115 return -TARGET_EFAULT;
7116 ret = get_errno(creat(p, arg2));
7117 fd_trans_unregister(ret);
7118 unlock_user(p, arg1, 0);
7119 return ret;
7120 #endif
7121 #ifdef TARGET_NR_link
7122 case TARGET_NR_link:
7124 void * p2;
7125 p = lock_user_string(arg1);
7126 p2 = lock_user_string(arg2);
7127 if (!p || !p2)
7128 ret = -TARGET_EFAULT;
7129 else
7130 ret = get_errno(link(p, p2));
7131 unlock_user(p2, arg2, 0);
7132 unlock_user(p, arg1, 0);
7134 return ret;
7135 #endif
7136 #if defined(TARGET_NR_linkat)
7137 case TARGET_NR_linkat:
7139 void * p2 = NULL;
7140 if (!arg2 || !arg4)
7141 return -TARGET_EFAULT;
7142 p = lock_user_string(arg2);
7143 p2 = lock_user_string(arg4);
7144 if (!p || !p2)
7145 ret = -TARGET_EFAULT;
7146 else
7147 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7148 unlock_user(p, arg2, 0);
7149 unlock_user(p2, arg4, 0);
7151 return ret;
7152 #endif
7153 #ifdef TARGET_NR_unlink
7154 case TARGET_NR_unlink:
7155 if (!(p = lock_user_string(arg1)))
7156 return -TARGET_EFAULT;
7157 ret = get_errno(unlink(p));
7158 unlock_user(p, arg1, 0);
7159 return ret;
7160 #endif
7161 #if defined(TARGET_NR_unlinkat)
7162 case TARGET_NR_unlinkat:
7163 if (!(p = lock_user_string(arg2)))
7164 return -TARGET_EFAULT;
7165 ret = get_errno(unlinkat(arg1, p, arg3));
7166 unlock_user(p, arg2, 0);
7167 return ret;
7168 #endif
7169 case TARGET_NR_execve:
7171 char **argp, **envp;
7172 int argc, envc;
7173 abi_ulong gp;
7174 abi_ulong guest_argp;
7175 abi_ulong guest_envp;
7176 abi_ulong addr;
7177 char **q;
7178 int total_size = 0;
7180 argc = 0;
7181 guest_argp = arg2;
7182 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7183 if (get_user_ual(addr, gp))
7184 return -TARGET_EFAULT;
7185 if (!addr)
7186 break;
7187 argc++;
7189 envc = 0;
7190 guest_envp = arg3;
7191 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7192 if (get_user_ual(addr, gp))
7193 return -TARGET_EFAULT;
7194 if (!addr)
7195 break;
7196 envc++;
7199 argp = g_new0(char *, argc + 1);
7200 envp = g_new0(char *, envc + 1);
7202 for (gp = guest_argp, q = argp; gp;
7203 gp += sizeof(abi_ulong), q++) {
7204 if (get_user_ual(addr, gp))
7205 goto execve_efault;
7206 if (!addr)
7207 break;
7208 if (!(*q = lock_user_string(addr)))
7209 goto execve_efault;
7210 total_size += strlen(*q) + 1;
7212 *q = NULL;
7214 for (gp = guest_envp, q = envp; gp;
7215 gp += sizeof(abi_ulong), q++) {
7216 if (get_user_ual(addr, gp))
7217 goto execve_efault;
7218 if (!addr)
7219 break;
7220 if (!(*q = lock_user_string(addr)))
7221 goto execve_efault;
7222 total_size += strlen(*q) + 1;
7224 *q = NULL;
7226 if (!(p = lock_user_string(arg1)))
7227 goto execve_efault;
7228 /* Although execve() is not an interruptible syscall it is
7229 * a special case where we must use the safe_syscall wrapper:
7230 * if we allow a signal to happen before we make the host
7231 * syscall then we will 'lose' it, because at the point of
7232 * execve the process leaves QEMU's control. So we use the
7233 * safe syscall wrapper to ensure that we either take the
7234 * signal as a guest signal, or else it does not happen
7235 * before the execve completes and makes it the other
7236 * program's problem.
7238 ret = get_errno(safe_execve(p, argp, envp));
7239 unlock_user(p, arg1, 0);
7241 goto execve_end;
7243 execve_efault:
7244 ret = -TARGET_EFAULT;
7246 execve_end:
7247 for (gp = guest_argp, q = argp; *q;
7248 gp += sizeof(abi_ulong), q++) {
7249 if (get_user_ual(addr, gp)
7250 || !addr)
7251 break;
7252 unlock_user(*q, addr, 0);
7254 for (gp = guest_envp, q = envp; *q;
7255 gp += sizeof(abi_ulong), q++) {
7256 if (get_user_ual(addr, gp)
7257 || !addr)
7258 break;
7259 unlock_user(*q, addr, 0);
7262 g_free(argp);
7263 g_free(envp);
7265 return ret;
7266 case TARGET_NR_chdir:
7267 if (!(p = lock_user_string(arg1)))
7268 return -TARGET_EFAULT;
7269 ret = get_errno(chdir(p));
7270 unlock_user(p, arg1, 0);
7271 return ret;
7272 #ifdef TARGET_NR_time
7273 case TARGET_NR_time:
7275 time_t host_time;
7276 ret = get_errno(time(&host_time));
7277 if (!is_error(ret)
7278 && arg1
7279 && put_user_sal(host_time, arg1))
7280 return -TARGET_EFAULT;
7282 return ret;
7283 #endif
7284 #ifdef TARGET_NR_mknod
7285 case TARGET_NR_mknod:
7286 if (!(p = lock_user_string(arg1)))
7287 return -TARGET_EFAULT;
7288 ret = get_errno(mknod(p, arg2, arg3));
7289 unlock_user(p, arg1, 0);
7290 return ret;
7291 #endif
7292 #if defined(TARGET_NR_mknodat)
7293 case TARGET_NR_mknodat:
7294 if (!(p = lock_user_string(arg2)))
7295 return -TARGET_EFAULT;
7296 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7297 unlock_user(p, arg2, 0);
7298 return ret;
7299 #endif
7300 #ifdef TARGET_NR_chmod
7301 case TARGET_NR_chmod:
7302 if (!(p = lock_user_string(arg1)))
7303 return -TARGET_EFAULT;
7304 ret = get_errno(chmod(p, arg2));
7305 unlock_user(p, arg1, 0);
7306 return ret;
7307 #endif
7308 #ifdef TARGET_NR_lseek
7309 case TARGET_NR_lseek:
7310 return get_errno(lseek(arg1, arg2, arg3));
7311 #endif
7312 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7313 /* Alpha specific */
7314 case TARGET_NR_getxpid:
7315 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7316 return get_errno(getpid());
7317 #endif
7318 #ifdef TARGET_NR_getpid
7319 case TARGET_NR_getpid:
7320 return get_errno(getpid());
7321 #endif
7322 case TARGET_NR_mount:
7324 /* need to look at the data field */
7325 void *p2, *p3;
7327 if (arg1) {
7328 p = lock_user_string(arg1);
7329 if (!p) {
7330 return -TARGET_EFAULT;
7332 } else {
7333 p = NULL;
7336 p2 = lock_user_string(arg2);
7337 if (!p2) {
7338 if (arg1) {
7339 unlock_user(p, arg1, 0);
7341 return -TARGET_EFAULT;
7344 if (arg3) {
7345 p3 = lock_user_string(arg3);
7346 if (!p3) {
7347 if (arg1) {
7348 unlock_user(p, arg1, 0);
7350 unlock_user(p2, arg2, 0);
7351 return -TARGET_EFAULT;
7353 } else {
7354 p3 = NULL;
7357 /* FIXME - arg5 should be locked, but it isn't clear how to
7358 * do that since it's not guaranteed to be a NULL-terminated
7359 * string.
7361 if (!arg5) {
7362 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7363 } else {
7364 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7366 ret = get_errno(ret);
7368 if (arg1) {
7369 unlock_user(p, arg1, 0);
7371 unlock_user(p2, arg2, 0);
7372 if (arg3) {
7373 unlock_user(p3, arg3, 0);
7376 return ret;
7377 #ifdef TARGET_NR_umount
7378 case TARGET_NR_umount:
7379 if (!(p = lock_user_string(arg1)))
7380 return -TARGET_EFAULT;
7381 ret = get_errno(umount(p));
7382 unlock_user(p, arg1, 0);
7383 return ret;
7384 #endif
7385 #ifdef TARGET_NR_stime /* not on alpha */
7386 case TARGET_NR_stime:
7388 time_t host_time;
7389 if (get_user_sal(host_time, arg1))
7390 return -TARGET_EFAULT;
7391 return get_errno(stime(&host_time));
7393 #endif
7394 #ifdef TARGET_NR_alarm /* not on alpha */
7395 case TARGET_NR_alarm:
7396 return alarm(arg1);
7397 #endif
7398 #ifdef TARGET_NR_pause /* not on alpha */
7399 case TARGET_NR_pause:
7400 if (!block_signals()) {
7401 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7403 return -TARGET_EINTR;
7404 #endif
7405 #ifdef TARGET_NR_utime
7406 case TARGET_NR_utime:
7408 struct utimbuf tbuf, *host_tbuf;
7409 struct target_utimbuf *target_tbuf;
7410 if (arg2) {
7411 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7412 return -TARGET_EFAULT;
7413 tbuf.actime = tswapal(target_tbuf->actime);
7414 tbuf.modtime = tswapal(target_tbuf->modtime);
7415 unlock_user_struct(target_tbuf, arg2, 0);
7416 host_tbuf = &tbuf;
7417 } else {
7418 host_tbuf = NULL;
7420 if (!(p = lock_user_string(arg1)))
7421 return -TARGET_EFAULT;
7422 ret = get_errno(utime(p, host_tbuf));
7423 unlock_user(p, arg1, 0);
7425 return ret;
7426 #endif
7427 #ifdef TARGET_NR_utimes
7428 case TARGET_NR_utimes:
7430 struct timeval *tvp, tv[2];
7431 if (arg2) {
7432 if (copy_from_user_timeval(&tv[0], arg2)
7433 || copy_from_user_timeval(&tv[1],
7434 arg2 + sizeof(struct target_timeval)))
7435 return -TARGET_EFAULT;
7436 tvp = tv;
7437 } else {
7438 tvp = NULL;
7440 if (!(p = lock_user_string(arg1)))
7441 return -TARGET_EFAULT;
7442 ret = get_errno(utimes(p, tvp));
7443 unlock_user(p, arg1, 0);
7445 return ret;
7446 #endif
7447 #if defined(TARGET_NR_futimesat)
7448 case TARGET_NR_futimesat:
7450 struct timeval *tvp, tv[2];
7451 if (arg3) {
7452 if (copy_from_user_timeval(&tv[0], arg3)
7453 || copy_from_user_timeval(&tv[1],
7454 arg3 + sizeof(struct target_timeval)))
7455 return -TARGET_EFAULT;
7456 tvp = tv;
7457 } else {
7458 tvp = NULL;
7460 if (!(p = lock_user_string(arg2))) {
7461 return -TARGET_EFAULT;
7463 ret = get_errno(futimesat(arg1, path(p), tvp));
7464 unlock_user(p, arg2, 0);
7466 return ret;
7467 #endif
7468 #ifdef TARGET_NR_access
7469 case TARGET_NR_access:
7470 if (!(p = lock_user_string(arg1))) {
7471 return -TARGET_EFAULT;
7473 ret = get_errno(access(path(p), arg2));
7474 unlock_user(p, arg1, 0);
7475 return ret;
7476 #endif
7477 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7478 case TARGET_NR_faccessat:
7479 if (!(p = lock_user_string(arg2))) {
7480 return -TARGET_EFAULT;
7482 ret = get_errno(faccessat(arg1, p, arg3, 0));
7483 unlock_user(p, arg2, 0);
7484 return ret;
7485 #endif
7486 #ifdef TARGET_NR_nice /* not on alpha */
7487 case TARGET_NR_nice:
7488 return get_errno(nice(arg1));
7489 #endif
7490 case TARGET_NR_sync:
7491 sync();
7492 return 0;
7493 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7494 case TARGET_NR_syncfs:
7495 return get_errno(syncfs(arg1));
7496 #endif
7497 case TARGET_NR_kill:
7498 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7499 #ifdef TARGET_NR_rename
7500 case TARGET_NR_rename:
7502 void *p2;
7503 p = lock_user_string(arg1);
7504 p2 = lock_user_string(arg2);
7505 if (!p || !p2)
7506 ret = -TARGET_EFAULT;
7507 else
7508 ret = get_errno(rename(p, p2));
7509 unlock_user(p2, arg2, 0);
7510 unlock_user(p, arg1, 0);
7512 return ret;
7513 #endif
7514 #if defined(TARGET_NR_renameat)
7515 case TARGET_NR_renameat:
7517 void *p2;
7518 p = lock_user_string(arg2);
7519 p2 = lock_user_string(arg4);
7520 if (!p || !p2)
7521 ret = -TARGET_EFAULT;
7522 else
7523 ret = get_errno(renameat(arg1, p, arg3, p2));
7524 unlock_user(p2, arg4, 0);
7525 unlock_user(p, arg2, 0);
7527 return ret;
7528 #endif
7529 #if defined(TARGET_NR_renameat2)
7530 case TARGET_NR_renameat2:
7532 void *p2;
7533 p = lock_user_string(arg2);
7534 p2 = lock_user_string(arg4);
7535 if (!p || !p2) {
7536 ret = -TARGET_EFAULT;
7537 } else {
7538 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7540 unlock_user(p2, arg4, 0);
7541 unlock_user(p, arg2, 0);
7543 return ret;
7544 #endif
7545 #ifdef TARGET_NR_mkdir
7546 case TARGET_NR_mkdir:
7547 if (!(p = lock_user_string(arg1)))
7548 return -TARGET_EFAULT;
7549 ret = get_errno(mkdir(p, arg2));
7550 unlock_user(p, arg1, 0);
7551 return ret;
7552 #endif
7553 #if defined(TARGET_NR_mkdirat)
7554 case TARGET_NR_mkdirat:
7555 if (!(p = lock_user_string(arg2)))
7556 return -TARGET_EFAULT;
7557 ret = get_errno(mkdirat(arg1, p, arg3));
7558 unlock_user(p, arg2, 0);
7559 return ret;
7560 #endif
7561 #ifdef TARGET_NR_rmdir
7562 case TARGET_NR_rmdir:
7563 if (!(p = lock_user_string(arg1)))
7564 return -TARGET_EFAULT;
7565 ret = get_errno(rmdir(p));
7566 unlock_user(p, arg1, 0);
7567 return ret;
7568 #endif
7569 case TARGET_NR_dup:
7570 ret = get_errno(dup(arg1));
7571 if (ret >= 0) {
7572 fd_trans_dup(arg1, ret);
7574 return ret;
7575 #ifdef TARGET_NR_pipe
7576 case TARGET_NR_pipe:
7577 return do_pipe(cpu_env, arg1, 0, 0);
7578 #endif
7579 #ifdef TARGET_NR_pipe2
7580 case TARGET_NR_pipe2:
7581 return do_pipe(cpu_env, arg1,
7582 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7583 #endif
7584 case TARGET_NR_times:
7586 struct target_tms *tmsp;
7587 struct tms tms;
7588 ret = get_errno(times(&tms));
7589 if (arg1) {
7590 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
7591 if (!tmsp)
7592 return -TARGET_EFAULT;
7593 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
7594 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
7595 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
7596 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
7598 if (!is_error(ret))
7599 ret = host_to_target_clock_t(ret);
7601 return ret;
7602 case TARGET_NR_acct:
7603 if (arg1 == 0) {
7604 ret = get_errno(acct(NULL));
7605 } else {
7606 if (!(p = lock_user_string(arg1))) {
7607 return -TARGET_EFAULT;
7609 ret = get_errno(acct(path(p)));
7610 unlock_user(p, arg1, 0);
7612 return ret;
7613 #ifdef TARGET_NR_umount2
7614 case TARGET_NR_umount2:
7615 if (!(p = lock_user_string(arg1)))
7616 return -TARGET_EFAULT;
7617 ret = get_errno(umount2(p, arg2));
7618 unlock_user(p, arg1, 0);
7619 return ret;
7620 #endif
7621 case TARGET_NR_ioctl:
7622 return do_ioctl(arg1, arg2, arg3);
7623 #ifdef TARGET_NR_fcntl
7624 case TARGET_NR_fcntl:
7625 return do_fcntl(arg1, arg2, arg3);
7626 #endif
7627 case TARGET_NR_setpgid:
7628 return get_errno(setpgid(arg1, arg2));
7629 case TARGET_NR_umask:
7630 return get_errno(umask(arg1));
7631 case TARGET_NR_chroot:
7632 if (!(p = lock_user_string(arg1)))
7633 return -TARGET_EFAULT;
7634 ret = get_errno(chroot(p));
7635 unlock_user(p, arg1, 0);
7636 return ret;
7637 #ifdef TARGET_NR_dup2
7638 case TARGET_NR_dup2:
7639 ret = get_errno(dup2(arg1, arg2));
7640 if (ret >= 0) {
7641 fd_trans_dup(arg1, arg2);
7643 return ret;
7644 #endif
7645 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
7646 case TARGET_NR_dup3:
7648 int host_flags;
7650 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
7651 return -EINVAL;
7653 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
7654 ret = get_errno(dup3(arg1, arg2, host_flags));
7655 if (ret >= 0) {
7656 fd_trans_dup(arg1, arg2);
7658 return ret;
7660 #endif
7661 #ifdef TARGET_NR_getppid /* not on alpha */
7662 case TARGET_NR_getppid:
7663 return get_errno(getppid());
7664 #endif
7665 #ifdef TARGET_NR_getpgrp
7666 case TARGET_NR_getpgrp:
7667 return get_errno(getpgrp());
7668 #endif
7669 case TARGET_NR_setsid:
7670 return get_errno(setsid());
7671 #ifdef TARGET_NR_sigaction
7672 case TARGET_NR_sigaction:
7674 #if defined(TARGET_ALPHA)
7675 struct target_sigaction act, oact, *pact = 0;
7676 struct target_old_sigaction *old_act;
7677 if (arg2) {
7678 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7679 return -TARGET_EFAULT;
7680 act._sa_handler = old_act->_sa_handler;
7681 target_siginitset(&act.sa_mask, old_act->sa_mask);
7682 act.sa_flags = old_act->sa_flags;
7683 act.sa_restorer = 0;
7684 unlock_user_struct(old_act, arg2, 0);
7685 pact = &act;
7687 ret = get_errno(do_sigaction(arg1, pact, &oact));
7688 if (!is_error(ret) && arg3) {
7689 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7690 return -TARGET_EFAULT;
7691 old_act->_sa_handler = oact._sa_handler;
7692 old_act->sa_mask = oact.sa_mask.sig[0];
7693 old_act->sa_flags = oact.sa_flags;
7694 unlock_user_struct(old_act, arg3, 1);
7696 #elif defined(TARGET_MIPS)
7697 struct target_sigaction act, oact, *pact, *old_act;
7699 if (arg2) {
7700 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7701 return -TARGET_EFAULT;
7702 act._sa_handler = old_act->_sa_handler;
7703 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
7704 act.sa_flags = old_act->sa_flags;
7705 unlock_user_struct(old_act, arg2, 0);
7706 pact = &act;
7707 } else {
7708 pact = NULL;
7711 ret = get_errno(do_sigaction(arg1, pact, &oact));
7713 if (!is_error(ret) && arg3) {
7714 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7715 return -TARGET_EFAULT;
7716 old_act->_sa_handler = oact._sa_handler;
7717 old_act->sa_flags = oact.sa_flags;
7718 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
7719 old_act->sa_mask.sig[1] = 0;
7720 old_act->sa_mask.sig[2] = 0;
7721 old_act->sa_mask.sig[3] = 0;
7722 unlock_user_struct(old_act, arg3, 1);
7724 #else
7725 struct target_old_sigaction *old_act;
7726 struct target_sigaction act, oact, *pact;
7727 if (arg2) {
7728 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
7729 return -TARGET_EFAULT;
7730 act._sa_handler = old_act->_sa_handler;
7731 target_siginitset(&act.sa_mask, old_act->sa_mask);
7732 act.sa_flags = old_act->sa_flags;
7733 act.sa_restorer = old_act->sa_restorer;
7734 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7735 act.ka_restorer = 0;
7736 #endif
7737 unlock_user_struct(old_act, arg2, 0);
7738 pact = &act;
7739 } else {
7740 pact = NULL;
7742 ret = get_errno(do_sigaction(arg1, pact, &oact));
7743 if (!is_error(ret) && arg3) {
7744 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
7745 return -TARGET_EFAULT;
7746 old_act->_sa_handler = oact._sa_handler;
7747 old_act->sa_mask = oact.sa_mask.sig[0];
7748 old_act->sa_flags = oact.sa_flags;
7749 old_act->sa_restorer = oact.sa_restorer;
7750 unlock_user_struct(old_act, arg3, 1);
7752 #endif
7754 return ret;
7755 #endif
7756 case TARGET_NR_rt_sigaction:
7758 #if defined(TARGET_ALPHA)
7759 /* For Alpha and SPARC this is a 5 argument syscall, with
7760 * a 'restorer' parameter which must be copied into the
7761 * sa_restorer field of the sigaction struct.
7762 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
7763 * and arg5 is the sigsetsize.
7764 * Alpha also has a separate rt_sigaction struct that it uses
7765 * here; SPARC uses the usual sigaction struct.
7767 struct target_rt_sigaction *rt_act;
7768 struct target_sigaction act, oact, *pact = 0;
7770 if (arg4 != sizeof(target_sigset_t)) {
7771 return -TARGET_EINVAL;
7773 if (arg2) {
7774 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
7775 return -TARGET_EFAULT;
7776 act._sa_handler = rt_act->_sa_handler;
7777 act.sa_mask = rt_act->sa_mask;
7778 act.sa_flags = rt_act->sa_flags;
7779 act.sa_restorer = arg5;
7780 unlock_user_struct(rt_act, arg2, 0);
7781 pact = &act;
7783 ret = get_errno(do_sigaction(arg1, pact, &oact));
7784 if (!is_error(ret) && arg3) {
7785 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
7786 return -TARGET_EFAULT;
7787 rt_act->_sa_handler = oact._sa_handler;
7788 rt_act->sa_mask = oact.sa_mask;
7789 rt_act->sa_flags = oact.sa_flags;
7790 unlock_user_struct(rt_act, arg3, 1);
7792 #else
7793 #ifdef TARGET_SPARC
7794 target_ulong restorer = arg4;
7795 target_ulong sigsetsize = arg5;
7796 #else
7797 target_ulong sigsetsize = arg4;
7798 #endif
7799 struct target_sigaction *act;
7800 struct target_sigaction *oact;
7802 if (sigsetsize != sizeof(target_sigset_t)) {
7803 return -TARGET_EINVAL;
7805 if (arg2) {
7806 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
7807 return -TARGET_EFAULT;
7809 #ifdef TARGET_ARCH_HAS_KA_RESTORER
7810 act->ka_restorer = restorer;
7811 #endif
7812 } else {
7813 act = NULL;
7815 if (arg3) {
7816 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
7817 ret = -TARGET_EFAULT;
7818 goto rt_sigaction_fail;
7820 } else
7821 oact = NULL;
7822 ret = get_errno(do_sigaction(arg1, act, oact));
7823 rt_sigaction_fail:
7824 if (act)
7825 unlock_user_struct(act, arg2, 0);
7826 if (oact)
7827 unlock_user_struct(oact, arg3, 1);
7828 #endif
7830 return ret;
7831 #ifdef TARGET_NR_sgetmask /* not on alpha */
7832 case TARGET_NR_sgetmask:
7834 sigset_t cur_set;
7835 abi_ulong target_set;
7836 ret = do_sigprocmask(0, NULL, &cur_set);
7837 if (!ret) {
7838 host_to_target_old_sigset(&target_set, &cur_set);
7839 ret = target_set;
7842 return ret;
7843 #endif
7844 #ifdef TARGET_NR_ssetmask /* not on alpha */
7845 case TARGET_NR_ssetmask:
7847 sigset_t set, oset;
7848 abi_ulong target_set = arg1;
7849 target_to_host_old_sigset(&set, &target_set);
7850 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
7851 if (!ret) {
7852 host_to_target_old_sigset(&target_set, &oset);
7853 ret = target_set;
7856 return ret;
7857 #endif
7858 #ifdef TARGET_NR_sigprocmask
7859 case TARGET_NR_sigprocmask:
7861 #if defined(TARGET_ALPHA)
7862 sigset_t set, oldset;
7863 abi_ulong mask;
7864 int how;
7866 switch (arg1) {
7867 case TARGET_SIG_BLOCK:
7868 how = SIG_BLOCK;
7869 break;
7870 case TARGET_SIG_UNBLOCK:
7871 how = SIG_UNBLOCK;
7872 break;
7873 case TARGET_SIG_SETMASK:
7874 how = SIG_SETMASK;
7875 break;
7876 default:
7877 return -TARGET_EINVAL;
7879 mask = arg2;
7880 target_to_host_old_sigset(&set, &mask);
7882 ret = do_sigprocmask(how, &set, &oldset);
7883 if (!is_error(ret)) {
7884 host_to_target_old_sigset(&mask, &oldset);
7885 ret = mask;
7886 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
7888 #else
7889 sigset_t set, oldset, *set_ptr;
7890 int how;
7892 if (arg2) {
7893 switch (arg1) {
7894 case TARGET_SIG_BLOCK:
7895 how = SIG_BLOCK;
7896 break;
7897 case TARGET_SIG_UNBLOCK:
7898 how = SIG_UNBLOCK;
7899 break;
7900 case TARGET_SIG_SETMASK:
7901 how = SIG_SETMASK;
7902 break;
7903 default:
7904 return -TARGET_EINVAL;
7906 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7907 return -TARGET_EFAULT;
7908 target_to_host_old_sigset(&set, p);
7909 unlock_user(p, arg2, 0);
7910 set_ptr = &set;
7911 } else {
7912 how = 0;
7913 set_ptr = NULL;
7915 ret = do_sigprocmask(how, set_ptr, &oldset);
7916 if (!is_error(ret) && arg3) {
7917 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7918 return -TARGET_EFAULT;
7919 host_to_target_old_sigset(p, &oldset);
7920 unlock_user(p, arg3, sizeof(target_sigset_t));
7922 #endif
7924 return ret;
7925 #endif
7926 case TARGET_NR_rt_sigprocmask:
7928 int how = arg1;
7929 sigset_t set, oldset, *set_ptr;
7931 if (arg4 != sizeof(target_sigset_t)) {
7932 return -TARGET_EINVAL;
7935 if (arg2) {
7936 switch(how) {
7937 case TARGET_SIG_BLOCK:
7938 how = SIG_BLOCK;
7939 break;
7940 case TARGET_SIG_UNBLOCK:
7941 how = SIG_UNBLOCK;
7942 break;
7943 case TARGET_SIG_SETMASK:
7944 how = SIG_SETMASK;
7945 break;
7946 default:
7947 return -TARGET_EINVAL;
7949 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
7950 return -TARGET_EFAULT;
7951 target_to_host_sigset(&set, p);
7952 unlock_user(p, arg2, 0);
7953 set_ptr = &set;
7954 } else {
7955 how = 0;
7956 set_ptr = NULL;
7958 ret = do_sigprocmask(how, set_ptr, &oldset);
7959 if (!is_error(ret) && arg3) {
7960 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
7961 return -TARGET_EFAULT;
7962 host_to_target_sigset(p, &oldset);
7963 unlock_user(p, arg3, sizeof(target_sigset_t));
7966 return ret;
7967 #ifdef TARGET_NR_sigpending
7968 case TARGET_NR_sigpending:
7970 sigset_t set;
7971 ret = get_errno(sigpending(&set));
7972 if (!is_error(ret)) {
7973 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7974 return -TARGET_EFAULT;
7975 host_to_target_old_sigset(p, &set);
7976 unlock_user(p, arg1, sizeof(target_sigset_t));
7979 return ret;
7980 #endif
7981 case TARGET_NR_rt_sigpending:
7983 sigset_t set;
7985 /* Yes, this check is >, not != like most. We follow the kernel's
7986 * logic and it does it like this because it implements
7987 * NR_sigpending through the same code path, and in that case
7988 * the old_sigset_t is smaller in size.
7990 if (arg2 > sizeof(target_sigset_t)) {
7991 return -TARGET_EINVAL;
7994 ret = get_errno(sigpending(&set));
7995 if (!is_error(ret)) {
7996 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
7997 return -TARGET_EFAULT;
7998 host_to_target_sigset(p, &set);
7999 unlock_user(p, arg1, sizeof(target_sigset_t));
8002 return ret;
8003 #ifdef TARGET_NR_sigsuspend
8004 case TARGET_NR_sigsuspend:
8006 TaskState *ts = cpu->opaque;
8007 #if defined(TARGET_ALPHA)
8008 abi_ulong mask = arg1;
8009 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8010 #else
8011 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8012 return -TARGET_EFAULT;
8013 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8014 unlock_user(p, arg1, 0);
8015 #endif
8016 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8017 SIGSET_T_SIZE));
8018 if (ret != -TARGET_ERESTARTSYS) {
8019 ts->in_sigsuspend = 1;
8022 return ret;
8023 #endif
8024 case TARGET_NR_rt_sigsuspend:
8026 TaskState *ts = cpu->opaque;
8028 if (arg2 != sizeof(target_sigset_t)) {
8029 return -TARGET_EINVAL;
8031 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8032 return -TARGET_EFAULT;
8033 target_to_host_sigset(&ts->sigsuspend_mask, p);
8034 unlock_user(p, arg1, 0);
8035 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8036 SIGSET_T_SIZE));
8037 if (ret != -TARGET_ERESTARTSYS) {
8038 ts->in_sigsuspend = 1;
8041 return ret;
8042 case TARGET_NR_rt_sigtimedwait:
8044 sigset_t set;
8045 struct timespec uts, *puts;
8046 siginfo_t uinfo;
8048 if (arg4 != sizeof(target_sigset_t)) {
8049 return -TARGET_EINVAL;
8052 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8053 return -TARGET_EFAULT;
8054 target_to_host_sigset(&set, p);
8055 unlock_user(p, arg1, 0);
8056 if (arg3) {
8057 puts = &uts;
8058 target_to_host_timespec(puts, arg3);
8059 } else {
8060 puts = NULL;
8062 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8063 SIGSET_T_SIZE));
8064 if (!is_error(ret)) {
8065 if (arg2) {
8066 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8068 if (!p) {
8069 return -TARGET_EFAULT;
8071 host_to_target_siginfo(p, &uinfo);
8072 unlock_user(p, arg2, sizeof(target_siginfo_t));
8074 ret = host_to_target_signal(ret);
8077 return ret;
8078 case TARGET_NR_rt_sigqueueinfo:
8080 siginfo_t uinfo;
8082 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8083 if (!p) {
8084 return -TARGET_EFAULT;
8086 target_to_host_siginfo(&uinfo, p);
8087 unlock_user(p, arg3, 0);
8088 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8090 return ret;
8091 case TARGET_NR_rt_tgsigqueueinfo:
8093 siginfo_t uinfo;
8095 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8096 if (!p) {
8097 return -TARGET_EFAULT;
8099 target_to_host_siginfo(&uinfo, p);
8100 unlock_user(p, arg4, 0);
8101 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8103 return ret;
8104 #ifdef TARGET_NR_sigreturn
8105 case TARGET_NR_sigreturn:
8106 if (block_signals()) {
8107 return -TARGET_ERESTARTSYS;
8109 return do_sigreturn(cpu_env);
8110 #endif
8111 case TARGET_NR_rt_sigreturn:
8112 if (block_signals()) {
8113 return -TARGET_ERESTARTSYS;
8115 return do_rt_sigreturn(cpu_env);
8116 case TARGET_NR_sethostname:
8117 if (!(p = lock_user_string(arg1)))
8118 return -TARGET_EFAULT;
8119 ret = get_errno(sethostname(p, arg2));
8120 unlock_user(p, arg1, 0);
8121 return ret;
8122 #ifdef TARGET_NR_setrlimit
8123 case TARGET_NR_setrlimit:
8125 int resource = target_to_host_resource(arg1);
8126 struct target_rlimit *target_rlim;
8127 struct rlimit rlim;
8128 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8129 return -TARGET_EFAULT;
8130 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8131 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8132 unlock_user_struct(target_rlim, arg2, 0);
8134 * If we just passed through resource limit settings for memory then
8135 * they would also apply to QEMU's own allocations, and QEMU will
8136 * crash or hang or die if its allocations fail. Ideally we would
8137 * track the guest allocations in QEMU and apply the limits ourselves.
8138 * For now, just tell the guest the call succeeded but don't actually
8139 * limit anything.
8141 if (resource != RLIMIT_AS &&
8142 resource != RLIMIT_DATA &&
8143 resource != RLIMIT_STACK) {
8144 return get_errno(setrlimit(resource, &rlim));
8145 } else {
8146 return 0;
8149 #endif
8150 #ifdef TARGET_NR_getrlimit
8151 case TARGET_NR_getrlimit:
8153 int resource = target_to_host_resource(arg1);
8154 struct target_rlimit *target_rlim;
8155 struct rlimit rlim;
8157 ret = get_errno(getrlimit(resource, &rlim));
8158 if (!is_error(ret)) {
8159 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8160 return -TARGET_EFAULT;
8161 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8162 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8163 unlock_user_struct(target_rlim, arg2, 1);
8166 return ret;
8167 #endif
8168 case TARGET_NR_getrusage:
8170 struct rusage rusage;
8171 ret = get_errno(getrusage(arg1, &rusage));
8172 if (!is_error(ret)) {
8173 ret = host_to_target_rusage(arg2, &rusage);
8176 return ret;
8177 case TARGET_NR_gettimeofday:
8179 struct timeval tv;
8180 ret = get_errno(gettimeofday(&tv, NULL));
8181 if (!is_error(ret)) {
8182 if (copy_to_user_timeval(arg1, &tv))
8183 return -TARGET_EFAULT;
8186 return ret;
8187 case TARGET_NR_settimeofday:
8189 struct timeval tv, *ptv = NULL;
8190 struct timezone tz, *ptz = NULL;
8192 if (arg1) {
8193 if (copy_from_user_timeval(&tv, arg1)) {
8194 return -TARGET_EFAULT;
8196 ptv = &tv;
8199 if (arg2) {
8200 if (copy_from_user_timezone(&tz, arg2)) {
8201 return -TARGET_EFAULT;
8203 ptz = &tz;
8206 return get_errno(settimeofday(ptv, ptz));
8208 #if defined(TARGET_NR_select)
8209 case TARGET_NR_select:
8210 #if defined(TARGET_WANT_NI_OLD_SELECT)
8211 /* some architectures used to have old_select here
8212 * but now ENOSYS it.
8214 ret = -TARGET_ENOSYS;
8215 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8216 ret = do_old_select(arg1);
8217 #else
8218 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8219 #endif
8220 return ret;
8221 #endif
8222 #ifdef TARGET_NR_pselect6
8223 case TARGET_NR_pselect6:
8225 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8226 fd_set rfds, wfds, efds;
8227 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8228 struct timespec ts, *ts_ptr;
8231 * The 6th arg is actually two args smashed together,
8232 * so we cannot use the C library.
8234 sigset_t set;
8235 struct {
8236 sigset_t *set;
8237 size_t size;
8238 } sig, *sig_ptr;
8240 abi_ulong arg_sigset, arg_sigsize, *arg7;
8241 target_sigset_t *target_sigset;
8243 n = arg1;
8244 rfd_addr = arg2;
8245 wfd_addr = arg3;
8246 efd_addr = arg4;
8247 ts_addr = arg5;
8249 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8250 if (ret) {
8251 return ret;
8253 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8254 if (ret) {
8255 return ret;
8257 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8258 if (ret) {
8259 return ret;
8263 * This takes a timespec, and not a timeval, so we cannot
8264 * use the do_select() helper ...
8266 if (ts_addr) {
8267 if (target_to_host_timespec(&ts, ts_addr)) {
8268 return -TARGET_EFAULT;
8270 ts_ptr = &ts;
8271 } else {
8272 ts_ptr = NULL;
8275 /* Extract the two packed args for the sigset */
8276 if (arg6) {
8277 sig_ptr = &sig;
8278 sig.size = SIGSET_T_SIZE;
8280 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8281 if (!arg7) {
8282 return -TARGET_EFAULT;
8284 arg_sigset = tswapal(arg7[0]);
8285 arg_sigsize = tswapal(arg7[1]);
8286 unlock_user(arg7, arg6, 0);
8288 if (arg_sigset) {
8289 sig.set = &set;
8290 if (arg_sigsize != sizeof(*target_sigset)) {
8291 /* Like the kernel, we enforce correct size sigsets */
8292 return -TARGET_EINVAL;
8294 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8295 sizeof(*target_sigset), 1);
8296 if (!target_sigset) {
8297 return -TARGET_EFAULT;
8299 target_to_host_sigset(&set, target_sigset);
8300 unlock_user(target_sigset, arg_sigset, 0);
8301 } else {
8302 sig.set = NULL;
8304 } else {
8305 sig_ptr = NULL;
8308 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8309 ts_ptr, sig_ptr));
8311 if (!is_error(ret)) {
8312 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8313 return -TARGET_EFAULT;
8314 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8315 return -TARGET_EFAULT;
8316 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8317 return -TARGET_EFAULT;
8319 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8320 return -TARGET_EFAULT;
8323 return ret;
8324 #endif
8325 #ifdef TARGET_NR_symlink
8326 case TARGET_NR_symlink:
8328 void *p2;
8329 p = lock_user_string(arg1);
8330 p2 = lock_user_string(arg2);
8331 if (!p || !p2)
8332 ret = -TARGET_EFAULT;
8333 else
8334 ret = get_errno(symlink(p, p2));
8335 unlock_user(p2, arg2, 0);
8336 unlock_user(p, arg1, 0);
8338 return ret;
8339 #endif
8340 #if defined(TARGET_NR_symlinkat)
8341 case TARGET_NR_symlinkat:
8343 void *p2;
8344 p = lock_user_string(arg1);
8345 p2 = lock_user_string(arg3);
8346 if (!p || !p2)
8347 ret = -TARGET_EFAULT;
8348 else
8349 ret = get_errno(symlinkat(p, arg2, p2));
8350 unlock_user(p2, arg3, 0);
8351 unlock_user(p, arg1, 0);
8353 return ret;
8354 #endif
8355 #ifdef TARGET_NR_readlink
8356 case TARGET_NR_readlink:
8358 void *p2;
8359 p = lock_user_string(arg1);
8360 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8361 if (!p || !p2) {
8362 ret = -TARGET_EFAULT;
8363 } else if (!arg3) {
8364 /* Short circuit this for the magic exe check. */
8365 ret = -TARGET_EINVAL;
8366 } else if (is_proc_myself((const char *)p, "exe")) {
8367 char real[PATH_MAX], *temp;
8368 temp = realpath(exec_path, real);
8369 /* Return value is # of bytes that we wrote to the buffer. */
8370 if (temp == NULL) {
8371 ret = get_errno(-1);
8372 } else {
8373 /* Don't worry about sign mismatch as earlier mapping
8374 * logic would have thrown a bad address error. */
8375 ret = MIN(strlen(real), arg3);
8376 /* We cannot NUL terminate the string. */
8377 memcpy(p2, real, ret);
8379 } else {
8380 ret = get_errno(readlink(path(p), p2, arg3));
8382 unlock_user(p2, arg2, ret);
8383 unlock_user(p, arg1, 0);
8385 return ret;
8386 #endif
8387 #if defined(TARGET_NR_readlinkat)
8388 case TARGET_NR_readlinkat:
8390 void *p2;
8391 p = lock_user_string(arg2);
8392 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8393 if (!p || !p2) {
8394 ret = -TARGET_EFAULT;
8395 } else if (is_proc_myself((const char *)p, "exe")) {
8396 char real[PATH_MAX], *temp;
8397 temp = realpath(exec_path, real);
8398 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8399 snprintf((char *)p2, arg4, "%s", real);
8400 } else {
8401 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8403 unlock_user(p2, arg3, ret);
8404 unlock_user(p, arg2, 0);
8406 return ret;
8407 #endif
8408 #ifdef TARGET_NR_swapon
8409 case TARGET_NR_swapon:
8410 if (!(p = lock_user_string(arg1)))
8411 return -TARGET_EFAULT;
8412 ret = get_errno(swapon(p, arg2));
8413 unlock_user(p, arg1, 0);
8414 return ret;
8415 #endif
8416 case TARGET_NR_reboot:
8417 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8418 /* arg4 must be ignored in all other cases */
8419 p = lock_user_string(arg4);
8420 if (!p) {
8421 return -TARGET_EFAULT;
8423 ret = get_errno(reboot(arg1, arg2, arg3, p));
8424 unlock_user(p, arg4, 0);
8425 } else {
8426 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8428 return ret;
8429 #ifdef TARGET_NR_mmap
8430 case TARGET_NR_mmap:
8431 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8432 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8433 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8434 || defined(TARGET_S390X)
8436 abi_ulong *v;
8437 abi_ulong v1, v2, v3, v4, v5, v6;
8438 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8439 return -TARGET_EFAULT;
8440 v1 = tswapal(v[0]);
8441 v2 = tswapal(v[1]);
8442 v3 = tswapal(v[2]);
8443 v4 = tswapal(v[3]);
8444 v5 = tswapal(v[4]);
8445 v6 = tswapal(v[5]);
8446 unlock_user(v, arg1, 0);
8447 ret = get_errno(target_mmap(v1, v2, v3,
8448 target_to_host_bitmask(v4, mmap_flags_tbl),
8449 v5, v6));
8451 #else
8452 ret = get_errno(target_mmap(arg1, arg2, arg3,
8453 target_to_host_bitmask(arg4, mmap_flags_tbl),
8454 arg5,
8455 arg6));
8456 #endif
8457 return ret;
8458 #endif
8459 #ifdef TARGET_NR_mmap2
8460 case TARGET_NR_mmap2:
8461 #ifndef MMAP_SHIFT
8462 #define MMAP_SHIFT 12
8463 #endif
8464 ret = target_mmap(arg1, arg2, arg3,
8465 target_to_host_bitmask(arg4, mmap_flags_tbl),
8466 arg5, arg6 << MMAP_SHIFT);
8467 return get_errno(ret);
8468 #endif
8469 case TARGET_NR_munmap:
8470 return get_errno(target_munmap(arg1, arg2));
8471 case TARGET_NR_mprotect:
8473 TaskState *ts = cpu->opaque;
8474 /* Special hack to detect libc making the stack executable. */
8475 if ((arg3 & PROT_GROWSDOWN)
8476 && arg1 >= ts->info->stack_limit
8477 && arg1 <= ts->info->start_stack) {
8478 arg3 &= ~PROT_GROWSDOWN;
8479 arg2 = arg2 + arg1 - ts->info->stack_limit;
8480 arg1 = ts->info->stack_limit;
8483 return get_errno(target_mprotect(arg1, arg2, arg3));
8484 #ifdef TARGET_NR_mremap
8485 case TARGET_NR_mremap:
8486 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8487 #endif
8488 /* ??? msync/mlock/munlock are broken for softmmu. */
8489 #ifdef TARGET_NR_msync
8490 case TARGET_NR_msync:
8491 return get_errno(msync(g2h(arg1), arg2, arg3));
8492 #endif
8493 #ifdef TARGET_NR_mlock
8494 case TARGET_NR_mlock:
8495 return get_errno(mlock(g2h(arg1), arg2));
8496 #endif
8497 #ifdef TARGET_NR_munlock
8498 case TARGET_NR_munlock:
8499 return get_errno(munlock(g2h(arg1), arg2));
8500 #endif
8501 #ifdef TARGET_NR_mlockall
8502 case TARGET_NR_mlockall:
8503 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8504 #endif
8505 #ifdef TARGET_NR_munlockall
8506 case TARGET_NR_munlockall:
8507 return get_errno(munlockall());
8508 #endif
8509 #ifdef TARGET_NR_truncate
8510 case TARGET_NR_truncate:
8511 if (!(p = lock_user_string(arg1)))
8512 return -TARGET_EFAULT;
8513 ret = get_errno(truncate(p, arg2));
8514 unlock_user(p, arg1, 0);
8515 return ret;
8516 #endif
8517 #ifdef TARGET_NR_ftruncate
8518 case TARGET_NR_ftruncate:
8519 return get_errno(ftruncate(arg1, arg2));
8520 #endif
8521 case TARGET_NR_fchmod:
8522 return get_errno(fchmod(arg1, arg2));
8523 #if defined(TARGET_NR_fchmodat)
8524 case TARGET_NR_fchmodat:
8525 if (!(p = lock_user_string(arg2)))
8526 return -TARGET_EFAULT;
8527 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8528 unlock_user(p, arg2, 0);
8529 return ret;
8530 #endif
8531 case TARGET_NR_getpriority:
8532 /* Note that negative values are valid for getpriority, so we must
8533 differentiate based on errno settings. */
8534 errno = 0;
8535 ret = getpriority(arg1, arg2);
8536 if (ret == -1 && errno != 0) {
8537 return -host_to_target_errno(errno);
8539 #ifdef TARGET_ALPHA
8540 /* Return value is the unbiased priority. Signal no error. */
8541 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8542 #else
8543 /* Return value is a biased priority to avoid negative numbers. */
8544 ret = 20 - ret;
8545 #endif
8546 return ret;
8547 case TARGET_NR_setpriority:
8548 return get_errno(setpriority(arg1, arg2, arg3));
8549 #ifdef TARGET_NR_statfs
8550 case TARGET_NR_statfs:
8551 if (!(p = lock_user_string(arg1))) {
8552 return -TARGET_EFAULT;
8554 ret = get_errno(statfs(path(p), &stfs));
8555 unlock_user(p, arg1, 0);
8556 convert_statfs:
8557 if (!is_error(ret)) {
8558 struct target_statfs *target_stfs;
8560 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8561 return -TARGET_EFAULT;
8562 __put_user(stfs.f_type, &target_stfs->f_type);
8563 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8564 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8565 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8566 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8567 __put_user(stfs.f_files, &target_stfs->f_files);
8568 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8569 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8570 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8571 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8572 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8573 #ifdef _STATFS_F_FLAGS
8574 __put_user(stfs.f_flags, &target_stfs->f_flags);
8575 #else
8576 __put_user(0, &target_stfs->f_flags);
8577 #endif
8578 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8579 unlock_user_struct(target_stfs, arg2, 1);
8581 return ret;
8582 #endif
8583 #ifdef TARGET_NR_fstatfs
8584 case TARGET_NR_fstatfs:
8585 ret = get_errno(fstatfs(arg1, &stfs));
8586 goto convert_statfs;
8587 #endif
8588 #ifdef TARGET_NR_statfs64
8589 case TARGET_NR_statfs64:
8590 if (!(p = lock_user_string(arg1))) {
8591 return -TARGET_EFAULT;
8593 ret = get_errno(statfs(path(p), &stfs));
8594 unlock_user(p, arg1, 0);
8595 convert_statfs64:
8596 if (!is_error(ret)) {
8597 struct target_statfs64 *target_stfs;
8599 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
8600 return -TARGET_EFAULT;
8601 __put_user(stfs.f_type, &target_stfs->f_type);
8602 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
8603 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
8604 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
8605 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
8606 __put_user(stfs.f_files, &target_stfs->f_files);
8607 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
8608 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
8609 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
8610 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
8611 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
8612 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
8613 unlock_user_struct(target_stfs, arg3, 1);
8615 return ret;
8616 case TARGET_NR_fstatfs64:
8617 ret = get_errno(fstatfs(arg1, &stfs));
8618 goto convert_statfs64;
8619 #endif
8620 #ifdef TARGET_NR_socketcall
8621 case TARGET_NR_socketcall:
8622 return do_socketcall(arg1, arg2);
8623 #endif
8624 #ifdef TARGET_NR_accept
8625 case TARGET_NR_accept:
8626 return do_accept4(arg1, arg2, arg3, 0);
8627 #endif
8628 #ifdef TARGET_NR_accept4
8629 case TARGET_NR_accept4:
8630 return do_accept4(arg1, arg2, arg3, arg4);
8631 #endif
8632 #ifdef TARGET_NR_bind
8633 case TARGET_NR_bind:
8634 return do_bind(arg1, arg2, arg3);
8635 #endif
8636 #ifdef TARGET_NR_connect
8637 case TARGET_NR_connect:
8638 return do_connect(arg1, arg2, arg3);
8639 #endif
8640 #ifdef TARGET_NR_getpeername
8641 case TARGET_NR_getpeername:
8642 return do_getpeername(arg1, arg2, arg3);
8643 #endif
8644 #ifdef TARGET_NR_getsockname
8645 case TARGET_NR_getsockname:
8646 return do_getsockname(arg1, arg2, arg3);
8647 #endif
8648 #ifdef TARGET_NR_getsockopt
8649 case TARGET_NR_getsockopt:
8650 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
8651 #endif
8652 #ifdef TARGET_NR_listen
8653 case TARGET_NR_listen:
8654 return get_errno(listen(arg1, arg2));
8655 #endif
8656 #ifdef TARGET_NR_recv
8657 case TARGET_NR_recv:
8658 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
8659 #endif
8660 #ifdef TARGET_NR_recvfrom
8661 case TARGET_NR_recvfrom:
8662 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
8663 #endif
8664 #ifdef TARGET_NR_recvmsg
8665 case TARGET_NR_recvmsg:
8666 return do_sendrecvmsg(arg1, arg2, arg3, 0);
8667 #endif
8668 #ifdef TARGET_NR_send
8669 case TARGET_NR_send:
8670 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
8671 #endif
8672 #ifdef TARGET_NR_sendmsg
8673 case TARGET_NR_sendmsg:
8674 return do_sendrecvmsg(arg1, arg2, arg3, 1);
8675 #endif
8676 #ifdef TARGET_NR_sendmmsg
8677 case TARGET_NR_sendmmsg:
8678 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
8679 case TARGET_NR_recvmmsg:
8680 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
8681 #endif
8682 #ifdef TARGET_NR_sendto
8683 case TARGET_NR_sendto:
8684 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
8685 #endif
8686 #ifdef TARGET_NR_shutdown
8687 case TARGET_NR_shutdown:
8688 return get_errno(shutdown(arg1, arg2));
8689 #endif
8690 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
8691 case TARGET_NR_getrandom:
8692 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
8693 if (!p) {
8694 return -TARGET_EFAULT;
8696 ret = get_errno(getrandom(p, arg2, arg3));
8697 unlock_user(p, arg1, ret);
8698 return ret;
8699 #endif
8700 #ifdef TARGET_NR_socket
8701 case TARGET_NR_socket:
8702 return do_socket(arg1, arg2, arg3);
8703 #endif
8704 #ifdef TARGET_NR_socketpair
8705 case TARGET_NR_socketpair:
8706 return do_socketpair(arg1, arg2, arg3, arg4);
8707 #endif
8708 #ifdef TARGET_NR_setsockopt
8709 case TARGET_NR_setsockopt:
8710 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
8711 #endif
8712 #if defined(TARGET_NR_syslog)
8713 case TARGET_NR_syslog:
8715 int len = arg2;
8717 switch (arg1) {
8718 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
8719 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
8720 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
8721 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
8722 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
8723 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
8724 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
8725 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
8726 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
8727 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
8728 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
8729 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
8731 if (len < 0) {
8732 return -TARGET_EINVAL;
8734 if (len == 0) {
8735 return 0;
8737 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8738 if (!p) {
8739 return -TARGET_EFAULT;
8741 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
8742 unlock_user(p, arg2, arg3);
8744 return ret;
8745 default:
8746 return -TARGET_EINVAL;
8749 break;
8750 #endif
8751 case TARGET_NR_setitimer:
8753 struct itimerval value, ovalue, *pvalue;
8755 if (arg2) {
8756 pvalue = &value;
8757 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
8758 || copy_from_user_timeval(&pvalue->it_value,
8759 arg2 + sizeof(struct target_timeval)))
8760 return -TARGET_EFAULT;
8761 } else {
8762 pvalue = NULL;
8764 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
8765 if (!is_error(ret) && arg3) {
8766 if (copy_to_user_timeval(arg3,
8767 &ovalue.it_interval)
8768 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
8769 &ovalue.it_value))
8770 return -TARGET_EFAULT;
8773 return ret;
8774 case TARGET_NR_getitimer:
8776 struct itimerval value;
8778 ret = get_errno(getitimer(arg1, &value));
8779 if (!is_error(ret) && arg2) {
8780 if (copy_to_user_timeval(arg2,
8781 &value.it_interval)
8782 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
8783 &value.it_value))
8784 return -TARGET_EFAULT;
8787 return ret;
8788 #ifdef TARGET_NR_stat
8789 case TARGET_NR_stat:
8790 if (!(p = lock_user_string(arg1))) {
8791 return -TARGET_EFAULT;
8793 ret = get_errno(stat(path(p), &st));
8794 unlock_user(p, arg1, 0);
8795 goto do_stat;
8796 #endif
8797 #ifdef TARGET_NR_lstat
8798 case TARGET_NR_lstat:
8799 if (!(p = lock_user_string(arg1))) {
8800 return -TARGET_EFAULT;
8802 ret = get_errno(lstat(path(p), &st));
8803 unlock_user(p, arg1, 0);
8804 goto do_stat;
8805 #endif
8806 #ifdef TARGET_NR_fstat
8807 case TARGET_NR_fstat:
8809 ret = get_errno(fstat(arg1, &st));
8810 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
8811 do_stat:
8812 #endif
8813 if (!is_error(ret)) {
8814 struct target_stat *target_st;
8816 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
8817 return -TARGET_EFAULT;
8818 memset(target_st, 0, sizeof(*target_st));
8819 __put_user(st.st_dev, &target_st->st_dev);
8820 __put_user(st.st_ino, &target_st->st_ino);
8821 __put_user(st.st_mode, &target_st->st_mode);
8822 __put_user(st.st_uid, &target_st->st_uid);
8823 __put_user(st.st_gid, &target_st->st_gid);
8824 __put_user(st.st_nlink, &target_st->st_nlink);
8825 __put_user(st.st_rdev, &target_st->st_rdev);
8826 __put_user(st.st_size, &target_st->st_size);
8827 __put_user(st.st_blksize, &target_st->st_blksize);
8828 __put_user(st.st_blocks, &target_st->st_blocks);
8829 __put_user(st.st_atime, &target_st->target_st_atime);
8830 __put_user(st.st_mtime, &target_st->target_st_mtime);
8831 __put_user(st.st_ctime, &target_st->target_st_ctime);
8832 unlock_user_struct(target_st, arg2, 1);
8835 return ret;
8836 #endif
8837 case TARGET_NR_vhangup:
8838 return get_errno(vhangup());
8839 #ifdef TARGET_NR_syscall
8840 case TARGET_NR_syscall:
8841 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
8842 arg6, arg7, arg8, 0);
8843 #endif
8844 case TARGET_NR_wait4:
8846 int status;
8847 abi_long status_ptr = arg2;
8848 struct rusage rusage, *rusage_ptr;
8849 abi_ulong target_rusage = arg4;
8850 abi_long rusage_err;
8851 if (target_rusage)
8852 rusage_ptr = &rusage;
8853 else
8854 rusage_ptr = NULL;
8855 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
8856 if (!is_error(ret)) {
8857 if (status_ptr && ret) {
8858 status = host_to_target_waitstatus(status);
8859 if (put_user_s32(status, status_ptr))
8860 return -TARGET_EFAULT;
8862 if (target_rusage) {
8863 rusage_err = host_to_target_rusage(target_rusage, &rusage);
8864 if (rusage_err) {
8865 ret = rusage_err;
8870 return ret;
8871 #ifdef TARGET_NR_swapoff
8872 case TARGET_NR_swapoff:
8873 if (!(p = lock_user_string(arg1)))
8874 return -TARGET_EFAULT;
8875 ret = get_errno(swapoff(p));
8876 unlock_user(p, arg1, 0);
8877 return ret;
8878 #endif
8879 case TARGET_NR_sysinfo:
8881 struct target_sysinfo *target_value;
8882 struct sysinfo value;
8883 ret = get_errno(sysinfo(&value));
8884 if (!is_error(ret) && arg1)
8886 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
8887 return -TARGET_EFAULT;
8888 __put_user(value.uptime, &target_value->uptime);
8889 __put_user(value.loads[0], &target_value->loads[0]);
8890 __put_user(value.loads[1], &target_value->loads[1]);
8891 __put_user(value.loads[2], &target_value->loads[2]);
8892 __put_user(value.totalram, &target_value->totalram);
8893 __put_user(value.freeram, &target_value->freeram);
8894 __put_user(value.sharedram, &target_value->sharedram);
8895 __put_user(value.bufferram, &target_value->bufferram);
8896 __put_user(value.totalswap, &target_value->totalswap);
8897 __put_user(value.freeswap, &target_value->freeswap);
8898 __put_user(value.procs, &target_value->procs);
8899 __put_user(value.totalhigh, &target_value->totalhigh);
8900 __put_user(value.freehigh, &target_value->freehigh);
8901 __put_user(value.mem_unit, &target_value->mem_unit);
8902 unlock_user_struct(target_value, arg1, 1);
8905 return ret;
8906 #ifdef TARGET_NR_ipc
8907 case TARGET_NR_ipc:
8908 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
8909 #endif
8910 #ifdef TARGET_NR_semget
8911 case TARGET_NR_semget:
8912 return get_errno(semget(arg1, arg2, arg3));
8913 #endif
8914 #ifdef TARGET_NR_semop
8915 case TARGET_NR_semop:
8916 return do_semop(arg1, arg2, arg3);
8917 #endif
8918 #ifdef TARGET_NR_semctl
8919 case TARGET_NR_semctl:
8920 return do_semctl(arg1, arg2, arg3, arg4);
8921 #endif
8922 #ifdef TARGET_NR_msgctl
8923 case TARGET_NR_msgctl:
8924 return do_msgctl(arg1, arg2, arg3);
8925 #endif
8926 #ifdef TARGET_NR_msgget
8927 case TARGET_NR_msgget:
8928 return get_errno(msgget(arg1, arg2));
8929 #endif
8930 #ifdef TARGET_NR_msgrcv
8931 case TARGET_NR_msgrcv:
8932 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
8933 #endif
8934 #ifdef TARGET_NR_msgsnd
8935 case TARGET_NR_msgsnd:
8936 return do_msgsnd(arg1, arg2, arg3, arg4);
8937 #endif
8938 #ifdef TARGET_NR_shmget
8939 case TARGET_NR_shmget:
8940 return get_errno(shmget(arg1, arg2, arg3));
8941 #endif
8942 #ifdef TARGET_NR_shmctl
8943 case TARGET_NR_shmctl:
8944 return do_shmctl(arg1, arg2, arg3);
8945 #endif
8946 #ifdef TARGET_NR_shmat
8947 case TARGET_NR_shmat:
8948 return do_shmat(cpu_env, arg1, arg2, arg3);
8949 #endif
8950 #ifdef TARGET_NR_shmdt
8951 case TARGET_NR_shmdt:
8952 return do_shmdt(arg1);
8953 #endif
8954 case TARGET_NR_fsync:
8955 return get_errno(fsync(arg1));
8956 case TARGET_NR_clone:
8957 /* Linux manages to have three different orderings for its
8958 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
8959 * match the kernel's CONFIG_CLONE_* settings.
8960 * Microblaze is further special in that it uses a sixth
8961 * implicit argument to clone for the TLS pointer.
8963 #if defined(TARGET_MICROBLAZE)
8964 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
8965 #elif defined(TARGET_CLONE_BACKWARDS)
8966 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
8967 #elif defined(TARGET_CLONE_BACKWARDS2)
8968 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
8969 #else
8970 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
8971 #endif
8972 return ret;
8973 #ifdef __NR_exit_group
8974 /* new thread calls */
8975 case TARGET_NR_exit_group:
8976 preexit_cleanup(cpu_env, arg1);
8977 return get_errno(exit_group(arg1));
8978 #endif
8979 case TARGET_NR_setdomainname:
8980 if (!(p = lock_user_string(arg1)))
8981 return -TARGET_EFAULT;
8982 ret = get_errno(setdomainname(p, arg2));
8983 unlock_user(p, arg1, 0);
8984 return ret;
8985 case TARGET_NR_uname:
8986 /* no need to transcode because we use the linux syscall */
8988 struct new_utsname * buf;
8990 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
8991 return -TARGET_EFAULT;
8992 ret = get_errno(sys_uname(buf));
8993 if (!is_error(ret)) {
8994 /* Overwrite the native machine name with whatever is being
8995 emulated. */
8996 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
8997 sizeof(buf->machine));
8998 /* Allow the user to override the reported release. */
8999 if (qemu_uname_release && *qemu_uname_release) {
9000 g_strlcpy(buf->release, qemu_uname_release,
9001 sizeof(buf->release));
9004 unlock_user_struct(buf, arg1, 1);
9006 return ret;
9007 #ifdef TARGET_I386
9008 case TARGET_NR_modify_ldt:
9009 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9010 #if !defined(TARGET_X86_64)
9011 case TARGET_NR_vm86:
9012 return do_vm86(cpu_env, arg1, arg2);
9013 #endif
9014 #endif
9015 case TARGET_NR_adjtimex:
9017 struct timex host_buf;
9019 if (target_to_host_timex(&host_buf, arg1) != 0) {
9020 return -TARGET_EFAULT;
9022 ret = get_errno(adjtimex(&host_buf));
9023 if (!is_error(ret)) {
9024 if (host_to_target_timex(arg1, &host_buf) != 0) {
9025 return -TARGET_EFAULT;
9029 return ret;
9030 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9031 case TARGET_NR_clock_adjtime:
9033 struct timex htx, *phtx = &htx;
9035 if (target_to_host_timex(phtx, arg2) != 0) {
9036 return -TARGET_EFAULT;
9038 ret = get_errno(clock_adjtime(arg1, phtx));
9039 if (!is_error(ret) && phtx) {
9040 if (host_to_target_timex(arg2, phtx) != 0) {
9041 return -TARGET_EFAULT;
9045 return ret;
9046 #endif
9047 case TARGET_NR_getpgid:
9048 return get_errno(getpgid(arg1));
9049 case TARGET_NR_fchdir:
9050 return get_errno(fchdir(arg1));
9051 case TARGET_NR_personality:
9052 return get_errno(personality(arg1));
9053 #ifdef TARGET_NR__llseek /* Not on alpha */
9054 case TARGET_NR__llseek:
9056 int64_t res;
9057 #if !defined(__NR_llseek)
9058 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9059 if (res == -1) {
9060 ret = get_errno(res);
9061 } else {
9062 ret = 0;
9064 #else
9065 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9066 #endif
9067 if ((ret == 0) && put_user_s64(res, arg4)) {
9068 return -TARGET_EFAULT;
9071 return ret;
9072 #endif
9073 #ifdef TARGET_NR_getdents
9074 case TARGET_NR_getdents:
9075 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9076 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9078 struct target_dirent *target_dirp;
9079 struct linux_dirent *dirp;
9080 abi_long count = arg3;
9082 dirp = g_try_malloc(count);
9083 if (!dirp) {
9084 return -TARGET_ENOMEM;
9087 ret = get_errno(sys_getdents(arg1, dirp, count));
9088 if (!is_error(ret)) {
9089 struct linux_dirent *de;
9090 struct target_dirent *tde;
9091 int len = ret;
9092 int reclen, treclen;
9093 int count1, tnamelen;
9095 count1 = 0;
9096 de = dirp;
9097 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9098 return -TARGET_EFAULT;
9099 tde = target_dirp;
9100 while (len > 0) {
9101 reclen = de->d_reclen;
9102 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9103 assert(tnamelen >= 0);
9104 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9105 assert(count1 + treclen <= count);
9106 tde->d_reclen = tswap16(treclen);
9107 tde->d_ino = tswapal(de->d_ino);
9108 tde->d_off = tswapal(de->d_off);
9109 memcpy(tde->d_name, de->d_name, tnamelen);
9110 de = (struct linux_dirent *)((char *)de + reclen);
9111 len -= reclen;
9112 tde = (struct target_dirent *)((char *)tde + treclen);
9113 count1 += treclen;
9115 ret = count1;
9116 unlock_user(target_dirp, arg2, ret);
9118 g_free(dirp);
9120 #else
9122 struct linux_dirent *dirp;
9123 abi_long count = arg3;
9125 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9126 return -TARGET_EFAULT;
9127 ret = get_errno(sys_getdents(arg1, dirp, count));
9128 if (!is_error(ret)) {
9129 struct linux_dirent *de;
9130 int len = ret;
9131 int reclen;
9132 de = dirp;
9133 while (len > 0) {
9134 reclen = de->d_reclen;
9135 if (reclen > len)
9136 break;
9137 de->d_reclen = tswap16(reclen);
9138 tswapls(&de->d_ino);
9139 tswapls(&de->d_off);
9140 de = (struct linux_dirent *)((char *)de + reclen);
9141 len -= reclen;
9144 unlock_user(dirp, arg2, ret);
9146 #endif
9147 #else
9148 /* Implement getdents in terms of getdents64 */
9150 struct linux_dirent64 *dirp;
9151 abi_long count = arg3;
9153 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9154 if (!dirp) {
9155 return -TARGET_EFAULT;
9157 ret = get_errno(sys_getdents64(arg1, dirp, count));
9158 if (!is_error(ret)) {
9159 /* Convert the dirent64 structs to target dirent. We do this
9160 * in-place, since we can guarantee that a target_dirent is no
9161 * larger than a dirent64; however this means we have to be
9162 * careful to read everything before writing in the new format.
9164 struct linux_dirent64 *de;
9165 struct target_dirent *tde;
9166 int len = ret;
9167 int tlen = 0;
9169 de = dirp;
9170 tde = (struct target_dirent *)dirp;
9171 while (len > 0) {
9172 int namelen, treclen;
9173 int reclen = de->d_reclen;
9174 uint64_t ino = de->d_ino;
9175 int64_t off = de->d_off;
9176 uint8_t type = de->d_type;
9178 namelen = strlen(de->d_name);
9179 treclen = offsetof(struct target_dirent, d_name)
9180 + namelen + 2;
9181 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9183 memmove(tde->d_name, de->d_name, namelen + 1);
9184 tde->d_ino = tswapal(ino);
9185 tde->d_off = tswapal(off);
9186 tde->d_reclen = tswap16(treclen);
9187 /* The target_dirent type is in what was formerly a padding
9188 * byte at the end of the structure:
9190 *(((char *)tde) + treclen - 1) = type;
9192 de = (struct linux_dirent64 *)((char *)de + reclen);
9193 tde = (struct target_dirent *)((char *)tde + treclen);
9194 len -= reclen;
9195 tlen += treclen;
9197 ret = tlen;
9199 unlock_user(dirp, arg2, ret);
9201 #endif
9202 return ret;
9203 #endif /* TARGET_NR_getdents */
9204 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9205 case TARGET_NR_getdents64:
9207 struct linux_dirent64 *dirp;
9208 abi_long count = arg3;
9209 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9210 return -TARGET_EFAULT;
9211 ret = get_errno(sys_getdents64(arg1, dirp, count));
9212 if (!is_error(ret)) {
9213 struct linux_dirent64 *de;
9214 int len = ret;
9215 int reclen;
9216 de = dirp;
9217 while (len > 0) {
9218 reclen = de->d_reclen;
9219 if (reclen > len)
9220 break;
9221 de->d_reclen = tswap16(reclen);
9222 tswap64s((uint64_t *)&de->d_ino);
9223 tswap64s((uint64_t *)&de->d_off);
9224 de = (struct linux_dirent64 *)((char *)de + reclen);
9225 len -= reclen;
9228 unlock_user(dirp, arg2, ret);
9230 return ret;
9231 #endif /* TARGET_NR_getdents64 */
9232 #if defined(TARGET_NR__newselect)
9233 case TARGET_NR__newselect:
9234 return do_select(arg1, arg2, arg3, arg4, arg5);
9235 #endif
9236 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9237 # ifdef TARGET_NR_poll
9238 case TARGET_NR_poll:
9239 # endif
9240 # ifdef TARGET_NR_ppoll
9241 case TARGET_NR_ppoll:
9242 # endif
9244 struct target_pollfd *target_pfd;
9245 unsigned int nfds = arg2;
9246 struct pollfd *pfd;
9247 unsigned int i;
9249 pfd = NULL;
9250 target_pfd = NULL;
9251 if (nfds) {
9252 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9253 return -TARGET_EINVAL;
9256 target_pfd = lock_user(VERIFY_WRITE, arg1,
9257 sizeof(struct target_pollfd) * nfds, 1);
9258 if (!target_pfd) {
9259 return -TARGET_EFAULT;
9262 pfd = alloca(sizeof(struct pollfd) * nfds);
9263 for (i = 0; i < nfds; i++) {
9264 pfd[i].fd = tswap32(target_pfd[i].fd);
9265 pfd[i].events = tswap16(target_pfd[i].events);
9269 switch (num) {
9270 # ifdef TARGET_NR_ppoll
9271 case TARGET_NR_ppoll:
9273 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9274 target_sigset_t *target_set;
9275 sigset_t _set, *set = &_set;
9277 if (arg3) {
9278 if (target_to_host_timespec(timeout_ts, arg3)) {
9279 unlock_user(target_pfd, arg1, 0);
9280 return -TARGET_EFAULT;
9282 } else {
9283 timeout_ts = NULL;
9286 if (arg4) {
9287 if (arg5 != sizeof(target_sigset_t)) {
9288 unlock_user(target_pfd, arg1, 0);
9289 return -TARGET_EINVAL;
9292 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9293 if (!target_set) {
9294 unlock_user(target_pfd, arg1, 0);
9295 return -TARGET_EFAULT;
9297 target_to_host_sigset(set, target_set);
9298 } else {
9299 set = NULL;
9302 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9303 set, SIGSET_T_SIZE));
9305 if (!is_error(ret) && arg3) {
9306 host_to_target_timespec(arg3, timeout_ts);
9308 if (arg4) {
9309 unlock_user(target_set, arg4, 0);
9311 break;
9313 # endif
9314 # ifdef TARGET_NR_poll
9315 case TARGET_NR_poll:
9317 struct timespec ts, *pts;
9319 if (arg3 >= 0) {
9320 /* Convert ms to secs, ns */
9321 ts.tv_sec = arg3 / 1000;
9322 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9323 pts = &ts;
9324 } else {
9325 /* -ve poll() timeout means "infinite" */
9326 pts = NULL;
9328 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9329 break;
9331 # endif
9332 default:
9333 g_assert_not_reached();
9336 if (!is_error(ret)) {
9337 for(i = 0; i < nfds; i++) {
9338 target_pfd[i].revents = tswap16(pfd[i].revents);
9341 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9343 return ret;
9344 #endif
9345 case TARGET_NR_flock:
9346 /* NOTE: the flock constant seems to be the same for every
9347 Linux platform */
9348 return get_errno(safe_flock(arg1, arg2));
9349 case TARGET_NR_readv:
9351 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9352 if (vec != NULL) {
9353 ret = get_errno(safe_readv(arg1, vec, arg3));
9354 unlock_iovec(vec, arg2, arg3, 1);
9355 } else {
9356 ret = -host_to_target_errno(errno);
9359 return ret;
9360 case TARGET_NR_writev:
9362 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9363 if (vec != NULL) {
9364 ret = get_errno(safe_writev(arg1, vec, arg3));
9365 unlock_iovec(vec, arg2, arg3, 0);
9366 } else {
9367 ret = -host_to_target_errno(errno);
9370 return ret;
9371 #if defined(TARGET_NR_preadv)
9372 case TARGET_NR_preadv:
9374 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9375 if (vec != NULL) {
9376 unsigned long low, high;
9378 target_to_host_low_high(arg4, arg5, &low, &high);
9379 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9380 unlock_iovec(vec, arg2, arg3, 1);
9381 } else {
9382 ret = -host_to_target_errno(errno);
9385 return ret;
9386 #endif
9387 #if defined(TARGET_NR_pwritev)
9388 case TARGET_NR_pwritev:
9390 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9391 if (vec != NULL) {
9392 unsigned long low, high;
9394 target_to_host_low_high(arg4, arg5, &low, &high);
9395 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9396 unlock_iovec(vec, arg2, arg3, 0);
9397 } else {
9398 ret = -host_to_target_errno(errno);
9401 return ret;
9402 #endif
9403 case TARGET_NR_getsid:
9404 return get_errno(getsid(arg1));
9405 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9406 case TARGET_NR_fdatasync:
9407 return get_errno(fdatasync(arg1));
9408 #endif
9409 #ifdef TARGET_NR__sysctl
9410 case TARGET_NR__sysctl:
9411 /* We don't implement this, but ENOTDIR is always a safe
9412 return value. */
9413 return -TARGET_ENOTDIR;
9414 #endif
9415 case TARGET_NR_sched_getaffinity:
9417 unsigned int mask_size;
9418 unsigned long *mask;
9421 * sched_getaffinity needs multiples of ulong, so need to take
9422 * care of mismatches between target ulong and host ulong sizes.
9424 if (arg2 & (sizeof(abi_ulong) - 1)) {
9425 return -TARGET_EINVAL;
9427 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9429 mask = alloca(mask_size);
9430 memset(mask, 0, mask_size);
9431 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9433 if (!is_error(ret)) {
9434 if (ret > arg2) {
9435 /* More data returned than the caller's buffer will fit.
9436 * This only happens if sizeof(abi_long) < sizeof(long)
9437 * and the caller passed us a buffer holding an odd number
9438 * of abi_longs. If the host kernel is actually using the
9439 * extra 4 bytes then fail EINVAL; otherwise we can just
9440 * ignore them and only copy the interesting part.
9442 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9443 if (numcpus > arg2 * 8) {
9444 return -TARGET_EINVAL;
9446 ret = arg2;
9449 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9450 return -TARGET_EFAULT;
9454 return ret;
9455 case TARGET_NR_sched_setaffinity:
9457 unsigned int mask_size;
9458 unsigned long *mask;
9461 * sched_setaffinity needs multiples of ulong, so need to take
9462 * care of mismatches between target ulong and host ulong sizes.
9464 if (arg2 & (sizeof(abi_ulong) - 1)) {
9465 return -TARGET_EINVAL;
9467 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9468 mask = alloca(mask_size);
9470 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9471 if (ret) {
9472 return ret;
9475 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9477 case TARGET_NR_getcpu:
9479 unsigned cpu, node;
9480 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9481 arg2 ? &node : NULL,
9482 NULL));
9483 if (is_error(ret)) {
9484 return ret;
9486 if (arg1 && put_user_u32(cpu, arg1)) {
9487 return -TARGET_EFAULT;
9489 if (arg2 && put_user_u32(node, arg2)) {
9490 return -TARGET_EFAULT;
9493 return ret;
9494 case TARGET_NR_sched_setparam:
9496 struct sched_param *target_schp;
9497 struct sched_param schp;
9499 if (arg2 == 0) {
9500 return -TARGET_EINVAL;
9502 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9503 return -TARGET_EFAULT;
9504 schp.sched_priority = tswap32(target_schp->sched_priority);
9505 unlock_user_struct(target_schp, arg2, 0);
9506 return get_errno(sched_setparam(arg1, &schp));
9508 case TARGET_NR_sched_getparam:
9510 struct sched_param *target_schp;
9511 struct sched_param schp;
9513 if (arg2 == 0) {
9514 return -TARGET_EINVAL;
9516 ret = get_errno(sched_getparam(arg1, &schp));
9517 if (!is_error(ret)) {
9518 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9519 return -TARGET_EFAULT;
9520 target_schp->sched_priority = tswap32(schp.sched_priority);
9521 unlock_user_struct(target_schp, arg2, 1);
9524 return ret;
9525 case TARGET_NR_sched_setscheduler:
9527 struct sched_param *target_schp;
9528 struct sched_param schp;
9529 if (arg3 == 0) {
9530 return -TARGET_EINVAL;
9532 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9533 return -TARGET_EFAULT;
9534 schp.sched_priority = tswap32(target_schp->sched_priority);
9535 unlock_user_struct(target_schp, arg3, 0);
9536 return get_errno(sched_setscheduler(arg1, arg2, &schp));
9538 case TARGET_NR_sched_getscheduler:
9539 return get_errno(sched_getscheduler(arg1));
9540 case TARGET_NR_sched_yield:
9541 return get_errno(sched_yield());
9542 case TARGET_NR_sched_get_priority_max:
9543 return get_errno(sched_get_priority_max(arg1));
9544 case TARGET_NR_sched_get_priority_min:
9545 return get_errno(sched_get_priority_min(arg1));
9546 case TARGET_NR_sched_rr_get_interval:
9548 struct timespec ts;
9549 ret = get_errno(sched_rr_get_interval(arg1, &ts));
9550 if (!is_error(ret)) {
9551 ret = host_to_target_timespec(arg2, &ts);
9554 return ret;
9555 case TARGET_NR_nanosleep:
9557 struct timespec req, rem;
9558 target_to_host_timespec(&req, arg1);
9559 ret = get_errno(safe_nanosleep(&req, &rem));
9560 if (is_error(ret) && arg2) {
9561 host_to_target_timespec(arg2, &rem);
9564 return ret;
9565 case TARGET_NR_prctl:
9566 switch (arg1) {
9567 case PR_GET_PDEATHSIG:
9569 int deathsig;
9570 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
9571 if (!is_error(ret) && arg2
9572 && put_user_ual(deathsig, arg2)) {
9573 return -TARGET_EFAULT;
9575 return ret;
9577 #ifdef PR_GET_NAME
9578 case PR_GET_NAME:
9580 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
9581 if (!name) {
9582 return -TARGET_EFAULT;
9584 ret = get_errno(prctl(arg1, (unsigned long)name,
9585 arg3, arg4, arg5));
9586 unlock_user(name, arg2, 16);
9587 return ret;
9589 case PR_SET_NAME:
9591 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
9592 if (!name) {
9593 return -TARGET_EFAULT;
9595 ret = get_errno(prctl(arg1, (unsigned long)name,
9596 arg3, arg4, arg5));
9597 unlock_user(name, arg2, 0);
9598 return ret;
9600 #endif
9601 #ifdef TARGET_MIPS
9602 case TARGET_PR_GET_FP_MODE:
9604 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9605 ret = 0;
9606 if (env->CP0_Status & (1 << CP0St_FR)) {
9607 ret |= TARGET_PR_FP_MODE_FR;
9609 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
9610 ret |= TARGET_PR_FP_MODE_FRE;
9612 return ret;
9614 case TARGET_PR_SET_FP_MODE:
9616 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
9617 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
9618 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
9619 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
9620 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
9622 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
9623 TARGET_PR_FP_MODE_FRE;
9625 /* If nothing to change, return right away, successfully. */
9626 if (old_fr == new_fr && old_fre == new_fre) {
9627 return 0;
9629 /* Check the value is valid */
9630 if (arg2 & ~known_bits) {
9631 return -TARGET_EOPNOTSUPP;
9633 /* Setting FRE without FR is not supported. */
9634 if (new_fre && !new_fr) {
9635 return -TARGET_EOPNOTSUPP;
9637 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
9638 /* FR1 is not supported */
9639 return -TARGET_EOPNOTSUPP;
9641 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
9642 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
9643 /* cannot set FR=0 */
9644 return -TARGET_EOPNOTSUPP;
9646 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
9647 /* Cannot set FRE=1 */
9648 return -TARGET_EOPNOTSUPP;
9651 int i;
9652 fpr_t *fpr = env->active_fpu.fpr;
9653 for (i = 0; i < 32 ; i += 2) {
9654 if (!old_fr && new_fr) {
9655 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
9656 } else if (old_fr && !new_fr) {
9657 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
9661 if (new_fr) {
9662 env->CP0_Status |= (1 << CP0St_FR);
9663 env->hflags |= MIPS_HFLAG_F64;
9664 } else {
9665 env->CP0_Status &= ~(1 << CP0St_FR);
9666 env->hflags &= ~MIPS_HFLAG_F64;
9668 if (new_fre) {
9669 env->CP0_Config5 |= (1 << CP0C5_FRE);
9670 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
9671 env->hflags |= MIPS_HFLAG_FRE;
9673 } else {
9674 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
9675 env->hflags &= ~MIPS_HFLAG_FRE;
9678 return 0;
9680 #endif /* MIPS */
9681 #ifdef TARGET_AARCH64
9682 case TARGET_PR_SVE_SET_VL:
9684 * We cannot support either PR_SVE_SET_VL_ONEXEC or
9685 * PR_SVE_VL_INHERIT. Note the kernel definition
9686 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
9687 * even though the current architectural maximum is VQ=16.
9689 ret = -TARGET_EINVAL;
9690 if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
9691 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
9692 CPUARMState *env = cpu_env;
9693 ARMCPU *cpu = arm_env_get_cpu(env);
9694 uint32_t vq, old_vq;
9696 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
9697 vq = MAX(arg2 / 16, 1);
9698 vq = MIN(vq, cpu->sve_max_vq);
9700 if (vq < old_vq) {
9701 aarch64_sve_narrow_vq(env, vq);
9703 env->vfp.zcr_el[1] = vq - 1;
9704 ret = vq * 16;
9706 return ret;
9707 case TARGET_PR_SVE_GET_VL:
9708 ret = -TARGET_EINVAL;
9710 ARMCPU *cpu = arm_env_get_cpu(cpu_env);
9711 if (cpu_isar_feature(aa64_sve, cpu)) {
9712 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
9715 return ret;
9716 case TARGET_PR_PAC_RESET_KEYS:
9718 CPUARMState *env = cpu_env;
9719 ARMCPU *cpu = arm_env_get_cpu(env);
9721 if (arg3 || arg4 || arg5) {
9722 return -TARGET_EINVAL;
9724 if (cpu_isar_feature(aa64_pauth, cpu)) {
9725 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
9726 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
9727 TARGET_PR_PAC_APGAKEY);
9728 if (arg2 == 0) {
9729 arg2 = all;
9730 } else if (arg2 & ~all) {
9731 return -TARGET_EINVAL;
9733 if (arg2 & TARGET_PR_PAC_APIAKEY) {
9734 arm_init_pauth_key(&env->apia_key);
9736 if (arg2 & TARGET_PR_PAC_APIBKEY) {
9737 arm_init_pauth_key(&env->apib_key);
9739 if (arg2 & TARGET_PR_PAC_APDAKEY) {
9740 arm_init_pauth_key(&env->apda_key);
9742 if (arg2 & TARGET_PR_PAC_APDBKEY) {
9743 arm_init_pauth_key(&env->apdb_key);
9745 if (arg2 & TARGET_PR_PAC_APGAKEY) {
9746 arm_init_pauth_key(&env->apga_key);
9748 return 0;
9751 return -TARGET_EINVAL;
9752 #endif /* AARCH64 */
9753 case PR_GET_SECCOMP:
9754 case PR_SET_SECCOMP:
9755 /* Disable seccomp to prevent the target disabling syscalls we
9756 * need. */
9757 return -TARGET_EINVAL;
9758 default:
9759 /* Most prctl options have no pointer arguments */
9760 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
9762 break;
9763 #ifdef TARGET_NR_arch_prctl
9764 case TARGET_NR_arch_prctl:
9765 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9766 return do_arch_prctl(cpu_env, arg1, arg2);
9767 #else
9768 #error unreachable
9769 #endif
9770 #endif
9771 #ifdef TARGET_NR_pread64
9772 case TARGET_NR_pread64:
9773 if (regpairs_aligned(cpu_env, num)) {
9774 arg4 = arg5;
9775 arg5 = arg6;
9777 if (arg2 == 0 && arg3 == 0) {
9778 /* Special-case NULL buffer and zero length, which should succeed */
9779 p = 0;
9780 } else {
9781 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9782 if (!p) {
9783 return -TARGET_EFAULT;
9786 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
9787 unlock_user(p, arg2, ret);
9788 return ret;
9789 case TARGET_NR_pwrite64:
9790 if (regpairs_aligned(cpu_env, num)) {
9791 arg4 = arg5;
9792 arg5 = arg6;
9794 if (arg2 == 0 && arg3 == 0) {
9795 /* Special-case NULL buffer and zero length, which should succeed */
9796 p = 0;
9797 } else {
9798 p = lock_user(VERIFY_READ, arg2, arg3, 1);
9799 if (!p) {
9800 return -TARGET_EFAULT;
9803 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
9804 unlock_user(p, arg2, 0);
9805 return ret;
9806 #endif
9807 case TARGET_NR_getcwd:
9808 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
9809 return -TARGET_EFAULT;
9810 ret = get_errno(sys_getcwd1(p, arg2));
9811 unlock_user(p, arg1, ret);
9812 return ret;
9813 case TARGET_NR_capget:
9814 case TARGET_NR_capset:
9816 struct target_user_cap_header *target_header;
9817 struct target_user_cap_data *target_data = NULL;
9818 struct __user_cap_header_struct header;
9819 struct __user_cap_data_struct data[2];
9820 struct __user_cap_data_struct *dataptr = NULL;
9821 int i, target_datalen;
9822 int data_items = 1;
9824 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
9825 return -TARGET_EFAULT;
9827 header.version = tswap32(target_header->version);
9828 header.pid = tswap32(target_header->pid);
9830 if (header.version != _LINUX_CAPABILITY_VERSION) {
9831 /* Version 2 and up takes pointer to two user_data structs */
9832 data_items = 2;
9835 target_datalen = sizeof(*target_data) * data_items;
9837 if (arg2) {
9838 if (num == TARGET_NR_capget) {
9839 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
9840 } else {
9841 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
9843 if (!target_data) {
9844 unlock_user_struct(target_header, arg1, 0);
9845 return -TARGET_EFAULT;
9848 if (num == TARGET_NR_capset) {
9849 for (i = 0; i < data_items; i++) {
9850 data[i].effective = tswap32(target_data[i].effective);
9851 data[i].permitted = tswap32(target_data[i].permitted);
9852 data[i].inheritable = tswap32(target_data[i].inheritable);
9856 dataptr = data;
9859 if (num == TARGET_NR_capget) {
9860 ret = get_errno(capget(&header, dataptr));
9861 } else {
9862 ret = get_errno(capset(&header, dataptr));
9865 /* The kernel always updates version for both capget and capset */
9866 target_header->version = tswap32(header.version);
9867 unlock_user_struct(target_header, arg1, 1);
9869 if (arg2) {
9870 if (num == TARGET_NR_capget) {
9871 for (i = 0; i < data_items; i++) {
9872 target_data[i].effective = tswap32(data[i].effective);
9873 target_data[i].permitted = tswap32(data[i].permitted);
9874 target_data[i].inheritable = tswap32(data[i].inheritable);
9876 unlock_user(target_data, arg2, target_datalen);
9877 } else {
9878 unlock_user(target_data, arg2, 0);
9881 return ret;
9883 case TARGET_NR_sigaltstack:
9884 return do_sigaltstack(arg1, arg2,
9885 get_sp_from_cpustate((CPUArchState *)cpu_env));
9887 #ifdef CONFIG_SENDFILE
9888 #ifdef TARGET_NR_sendfile
9889 case TARGET_NR_sendfile:
9891 off_t *offp = NULL;
9892 off_t off;
9893 if (arg3) {
9894 ret = get_user_sal(off, arg3);
9895 if (is_error(ret)) {
9896 return ret;
9898 offp = &off;
9900 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9901 if (!is_error(ret) && arg3) {
9902 abi_long ret2 = put_user_sal(off, arg3);
9903 if (is_error(ret2)) {
9904 ret = ret2;
9907 return ret;
9909 #endif
9910 #ifdef TARGET_NR_sendfile64
9911 case TARGET_NR_sendfile64:
9913 off_t *offp = NULL;
9914 off_t off;
9915 if (arg3) {
9916 ret = get_user_s64(off, arg3);
9917 if (is_error(ret)) {
9918 return ret;
9920 offp = &off;
9922 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
9923 if (!is_error(ret) && arg3) {
9924 abi_long ret2 = put_user_s64(off, arg3);
9925 if (is_error(ret2)) {
9926 ret = ret2;
9929 return ret;
9931 #endif
9932 #endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* vfork is fork with the parent suspended until the child execs. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
10013 #ifdef TARGET_NR_lchown
10014 case TARGET_NR_lchown:
10015 if (!(p = lock_user_string(arg1)))
10016 return -TARGET_EFAULT;
10017 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10018 unlock_user(p, arg1, 0);
10019 return ret;
10020 #endif
10021 #ifdef TARGET_NR_getuid
10022 case TARGET_NR_getuid:
10023 return get_errno(high2lowuid(getuid()));
10024 #endif
10025 #ifdef TARGET_NR_getgid
10026 case TARGET_NR_getgid:
10027 return get_errno(high2lowgid(getgid()));
10028 #endif
10029 #ifdef TARGET_NR_geteuid
10030 case TARGET_NR_geteuid:
10031 return get_errno(high2lowuid(geteuid()));
10032 #endif
10033 #ifdef TARGET_NR_getegid
10034 case TARGET_NR_getegid:
10035 return get_errno(high2lowgid(getegid()));
10036 #endif
10037 case TARGET_NR_setreuid:
10038 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10039 case TARGET_NR_setregid:
10040 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10041 case TARGET_NR_getgroups:
10043 int gidsetsize = arg1;
10044 target_id *target_grouplist;
10045 gid_t *grouplist;
10046 int i;
10048 grouplist = alloca(gidsetsize * sizeof(gid_t));
10049 ret = get_errno(getgroups(gidsetsize, grouplist));
10050 if (gidsetsize == 0)
10051 return ret;
10052 if (!is_error(ret)) {
10053 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10054 if (!target_grouplist)
10055 return -TARGET_EFAULT;
10056 for(i = 0;i < ret; i++)
10057 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10058 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10061 return ret;
10062 case TARGET_NR_setgroups:
10064 int gidsetsize = arg1;
10065 target_id *target_grouplist;
10066 gid_t *grouplist = NULL;
10067 int i;
10068 if (gidsetsize) {
10069 grouplist = alloca(gidsetsize * sizeof(gid_t));
10070 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10071 if (!target_grouplist) {
10072 return -TARGET_EFAULT;
10074 for (i = 0; i < gidsetsize; i++) {
10075 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10077 unlock_user(target_grouplist, arg2, 0);
10079 return get_errno(setgroups(gidsetsize, grouplist));
10081 case TARGET_NR_fchown:
10082 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10083 #if defined(TARGET_NR_fchownat)
10084 case TARGET_NR_fchownat:
10085 if (!(p = lock_user_string(arg2)))
10086 return -TARGET_EFAULT;
10087 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10088 low2highgid(arg4), arg5));
10089 unlock_user(p, arg2, 0);
10090 return ret;
10091 #endif
10092 #ifdef TARGET_NR_setresuid
10093 case TARGET_NR_setresuid:
10094 return get_errno(sys_setresuid(low2highuid(arg1),
10095 low2highuid(arg2),
10096 low2highuid(arg3)));
10097 #endif
10098 #ifdef TARGET_NR_getresuid
10099 case TARGET_NR_getresuid:
10101 uid_t ruid, euid, suid;
10102 ret = get_errno(getresuid(&ruid, &euid, &suid));
10103 if (!is_error(ret)) {
10104 if (put_user_id(high2lowuid(ruid), arg1)
10105 || put_user_id(high2lowuid(euid), arg2)
10106 || put_user_id(high2lowuid(suid), arg3))
10107 return -TARGET_EFAULT;
10110 return ret;
10111 #endif
10112 #ifdef TARGET_NR_getresgid
10113 case TARGET_NR_setresgid:
10114 return get_errno(sys_setresgid(low2highgid(arg1),
10115 low2highgid(arg2),
10116 low2highgid(arg3)));
10117 #endif
10118 #ifdef TARGET_NR_getresgid
10119 case TARGET_NR_getresgid:
10121 gid_t rgid, egid, sgid;
10122 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10123 if (!is_error(ret)) {
10124 if (put_user_id(high2lowgid(rgid), arg1)
10125 || put_user_id(high2lowgid(egid), arg2)
10126 || put_user_id(high2lowgid(sgid), arg3))
10127 return -TARGET_EFAULT;
10130 return ret;
10131 #endif
10132 #ifdef TARGET_NR_chown
10133 case TARGET_NR_chown:
10134 if (!(p = lock_user_string(arg1)))
10135 return -TARGET_EFAULT;
10136 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10137 unlock_user(p, arg1, 0);
10138 return ret;
10139 #endif
10140 case TARGET_NR_setuid:
10141 return get_errno(sys_setuid(low2highuid(arg1)));
10142 case TARGET_NR_setgid:
10143 return get_errno(sys_setgid(low2highgid(arg1)));
10144 case TARGET_NR_setfsuid:
10145 return get_errno(setfsuid(arg1));
10146 case TARGET_NR_setfsgid:
10147 return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific: the euid is returned in the second return
     * register (a4); the primary result is the real uid. */
    case TARGET_NR_getxuid:
    {
        uid_t euid;
        euid = geteuid();
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
    }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: egid goes in a4, real gid is the result. */
    case TARGET_NR_getxgid:
    {
        uid_t egid;
        egid = getegid();
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
    }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
        {
            uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);

            /* Copied from linux ieee_fpcr_to_swcr.  */
            swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
            swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
            swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                     | SWCR_TRAP_ENABLE_DZE
                                     | SWCR_TRAP_ENABLE_OVF);
            swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                     | SWCR_TRAP_ENABLE_INE);
            swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
            swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

            if (put_user_u64(swcr, arg2))
                return -TARGET_EFAULT;
            ret = 0;
        }
        break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
        {
            uint64_t swcr, fpcr, orig_fpcr;

            if (get_user_u64(swcr, arg2)) {
                return -TARGET_EFAULT;
            }
            orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
            fpcr = orig_fpcr & FPCR_DYN_MASK;

            /* Copied from linux ieee_swcr_to_fpcr.  */
            fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
            fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
            fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                              | SWCR_TRAP_ENABLE_DZE
                              | SWCR_TRAP_ENABLE_OVF)) << 48;
            fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                              | SWCR_TRAP_ENABLE_INE)) << 57;
            fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
            fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

            cpu_alpha_store_fpcr(cpu_env, fpcr);
            ret = 0;
        }
        break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
        {
            uint64_t exc, fpcr, orig_fpcr;
            int si_code;

            if (get_user_u64(exc, arg2)) {
                return -TARGET_EFAULT;
            }
            orig_fpcr = cpu_alpha_load_fpcr(cpu_env);

            /* We only add to the exception status here.  */
            fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);

            cpu_alpha_store_fpcr(cpu_env, fpcr);
            ret = 0;

            /* Old exceptions are not signaled.  */
            fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

            /* If any exceptions set by this call,
               and are unmasked, send a signal.  */
            si_code = 0;
            if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
                si_code = TARGET_FPE_FLTRES;
            }
            if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
                si_code = TARGET_FPE_FLTUND;
            }
            if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
                si_code = TARGET_FPE_FLTOVF;
            }
            if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
                si_code = TARGET_FPE_FLTDIV;
            }
            if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
                si_code = TARGET_FPE_FLTINV;
            }
            if (si_code != 0) {
                target_siginfo_t info;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_code = si_code;
                info._sifields._sigfault._addr
                    = ((CPUArchState *)cpu_env)->pc;
                queue_signal((CPUArchState *)cpu_env, info.si_signo,
                             QEMU_SI_FAULT, &info);
            }
        }
        break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
    {
        abi_ulong mask;
        int how;
        sigset_t set, oldset;

        /* Map the OSF "how" constants onto the host's.  */
        switch (arg1) {
        case TARGET_SIG_BLOCK:
            how = SIG_BLOCK;
            break;
        case TARGET_SIG_UNBLOCK:
            how = SIG_UNBLOCK;
            break;
        case TARGET_SIG_SETMASK:
            how = SIG_SETMASK;
            break;
        default:
            return -TARGET_EINVAL;
        }
        mask = arg2;
        target_to_host_old_sigset(&set, &mask);
        ret = do_sigprocmask(how, &set, &oldset);
        if (!ret) {
            /* On success the old mask is the syscall's return value. */
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
    {
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        gid_t *grouplist;
        int i;

        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0)
            return ret;
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                         gidsetsize * 4, 0);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < ret; i++)
                target_grouplist[i] = tswap32(grouplist[i]);
            unlock_user(target_grouplist, arg2, gidsetsize * 4);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
    {
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        gid_t *grouplist;
        int i;

        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
        if (!target_grouplist) {
            return -TARGET_EFAULT;
        }
        for (i = 0; i < gidsetsize; i++)
            grouplist[i] = tswap32(target_grouplist[i]);
        unlock_user(target_grouplist, arg2, 0);
        return get_errno(setgroups(gidsetsize, grouplist));
    }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
    {
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u32(ruid, arg1)
                || put_user_u32(euid, arg2)
                || put_user_u32(suid, arg3))
                return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
    {
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u32(rgid, arg1)
                || put_user_u32(egid, arg2)
                || put_user_u32(sgid, arg3))
                return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
    {
        void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
        if (!a) {
            return -TARGET_ENOMEM;
        }
        /* Fix: the result vector was previously locked with
         * lock_user_string(arg3), which scans for a NUL terminator in
         * guest memory and maps it read-only.  mincore() *writes* one
         * byte per page of the range, so map a writable buffer of
         * exactly that many bytes instead. */
        ret = -TARGET_EFAULT;
        p = lock_user(VERIFY_WRITE, arg3,
                      DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE), 0);
        if (p) {
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
        }
        unlock_user(a, arg1, 0);
    }
    return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants from the generic ABI.  */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM uses a differently laid out flock64.  */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
10619 #ifdef TARGET_NR_cacheflush
10620 case TARGET_NR_cacheflush:
10621 /* self-modifying code is handled automatically, so nothing needed */
10622 return 0;
10623 #endif
10624 #ifdef TARGET_NR_getpagesize
10625 case TARGET_NR_getpagesize:
10626 return TARGET_PAGE_SIZE;
10627 #endif
10628 case TARGET_NR_gettid:
10629 return get_errno(gettid());
10630 #ifdef TARGET_NR_readahead
10631 case TARGET_NR_readahead:
10632 #if TARGET_ABI_BITS == 32
10633 if (regpairs_aligned(cpu_env, num)) {
10634 arg2 = arg3;
10635 arg3 = arg4;
10636 arg4 = arg5;
10638 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
10639 #else
10640 ret = get_errno(readahead(arg1, arg2, arg3));
10641 #endif
10642 return ret;
10643 #endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_setxattr) {
                ret = get_errno(setxattr(p, n, v, arg4, arg5));
            } else {
                ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    return ret;
    case TARGET_NR_fsetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
    }
    return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_getxattr) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = get_errno(lgetxattr(p, n, v, arg4));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    return ret;
    case TARGET_NR_fgetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                return -TARGET_EFAULT;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fgetxattr(arg1, n, v, arg4));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
    }
    return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    {
        void *p, *n;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_removexattr) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = get_errno(lremovexattr(p, n));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
    }
    return ret;
    case TARGET_NR_fremovexattr:
    {
        void *n;
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fremovexattr(arg1, n));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
    }
    return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* Fix: the copy-out result was previously discarded, so a
             * bad guest pointer silently returned success; propagate
             * EFAULT like clock_gettime does. */
            if (host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        /* Fix: a fault while reading the request timespec was previously
         * ignored, sleeping on uninitialized data; return EFAULT. */
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        return ret;
    }
#endif
10904 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
10905 case TARGET_NR_set_tid_address:
10906 return get_errno(set_tid_address((int *)g2h(arg1)));
10907 #endif
10909 case TARGET_NR_tkill:
10910 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
10912 case TARGET_NR_tgkill:
10913 return get_errno(safe_tgkill((int)arg1, (int)arg2,
10914 target_to_host_signal(arg3)));
10916 #ifdef TARGET_NR_set_robust_list
10917 case TARGET_NR_set_robust_list:
10918 case TARGET_NR_get_robust_list:
10919 /* The ABI for supporting robust futexes has userspace pass
10920 * the kernel a pointer to a linked list which is updated by
10921 * userspace after the syscall; the list is walked by the kernel
10922 * when the thread exits. Since the linked list in QEMU guest
10923 * memory isn't a valid linked list for the host and we have
10924 * no way to reliably intercept the thread-death event, we can't
10925 * support these. Silently return ENOSYS so that guest userspace
10926 * falls back to a non-robust futex implementation (which should
10927 * be OK except in the corner case of the guest crashing while
10928 * holding a mutex that is shared with another process via
10929 * shared memory).
10931 return -TARGET_ENOSYS;
10932 #endif
10934 #if defined(TARGET_NR_utimensat)
10935 case TARGET_NR_utimensat:
10937 struct timespec *tsp, ts[2];
10938 if (!arg3) {
10939 tsp = NULL;
10940 } else {
10941 target_to_host_timespec(ts, arg3);
10942 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
10943 tsp = ts;
10945 if (!arg2)
10946 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
10947 else {
10948 if (!(p = lock_user_string(arg2))) {
10949 return -TARGET_EFAULT;
10951 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
10952 unlock_user(p, arg2, 0);
10955 return ret;
10956 #endif
10957 case TARGET_NR_futex:
10958 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
10959 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
10960 case TARGET_NR_inotify_init:
10961 ret = get_errno(sys_inotify_init());
10962 if (ret >= 0) {
10963 fd_trans_register(ret, &target_inotify_trans);
10965 return ret;
10966 #endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            /* Register the fd so inotify events are translated for the
             * guest on read. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        /* Fix: lock_user_string() can fail on a bad guest address;
         * previously the NULL was passed straight to path(). */
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
    {
        struct mq_attr posix_mq_attr;
        struct mq_attr *pposix_mq_attr;
        int host_flags;

        host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
        pposix_mq_attr = NULL;
        if (arg4) {
            if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                return -TARGET_EFAULT;
            }
            pposix_mq_attr = &posix_mq_attr;
        }
        /* NOTE(review): the "arg1 - 1" offset is preserved from the
         * original; confirm the intended queue-name addressing before
         * changing it. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
        unlock_user(p, arg1, 0);
        return ret;
    }

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;

    case TARGET_NR_mq_timedsend:
    {
        struct timespec ts;

        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
        }
        unlock_user(p, arg2, arg3);
        return ret;
    }

    case TARGET_NR_mq_timedreceive:
    {
        struct timespec ts;
        unsigned int prio;

        /* Fix: the receive buffer is written by the host call, so it
         * must be mapped writable (was VERIFY_READ, which made the
         * received message invisible to the guest when the page had to
         * be bounce-buffered). */
        p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
        if (arg5 != 0) {
            target_to_host_timespec(&ts, arg5);
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, &ts));
            host_to_target_timespec(arg5, &ts);
        } else {
            ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                 &prio, NULL));
        }
        unlock_user(p, arg2, arg3);
        if (arg4 != 0)
            put_user_u32(prio, arg4);
        return ret;
    }

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /* break; */

    case TARGET_NR_mq_getsetattr:
    {
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
        ret = 0;
        if (arg2 != 0) {
            copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
            ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                       &posix_mq_attr_out));
        } else if (arg3 != 0) {
            ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
        }
        if (ret == 0 && arg3 != 0) {
            copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        }
        return ret;
    }
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
    {
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
    }
    return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
    {
        /* Copy the optional in/out offsets across the call. */
        loff_t loff_in, loff_out;
        loff_t *ploff_in = NULL, *ploff_out = NULL;
        if (arg2) {
            if (get_user_u64(loff_in, arg2)) {
                return -TARGET_EFAULT;
            }
            ploff_in = &loff_in;
        }
        if (arg4) {
            if (get_user_u64(loff_out, arg4)) {
                return -TARGET_EFAULT;
            }
            ploff_out = &loff_out;
        }
        ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        if (arg2) {
            if (put_user_u64(loff_in, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        if (arg4) {
            if (put_user_u64(loff_out, arg4)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
    {
        struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
        if (vec != NULL) {
            ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
            unlock_iovec(vec, arg2, arg3, 0);
        } else {
            ret = -host_to_target_errno(errno);
        }
    }
    return ret;
#endif
#endif /* CONFIG_SPLICE */
11135 #ifdef CONFIG_EVENTFD
11136 #if defined(TARGET_NR_eventfd)
11137 case TARGET_NR_eventfd:
11138 ret = get_errno(eventfd(arg1, 0));
11139 if (ret >= 0) {
11140 fd_trans_register(ret, &target_eventfd_trans);
11142 return ret;
11143 #endif
11144 #if defined(TARGET_NR_eventfd2)
11145 case TARGET_NR_eventfd2:
11147 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11148 if (arg2 & TARGET_O_NONBLOCK) {
11149 host_flags |= O_NONBLOCK;
11151 if (arg2 & TARGET_O_CLOEXEC) {
11152 host_flags |= O_CLOEXEC;
11154 ret = get_errno(eventfd(arg1, host_flags));
11155 if (ret >= 0) {
11156 fd_trans_register(ret, &target_eventfd_trans);
11158 return ret;
11160 #endif
11161 #endif /* CONFIG_EVENTFD */
11162 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11163 case TARGET_NR_fallocate:
11164 #if TARGET_ABI_BITS == 32
11165 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11166 target_offset64(arg5, arg6)));
11167 #else
11168 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11169 #endif
11170 return ret;
11171 #endif
11172 #if defined(CONFIG_SYNC_FILE_RANGE)
11173 #if defined(TARGET_NR_sync_file_range)
11174 case TARGET_NR_sync_file_range:
11175 #if TARGET_ABI_BITS == 32
11176 #if defined(TARGET_MIPS)
11177 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11178 target_offset64(arg5, arg6), arg7));
11179 #else
11180 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11181 target_offset64(arg4, arg5), arg6));
11182 #endif /* !TARGET_MIPS */
11183 #else
11184 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11185 #endif
11186 return ret;
11187 #endif
11188 #if defined(TARGET_NR_sync_file_range2)
11189 case TARGET_NR_sync_file_range2:
11190 /* This is like sync_file_range but the arguments are reordered */
11191 #if TARGET_ABI_BITS == 32
11192 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11193 target_offset64(arg5, arg6), arg2));
11194 #else
11195 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11196 #endif
11197 return ret;
11198 #endif
11199 #endif
11200 #if defined(TARGET_NR_signalfd4)
11201 case TARGET_NR_signalfd4:
11202 return do_signalfd4(arg1, arg2, arg4);
11203 #endif
11204 #if defined(TARGET_NR_signalfd)
11205 case TARGET_NR_signalfd:
11206 return do_signalfd4(arg1, arg2, 0);
11207 #endif
11208 #if defined(CONFIG_EPOLL)
11209 #if defined(TARGET_NR_epoll_create)
11210 case TARGET_NR_epoll_create:
11211 return get_errno(epoll_create(arg1));
11212 #endif
11213 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11214 case TARGET_NR_epoll_create1:
11215 return get_errno(epoll_create1(arg1));
11216 #endif
11217 #if defined(TARGET_NR_epoll_ctl)
11218 case TARGET_NR_epoll_ctl:
11220 struct epoll_event ep;
11221 struct epoll_event *epp = 0;
11222 if (arg4) {
11223 struct target_epoll_event *target_ep;
11224 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11225 return -TARGET_EFAULT;
11227 ep.events = tswap32(target_ep->events);
11228 /* The epoll_data_t union is just opaque data to the kernel,
11229 * so we transfer all 64 bits across and need not worry what
11230 * actual data type it is.
11232 ep.data.u64 = tswap64(target_ep->data.u64);
11233 unlock_user_struct(target_ep, arg4, 0);
11234 epp = &ep;
11236 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11238 #endif
11240 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11241 #if defined(TARGET_NR_epoll_wait)
11242 case TARGET_NR_epoll_wait:
11243 #endif
11244 #if defined(TARGET_NR_epoll_pwait)
11245 case TARGET_NR_epoll_pwait:
11246 #endif
11248 struct target_epoll_event *target_ep;
11249 struct epoll_event *ep;
11250 int epfd = arg1;
11251 int maxevents = arg3;
11252 int timeout = arg4;
11254 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11255 return -TARGET_EINVAL;
11258 target_ep = lock_user(VERIFY_WRITE, arg2,
11259 maxevents * sizeof(struct target_epoll_event), 1);
11260 if (!target_ep) {
11261 return -TARGET_EFAULT;
11264 ep = g_try_new(struct epoll_event, maxevents);
11265 if (!ep) {
11266 unlock_user(target_ep, arg2, 0);
11267 return -TARGET_ENOMEM;
11270 switch (num) {
11271 #if defined(TARGET_NR_epoll_pwait)
11272 case TARGET_NR_epoll_pwait:
11274 target_sigset_t *target_set;
11275 sigset_t _set, *set = &_set;
11277 if (arg5) {
11278 if (arg6 != sizeof(target_sigset_t)) {
11279 ret = -TARGET_EINVAL;
11280 break;
11283 target_set = lock_user(VERIFY_READ, arg5,
11284 sizeof(target_sigset_t), 1);
11285 if (!target_set) {
11286 ret = -TARGET_EFAULT;
11287 break;
11289 target_to_host_sigset(set, target_set);
11290 unlock_user(target_set, arg5, 0);
11291 } else {
11292 set = NULL;
11295 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11296 set, SIGSET_T_SIZE));
11297 break;
11299 #endif
11300 #if defined(TARGET_NR_epoll_wait)
11301 case TARGET_NR_epoll_wait:
11302 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11303 NULL, 0));
11304 break;
11305 #endif
11306 default:
11307 ret = -TARGET_ENOSYS;
11309 if (!is_error(ret)) {
11310 int i;
11311 for (i = 0; i < ret; i++) {
11312 target_ep[i].events = tswap32(ep[i].events);
11313 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11315 unlock_user(target_ep, arg2,
11316 ret * sizeof(struct target_epoll_event));
11317 } else {
11318 unlock_user(target_ep, arg2, 0);
11320 g_free(ep);
11321 return ret;
11323 #endif
11324 #endif
11325 #ifdef TARGET_NR_prlimit64
11326 case TARGET_NR_prlimit64:
11328 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11329 struct target_rlimit64 *target_rnew, *target_rold;
11330 struct host_rlimit64 rnew, rold, *rnewp = 0;
11331 int resource = target_to_host_resource(arg2);
11332 if (arg3) {
11333 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11334 return -TARGET_EFAULT;
11336 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11337 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11338 unlock_user_struct(target_rnew, arg3, 0);
11339 rnewp = &rnew;
11342 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11343 if (!is_error(ret) && arg4) {
11344 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11345 return -TARGET_EFAULT;
11347 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11348 target_rold->rlim_max = tswap64(rold.rlim_max);
11349 unlock_user_struct(target_rold, arg4, 1);
11351 return ret;
11353 #endif
11354 #ifdef TARGET_NR_gethostname
11355 case TARGET_NR_gethostname:
11357 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11358 if (name) {
11359 ret = get_errno(gethostname(name, arg2));
11360 unlock_user(name, arg1, arg2);
11361 } else {
11362 ret = -TARGET_EFAULT;
11364 return ret;
11366 #endif
11367 #ifdef TARGET_NR_atomic_cmpxchg_32
11368 case TARGET_NR_atomic_cmpxchg_32:
11370 /* should use start_exclusive from main.c */
11371 abi_ulong mem_value;
11372 if (get_user_u32(mem_value, arg6)) {
11373 target_siginfo_t info;
11374 info.si_signo = SIGSEGV;
11375 info.si_errno = 0;
11376 info.si_code = TARGET_SEGV_MAPERR;
11377 info._sifields._sigfault._addr = arg6;
11378 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11379 QEMU_SI_FAULT, &info);
11380 ret = 0xdeadbeef;
11383 if (mem_value == arg2)
11384 put_user_u32(arg1, arg6);
11385 return mem_value;
11387 #endif
11388 #ifdef TARGET_NR_atomic_barrier
11389 case TARGET_NR_atomic_barrier:
11390 /* Like the kernel implementation and the
11391 qemu arm barrier, no-op this? */
11392 return 0;
11393 #endif
11395 #ifdef TARGET_NR_timer_create
11396 case TARGET_NR_timer_create:
11398 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11400 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11402 int clkid = arg1;
11403 int timer_index = next_free_host_timer();
11405 if (timer_index < 0) {
11406 ret = -TARGET_EAGAIN;
11407 } else {
11408 timer_t *phtimer = g_posix_timers + timer_index;
11410 if (arg2) {
11411 phost_sevp = &host_sevp;
11412 ret = target_to_host_sigevent(phost_sevp, arg2);
11413 if (ret != 0) {
11414 return ret;
11418 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11419 if (ret) {
11420 phtimer = NULL;
11421 } else {
11422 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11423 return -TARGET_EFAULT;
11427 return ret;
11429 #endif
11431 #ifdef TARGET_NR_timer_settime
11432 case TARGET_NR_timer_settime:
11434 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11435 * struct itimerspec * old_value */
11436 target_timer_t timerid = get_timer_id(arg1);
11438 if (timerid < 0) {
11439 ret = timerid;
11440 } else if (arg3 == 0) {
11441 ret = -TARGET_EINVAL;
11442 } else {
11443 timer_t htimer = g_posix_timers[timerid];
11444 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11446 if (target_to_host_itimerspec(&hspec_new, arg3)) {
11447 return -TARGET_EFAULT;
11449 ret = get_errno(
11450 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11451 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
11452 return -TARGET_EFAULT;
11455 return ret;
11457 #endif
11459 #ifdef TARGET_NR_timer_gettime
11460 case TARGET_NR_timer_gettime:
11462 /* args: timer_t timerid, struct itimerspec *curr_value */
11463 target_timer_t timerid = get_timer_id(arg1);
11465 if (timerid < 0) {
11466 ret = timerid;
11467 } else if (!arg2) {
11468 ret = -TARGET_EFAULT;
11469 } else {
11470 timer_t htimer = g_posix_timers[timerid];
11471 struct itimerspec hspec;
11472 ret = get_errno(timer_gettime(htimer, &hspec));
11474 if (host_to_target_itimerspec(arg2, &hspec)) {
11475 ret = -TARGET_EFAULT;
11478 return ret;
11480 #endif
11482 #ifdef TARGET_NR_timer_getoverrun
11483 case TARGET_NR_timer_getoverrun:
11485 /* args: timer_t timerid */
11486 target_timer_t timerid = get_timer_id(arg1);
11488 if (timerid < 0) {
11489 ret = timerid;
11490 } else {
11491 timer_t htimer = g_posix_timers[timerid];
11492 ret = get_errno(timer_getoverrun(htimer));
11494 fd_trans_unregister(ret);
11495 return ret;
11497 #endif
11499 #ifdef TARGET_NR_timer_delete
11500 case TARGET_NR_timer_delete:
11502 /* args: timer_t timerid */
11503 target_timer_t timerid = get_timer_id(arg1);
11505 if (timerid < 0) {
11506 ret = timerid;
11507 } else {
11508 timer_t htimer = g_posix_timers[timerid];
11509 ret = get_errno(timer_delete(htimer));
11510 g_posix_timers[timerid] = 0;
11512 return ret;
11514 #endif
11516 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11517 case TARGET_NR_timerfd_create:
11518 return get_errno(timerfd_create(arg1,
11519 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11520 #endif
11522 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11523 case TARGET_NR_timerfd_gettime:
11525 struct itimerspec its_curr;
11527 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11529 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11530 return -TARGET_EFAULT;
11533 return ret;
11534 #endif
11536 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11537 case TARGET_NR_timerfd_settime:
11539 struct itimerspec its_new, its_old, *p_new;
11541 if (arg3) {
11542 if (target_to_host_itimerspec(&its_new, arg3)) {
11543 return -TARGET_EFAULT;
11545 p_new = &its_new;
11546 } else {
11547 p_new = NULL;
11550 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11552 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11553 return -TARGET_EFAULT;
11556 return ret;
11557 #endif
11559 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11560 case TARGET_NR_ioprio_get:
11561 return get_errno(ioprio_get(arg1, arg2));
11562 #endif
11564 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11565 case TARGET_NR_ioprio_set:
11566 return get_errno(ioprio_set(arg1, arg2, arg3));
11567 #endif
11569 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11570 case TARGET_NR_setns:
11571 return get_errno(setns(arg1, arg2));
11572 #endif
11573 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11574 case TARGET_NR_unshare:
11575 return get_errno(unshare(arg1));
11576 #endif
11577 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
11578 case TARGET_NR_kcmp:
11579 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
11580 #endif
11581 #ifdef TARGET_NR_swapcontext
11582 case TARGET_NR_swapcontext:
11583 /* PowerPC specific. */
11584 return do_swapcontext(cpu_env, arg1, arg2, arg3);
11585 #endif
11587 default:
11588 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
11589 return -TARGET_ENOSYS;
11591 return ret;
11594 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
11595 abi_long arg2, abi_long arg3, abi_long arg4,
11596 abi_long arg5, abi_long arg6, abi_long arg7,
11597 abi_long arg8)
11599 CPUState *cpu = ENV_GET_CPU(cpu_env);
11600 abi_long ret;
11602 #ifdef DEBUG_ERESTARTSYS
11603 /* Debug-only code for exercising the syscall-restart code paths
11604 * in the per-architecture cpu main loops: restart every syscall
11605 * the guest makes once before letting it through.
11608 static bool flag;
11609 flag = !flag;
11610 if (flag) {
11611 return -TARGET_ERESTARTSYS;
11614 #endif
11616 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4,
11617 arg5, arg6, arg7, arg8);
11619 if (unlikely(do_strace)) {
11620 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
11621 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11622 arg5, arg6, arg7, arg8);
11623 print_syscall_ret(num, ret);
11624 } else {
11625 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
11626 arg5, arg6, arg7, arg8);
11629 trace_guest_user_syscall_ret(cpu, num, ret);
11630 return ret;