linux-user: Use `qemu_log' for non-strace logging
[qemu/kevin.git] / linux-user / syscall.c
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef CONFIG_KCOV
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "qemu/guest-random.h"
#include "user/syscall-trace.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
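/*
 * Editor's illustration (not from the original source): glibc's
 * pthread_create() issues clone() with
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS, only extras from
 * CLONE_OPTIONAL_THREAD_FLAGS, and no bit of CLONE_INVALID_THREAD_FLAGS,
 * so the masks above classify it as a supportable thread creation.
 */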
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
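/*
 * Editor's illustration (not in the original source): with __NR_sys_gettid
 * aliased to __NR_gettid above, the _syscall0() invocation expands to the
 * wrapper below, which returns -1 and sets the host errno on failure just
 * like the other _syscallN() wrappers. Compiled out:
 */
#if 0
static int sys_gettid(void)
{
    return syscall(__NR_sys_gettid); /* i.e. syscall(__NR_gettid) */
}
#endif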
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env, int num)
{
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
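/*
 * Editor's illustration (not from the original source): when
 * regpairs_aligned() returns 1, a 64-bit syscall argument occupies an
 * even/odd register pair, leaving a padding slot before it. For example,
 * for ARM EABI pread64(fd, buf, count, offset), the syscall dispatcher
 * later in this file (beyond this excerpt) shifts the arguments up
 * ("arg4 = arg5; arg5 = arg6;") and then reassembles the 64-bit offset
 * from the pair with target_offset64(arg4, arg5).
 */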
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EAGAIN] = TARGET_EAGAIN,
    [EIDRM] = TARGET_EIDRM,
    [ECHRNG] = TARGET_ECHRNG,
    [EL2NSYNC] = TARGET_EL2NSYNC,
    [EL3HLT] = TARGET_EL3HLT,
    [EL3RST] = TARGET_EL3RST,
    [ELNRNG] = TARGET_ELNRNG,
    [EUNATCH] = TARGET_EUNATCH,
    [ENOCSI] = TARGET_ENOCSI,
    [EL2HLT] = TARGET_EL2HLT,
    [EDEADLK] = TARGET_EDEADLK,
    [ENOLCK] = TARGET_ENOLCK,
    [EBADE] = TARGET_EBADE,
    [EBADR] = TARGET_EBADR,
    [EXFULL] = TARGET_EXFULL,
    [ENOANO] = TARGET_ENOANO,
    [EBADRQC] = TARGET_EBADRQC,
    [EBADSLT] = TARGET_EBADSLT,
    [EBFONT] = TARGET_EBFONT,
    [ENOSTR] = TARGET_ENOSTR,
    [ENODATA] = TARGET_ENODATA,
    [ETIME] = TARGET_ETIME,
    [ENOSR] = TARGET_ENOSR,
    [ENONET] = TARGET_ENONET,
    [ENOPKG] = TARGET_ENOPKG,
    [EREMOTE] = TARGET_EREMOTE,
    [ENOLINK] = TARGET_ENOLINK,
    [EADV] = TARGET_EADV,
    [ESRMNT] = TARGET_ESRMNT,
    [ECOMM] = TARGET_ECOMM,
    [EPROTO] = TARGET_EPROTO,
    [EDOTDOT] = TARGET_EDOTDOT,
    [EMULTIHOP] = TARGET_EMULTIHOP,
    [EBADMSG] = TARGET_EBADMSG,
    [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
    [EOVERFLOW] = TARGET_EOVERFLOW,
    [ENOTUNIQ] = TARGET_ENOTUNIQ,
    [EBADFD] = TARGET_EBADFD,
    [EREMCHG] = TARGET_EREMCHG,
    [ELIBACC] = TARGET_ELIBACC,
    [ELIBBAD] = TARGET_ELIBBAD,
    [ELIBSCN] = TARGET_ELIBSCN,
    [ELIBMAX] = TARGET_ELIBMAX,
    [ELIBEXEC] = TARGET_ELIBEXEC,
    [EILSEQ] = TARGET_EILSEQ,
    [ENOSYS] = TARGET_ENOSYS,
    [ELOOP] = TARGET_ELOOP,
    [ERESTART] = TARGET_ERESTART,
    [ESTRPIPE] = TARGET_ESTRPIPE,
    [ENOTEMPTY] = TARGET_ENOTEMPTY,
    [EUSERS] = TARGET_EUSERS,
    [ENOTSOCK] = TARGET_ENOTSOCK,
    [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
    [EMSGSIZE] = TARGET_EMSGSIZE,
    [EPROTOTYPE] = TARGET_EPROTOTYPE,
    [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
    [EADDRINUSE] = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
    [ENETDOWN] = TARGET_ENETDOWN,
    [ENETUNREACH] = TARGET_ENETUNREACH,
    [ENETRESET] = TARGET_ENETRESET,
    [ECONNABORTED] = TARGET_ECONNABORTED,
    [ECONNRESET] = TARGET_ECONNRESET,
    [ENOBUFS] = TARGET_ENOBUFS,
    [EISCONN] = TARGET_EISCONN,
    [ENOTCONN] = TARGET_ENOTCONN,
    [EUCLEAN] = TARGET_EUCLEAN,
    [ENOTNAM] = TARGET_ENOTNAM,
    [ENAVAIL] = TARGET_ENAVAIL,
    [EISNAM] = TARGET_EISNAM,
    [EREMOTEIO] = TARGET_EREMOTEIO,
    [EDQUOT] = TARGET_EDQUOT,
    [ESHUTDOWN] = TARGET_ESHUTDOWN,
    [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
    [ETIMEDOUT] = TARGET_ETIMEDOUT,
    [ECONNREFUSED] = TARGET_ECONNREFUSED,
    [EHOSTDOWN] = TARGET_EHOSTDOWN,
    [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
    [EALREADY] = TARGET_EALREADY,
    [EINPROGRESS] = TARGET_EINPROGRESS,
    [ESTALE] = TARGET_ESTALE,
    [ECANCELED] = TARGET_ECANCELED,
    [ENOMEDIUM] = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY] = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED] = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED] = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD] = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
#endif
#ifdef ENOMSG
    [ENOMSG] = TARGET_ENOMSG,
#endif
#ifdef ERFKILL
    [ERFKILL] = TARGET_ERFKILL,
#endif
#ifdef EHWPOISON
    [EHWPOISON] = TARGET_EHWPOISON,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        host_to_target_errno_table[err]) {
        return host_to_target_errno_table[err];
    }
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (err >= 0 && err < ERRNO_TABLE_SIZE &&
        target_to_host_errno_table[err]) {
        return target_to_host_errno_table[err];
    }
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}
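/*
 * Editor's illustration of the convention used throughout this file:
 * the host wrappers return -1 with the host errno set, and get_errno()
 * folds that into the "negative target errno" return convention, e.g.
 *
 *     ret = get_errno(safe_read(fd, p, count));
 *     if (is_error(ret)) {
 *         ... ret is -TARGET_Exxx, ready to hand back to the guest ...
 *     }
 */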
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
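/*
 * Editor's note (summarising the safe_syscall() contract documented in
 * qemu.h, not restated in the original at this point): a generated
 * safe_foo() wrapper is called exactly like the plain libc function, e.g.
 *
 *     ret = get_errno(safe_openat(dirfd, path(p), flags, mode));
 *
 * but, unlike a direct syscall(), it guarantees that if a guest signal
 * arrives before the host syscall actually starts, the call fails with
 * errno set to TARGET_ERESTARTSYS instead of blocking, so the main loop
 * can deliver the signal and then restart the syscall.
 */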
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
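/*
 * Editor's worked example (assuming 4 KiB host pages, values hypothetical):
 * with target_brk == 0x10400 and brk_page == 0x11000, a guest brk(0x10800)
 * stays within the reserved page range, so do_brk() only zeroes
 * [0x10400, 0x10800) and updates target_brk. A later brk(0x13000) exceeds
 * brk_page, so HOST_PAGE_ALIGN(0x13000 - 0x11000) == 0x2000 bytes of
 * anonymous memory are requested at 0x11000; if the kernel places the
 * mapping anywhere else, it is unmapped again and the old break is
 * returned (or -TARGET_ENOMEM on Alpha/OSF/1).
 */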
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
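/*
 * Editor's illustration of the layout the two converters above handle:
 * the guest fd_set is an array of abi_ulong words, each holding
 * TARGET_ABI_BITS bits in guest byte order. With TARGET_ABI_BITS == 32,
 * guest fd 33 is bit 1 of word 1 (i == 1, j == 1, so k == 33), so
 * copy_from_user_fdset() byteswaps word 1 via __get_user() and then
 * calls FD_SET(33, fds) on the host-side set.
 */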
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
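/*
 * Editor's worked example (assuming an Alpha guest, whose
 * target_syscall.h defines TARGET_HZ as 1024): on a typical x86 host
 * times() reports HOST_HZ == 100 ticks, so 250 host ticks (2.5 s) become
 * (int64_t)250 * 1024 / 100 == 2560 guest ticks. The int64_t cast keeps
 * the intermediate product from overflowing a 32-bit long.
 */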
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
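/*
 * Editor's note on the special-cased targets above: on those ABIs the
 * original pipe() returns both descriptors in registers rather than
 * through the user buffer. For example, a MIPS o32 guest receives fd[0]
 * as the normal return value in $v0 and fd[1] in $v1 (active_tc.gpr[3]),
 * which is why the emulation writes the second fd into the CPU state
 * instead of guest memory for the non-pipe2 path.
 */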
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1615 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1616 struct msghdr *msgh)
1618 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1619 abi_long msg_controllen;
1620 abi_ulong target_cmsg_addr;
1621 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1622 socklen_t space = 0;
1624 msg_controllen = tswapal(target_msgh->msg_controllen);
1625 if (msg_controllen < sizeof (struct target_cmsghdr))
1626 goto the_end;
1627 target_cmsg_addr = tswapal(target_msgh->msg_control);
1628 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1629 target_cmsg_start = target_cmsg;
1630 if (!target_cmsg)
1631 return -TARGET_EFAULT;
1633 while (cmsg && target_cmsg) {
1634 void *data = CMSG_DATA(cmsg);
1635 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1637 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1638 int tgt_len, tgt_space;
1640 /* We never copy a half-header but may copy half-data;
1641 * this is Linux's behaviour in put_cmsg(). Note that
1642 * truncation here is a guest problem (which we report
1643 * to the guest via the CTRUNC bit), unlike truncation
1644 * in target_to_host_cmsg, which is a QEMU bug.
1646 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1647 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1648 break;
1651 if (cmsg->cmsg_level == SOL_SOCKET) {
1652 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1653 } else {
1654 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1656 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1658 /* Payload types which need a different size of payload on
1659 * the target must adjust tgt_len here.
1661 tgt_len = len;
1662 switch (cmsg->cmsg_level) {
1663 case SOL_SOCKET:
1664 switch (cmsg->cmsg_type) {
1665 case SO_TIMESTAMP:
1666 tgt_len = sizeof(struct target_timeval);
1667 break;
1668 default:
1669 break;
1671 break;
1672 default:
1673 break;
1676 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1677 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1678 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1681 /* We must now copy-and-convert len bytes of payload
1682 * into tgt_len bytes of destination space. Bear in mind
1683 * that in both source and destination we may be dealing
1684 * with a truncated value!
1686 switch (cmsg->cmsg_level) {
1687 case SOL_SOCKET:
1688 switch (cmsg->cmsg_type) {
1689 case SCM_RIGHTS:
1691 int *fd = (int *)data;
1692 int *target_fd = (int *)target_data;
1693 int i, numfds = tgt_len / sizeof(int);
1695 for (i = 0; i < numfds; i++) {
1696 __put_user(fd[i], target_fd + i);
1698 break;
1700 case SO_TIMESTAMP:
1702 struct timeval *tv = (struct timeval *)data;
1703 struct target_timeval *target_tv =
1704 (struct target_timeval *)target_data;
1706 if (len != sizeof(struct timeval) ||
1707 tgt_len != sizeof(struct target_timeval)) {
1708 goto unimplemented;
1711 /* copy struct timeval to target */
1712 __put_user(tv->tv_sec, &target_tv->tv_sec);
1713 __put_user(tv->tv_usec, &target_tv->tv_usec);
1714 break;
1716 case SCM_CREDENTIALS:
1718 struct ucred *cred = (struct ucred *)data;
1719 struct target_ucred *target_cred =
1720 (struct target_ucred *)target_data;
1722 __put_user(cred->pid, &target_cred->pid);
1723 __put_user(cred->uid, &target_cred->uid);
1724 __put_user(cred->gid, &target_cred->gid);
1725 break;
1727 default:
1728 goto unimplemented;
1730 break;
1732 case SOL_IP:
1733 switch (cmsg->cmsg_type) {
1734 case IP_TTL:
1736 uint32_t *v = (uint32_t *)data;
1737 uint32_t *t_int = (uint32_t *)target_data;
1739 if (len != sizeof(uint32_t) ||
1740 tgt_len != sizeof(uint32_t)) {
1741 goto unimplemented;
1743 __put_user(*v, t_int);
1744 break;
1746 case IP_RECVERR:
1748 struct errhdr_t {
1749 struct sock_extended_err ee;
1750 struct sockaddr_in offender;
1752 struct errhdr_t *errh = (struct errhdr_t *)data;
1753 struct errhdr_t *target_errh =
1754 (struct errhdr_t *)target_data;
1756 if (len != sizeof(struct errhdr_t) ||
1757 tgt_len != sizeof(struct errhdr_t)) {
1758 goto unimplemented;
1760 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1761 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1762 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1763 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1764 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1765 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1766 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1767 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1768 (void *) &errh->offender, sizeof(errh->offender));
1769 break;
1771 default:
1772 goto unimplemented;
1774 break;
1776 case SOL_IPV6:
1777 switch (cmsg->cmsg_type) {
1778 case IPV6_HOPLIMIT:
1780 uint32_t *v = (uint32_t *)data;
1781 uint32_t *t_int = (uint32_t *)target_data;
1783 if (len != sizeof(uint32_t) ||
1784 tgt_len != sizeof(uint32_t)) {
1785 goto unimplemented;
1787 __put_user(*v, t_int);
1788 break;
1790 case IPV6_RECVERR:
1792 struct errhdr6_t {
1793 struct sock_extended_err ee;
1794 struct sockaddr_in6 offender;
1796 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1797 struct errhdr6_t *target_errh =
1798 (struct errhdr6_t *)target_data;
1800 if (len != sizeof(struct errhdr6_t) ||
1801 tgt_len != sizeof(struct errhdr6_t)) {
1802 goto unimplemented;
1804 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1805 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1806 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1807 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1808 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1809 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1810 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1811 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1812 (void *) &errh->offender, sizeof(errh->offender));
1813 break;
1815 default:
1816 goto unimplemented;
1818 break;
1820 default:
1821 unimplemented:
1822 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1823 cmsg->cmsg_level, cmsg->cmsg_type);
1824 memcpy(target_data, data, MIN(len, tgt_len));
1825 if (tgt_len > len) {
1826 memset(target_data + len, 0, tgt_len - len);
1830 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
1831 tgt_space = TARGET_CMSG_SPACE(tgt_len);
1832 if (msg_controllen < tgt_space) {
1833 tgt_space = msg_controllen;
1835 msg_controllen -= tgt_space;
1836 space += tgt_space;
1837 cmsg = CMSG_NXTHDR(msgh, cmsg);
1838 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1839 target_cmsg_start);
1841 unlock_user(target_cmsg, target_cmsg_addr, space);
1842 the_end:
1843 target_msgh->msg_controllen = tswapal(space);
1844 return 0;
1847 /* do_setsockopt() must return target values and target errnos. */
1848 static abi_long do_setsockopt(int sockfd, int level, int optname,
1849 abi_ulong optval_addr, socklen_t optlen)
1851 abi_long ret;
1852 int val;
1853 struct ip_mreqn *ip_mreq;
1854 struct ip_mreq_source *ip_mreq_source;
1856 switch (level) {
1857 case SOL_TCP:
1858 /* TCP options all take an 'int' value. */
1859 if (optlen < sizeof(uint32_t))
1860 return -TARGET_EINVAL;
1862 if (get_user_u32(val, optval_addr))
1863 return -TARGET_EFAULT;
1864 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1865 break;
1866 case SOL_IP:
1867 switch (optname) {
1868 case IP_TOS:
1869 case IP_TTL:
1870 case IP_HDRINCL:
1871 case IP_ROUTER_ALERT:
1872 case IP_RECVOPTS:
1873 case IP_RETOPTS:
1874 case IP_PKTINFO:
1875 case IP_MTU_DISCOVER:
1876 case IP_RECVERR:
1877 case IP_RECVTTL:
1878 case IP_RECVTOS:
1879 #ifdef IP_FREEBIND
1880 case IP_FREEBIND:
1881 #endif
1882 case IP_MULTICAST_TTL:
1883 case IP_MULTICAST_LOOP:
1884 val = 0;
1885 if (optlen >= sizeof(uint32_t)) {
1886 if (get_user_u32(val, optval_addr))
1887 return -TARGET_EFAULT;
1888 } else if (optlen >= 1) {
1889 if (get_user_u8(val, optval_addr))
1890 return -TARGET_EFAULT;
1892 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1893 break;
1894 case IP_ADD_MEMBERSHIP:
1895 case IP_DROP_MEMBERSHIP:
1896 if (optlen < sizeof (struct target_ip_mreq) ||
1897 optlen > sizeof (struct target_ip_mreqn))
1898 return -TARGET_EINVAL;
1900 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1901 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1902 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1903 break;
1905 case IP_BLOCK_SOURCE:
1906 case IP_UNBLOCK_SOURCE:
1907 case IP_ADD_SOURCE_MEMBERSHIP:
1908 case IP_DROP_SOURCE_MEMBERSHIP:
1909 if (optlen != sizeof (struct target_ip_mreq_source))
1910 return -TARGET_EINVAL;
1912 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1913 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1914 unlock_user(ip_mreq_source, optval_addr, 0);
1915 break;
1917 default:
1918 goto unimplemented;
1920 break;
1921 case SOL_IPV6:
1922 switch (optname) {
1923 case IPV6_MTU_DISCOVER:
1924 case IPV6_MTU:
1925 case IPV6_V6ONLY:
1926 case IPV6_RECVPKTINFO:
1927 case IPV6_UNICAST_HOPS:
1928 case IPV6_MULTICAST_HOPS:
1929 case IPV6_MULTICAST_LOOP:
1930 case IPV6_RECVERR:
1931 case IPV6_RECVHOPLIMIT:
1932 case IPV6_2292HOPLIMIT:
1933 case IPV6_CHECKSUM:
1934 case IPV6_ADDRFORM:
1935 case IPV6_2292PKTINFO:
1936 case IPV6_RECVTCLASS:
1937 case IPV6_RECVRTHDR:
1938 case IPV6_2292RTHDR:
1939 case IPV6_RECVHOPOPTS:
1940 case IPV6_2292HOPOPTS:
1941 case IPV6_RECVDSTOPTS:
1942 case IPV6_2292DSTOPTS:
1943 case IPV6_TCLASS:
1944 #ifdef IPV6_RECVPATHMTU
1945 case IPV6_RECVPATHMTU:
1946 #endif
1947 #ifdef IPV6_TRANSPARENT
1948 case IPV6_TRANSPARENT:
1949 #endif
1950 #ifdef IPV6_FREEBIND
1951 case IPV6_FREEBIND:
1952 #endif
1953 #ifdef IPV6_RECVORIGDSTADDR
1954 case IPV6_RECVORIGDSTADDR:
1955 #endif
1956 val = 0;
1957 if (optlen < sizeof(uint32_t)) {
1958 return -TARGET_EINVAL;
1960 if (get_user_u32(val, optval_addr)) {
1961 return -TARGET_EFAULT;
1963 ret = get_errno(setsockopt(sockfd, level, optname,
1964 &val, sizeof(val)));
1965 break;
1966 case IPV6_PKTINFO:
1968 struct in6_pktinfo pki;
1970 if (optlen < sizeof(pki)) {
1971 return -TARGET_EINVAL;
1974 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
1975 return -TARGET_EFAULT;
1978 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
1980 ret = get_errno(setsockopt(sockfd, level, optname,
1981 &pki, sizeof(pki)));
1982 break;
1984 case IPV6_ADD_MEMBERSHIP:
1985 case IPV6_DROP_MEMBERSHIP:
1987 struct ipv6_mreq ipv6mreq;
1989 if (optlen < sizeof(ipv6mreq)) {
1990 return -TARGET_EINVAL;
1993 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
1994 return -TARGET_EFAULT;
1997 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
1999 ret = get_errno(setsockopt(sockfd, level, optname,
2000 &ipv6mreq, sizeof(ipv6mreq)));
2001 break;
2003 default:
2004 goto unimplemented;
2006 break;
2007 case SOL_ICMPV6:
2008 switch (optname) {
2009 case ICMPV6_FILTER:
2011 struct icmp6_filter icmp6f;
2013 if (optlen > sizeof(icmp6f)) {
2014 optlen = sizeof(icmp6f);
2017 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2018 return -TARGET_EFAULT;
2021 for (val = 0; val < 8; val++) {
2022 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2025 ret = get_errno(setsockopt(sockfd, level, optname,
2026 &icmp6f, optlen));
2027 break;
2029 default:
2030 goto unimplemented;
2032 break;
2033 case SOL_RAW:
2034 switch (optname) {
2035 case ICMP_FILTER:
2036 case IPV6_CHECKSUM:
2037 /* these take a u32 value */
2038 if (optlen < sizeof(uint32_t)) {
2039 return -TARGET_EINVAL;
2042 if (get_user_u32(val, optval_addr)) {
2043 return -TARGET_EFAULT;
2045 ret = get_errno(setsockopt(sockfd, level, optname,
2046 &val, sizeof(val)));
2047 break;
2049 default:
2050 goto unimplemented;
2052 break;
2053 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2054 case SOL_ALG:
2055 switch (optname) {
2056 case ALG_SET_KEY:
2058 char *alg_key = g_malloc(optlen);
2060 if (!alg_key) {
2061 return -TARGET_ENOMEM;
2063 if (copy_from_user(alg_key, optval_addr, optlen)) {
2064 g_free(alg_key);
2065 return -TARGET_EFAULT;
2067 ret = get_errno(setsockopt(sockfd, level, optname,
2068 alg_key, optlen));
2069 g_free(alg_key);
2070 break;
2072 case ALG_SET_AEAD_AUTHSIZE:
2074 ret = get_errno(setsockopt(sockfd, level, optname,
2075 NULL, optlen));
2076 break;
2078 default:
2079 goto unimplemented;
2081 break;
2082 #endif
2083 case TARGET_SOL_SOCKET:
2084 switch (optname) {
2085 case TARGET_SO_RCVTIMEO:
2087 struct timeval tv;
2089 optname = SO_RCVTIMEO;
2091 set_timeout:
2092 if (optlen != sizeof(struct target_timeval)) {
2093 return -TARGET_EINVAL;
2096 if (copy_from_user_timeval(&tv, optval_addr)) {
2097 return -TARGET_EFAULT;
2100 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2101 &tv, sizeof(tv)));
2102 return ret;
2104 case TARGET_SO_SNDTIMEO:
2105 optname = SO_SNDTIMEO;
2106 goto set_timeout;
2107 case TARGET_SO_ATTACH_FILTER:
2109 struct target_sock_fprog *tfprog;
2110 struct target_sock_filter *tfilter;
2111 struct sock_fprog fprog;
2112 struct sock_filter *filter;
2113 int i;
2115 if (optlen != sizeof(*tfprog)) {
2116 return -TARGET_EINVAL;
2118 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2119 return -TARGET_EFAULT;
2121 if (!lock_user_struct(VERIFY_READ, tfilter,
2122 tswapal(tfprog->filter), 0)) {
2123 unlock_user_struct(tfprog, optval_addr, 1);
2124 return -TARGET_EFAULT;
2127 fprog.len = tswap16(tfprog->len);
2128 filter = g_try_new(struct sock_filter, fprog.len);
2129 if (filter == NULL) {
2130 unlock_user_struct(tfilter, tfprog->filter, 1);
2131 unlock_user_struct(tfprog, optval_addr, 1);
2132 return -TARGET_ENOMEM;
2134 for (i = 0; i < fprog.len; i++) {
2135 filter[i].code = tswap16(tfilter[i].code);
2136 filter[i].jt = tfilter[i].jt;
2137 filter[i].jf = tfilter[i].jf;
2138 filter[i].k = tswap32(tfilter[i].k);
2140 fprog.filter = filter;
2142 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2143 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2144 g_free(filter);
2146 unlock_user_struct(tfilter, tfprog->filter, 1);
2147 unlock_user_struct(tfprog, optval_addr, 1);
2148 return ret;
2150 case TARGET_SO_BINDTODEVICE:
2152 char *dev_ifname, *addr_ifname;
2154 if (optlen > IFNAMSIZ - 1) {
2155 optlen = IFNAMSIZ - 1;
2157 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2158 if (!dev_ifname) {
2159 return -TARGET_EFAULT;
2161 optname = SO_BINDTODEVICE;
2162 addr_ifname = alloca(IFNAMSIZ);
2163 memcpy(addr_ifname, dev_ifname, optlen);
2164 addr_ifname[optlen] = 0;
2165 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2166 addr_ifname, optlen));
2167 unlock_user(dev_ifname, optval_addr, 0);
2168 return ret;
2170 case TARGET_SO_LINGER:
2172 struct linger lg;
2173 struct target_linger *tlg;
2175 if (optlen != sizeof(struct target_linger)) {
2176 return -TARGET_EINVAL;
2178 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2179 return -TARGET_EFAULT;
2181 __get_user(lg.l_onoff, &tlg->l_onoff);
2182 __get_user(lg.l_linger, &tlg->l_linger);
2183 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2184 &lg, sizeof(lg)));
2185 unlock_user_struct(tlg, optval_addr, 0);
2186 return ret;
2188 /* Options with 'int' argument. */
2189 case TARGET_SO_DEBUG:
2190 optname = SO_DEBUG;
2191 break;
2192 case TARGET_SO_REUSEADDR:
2193 optname = SO_REUSEADDR;
2194 break;
2195 #ifdef SO_REUSEPORT
2196 case TARGET_SO_REUSEPORT:
2197 optname = SO_REUSEPORT;
2198 break;
2199 #endif
2200 case TARGET_SO_TYPE:
2201 optname = SO_TYPE;
2202 break;
2203 case TARGET_SO_ERROR:
2204 optname = SO_ERROR;
2205 break;
2206 case TARGET_SO_DONTROUTE:
2207 optname = SO_DONTROUTE;
2208 break;
2209 case TARGET_SO_BROADCAST:
2210 optname = SO_BROADCAST;
2211 break;
2212 case TARGET_SO_SNDBUF:
2213 optname = SO_SNDBUF;
2214 break;
2215 case TARGET_SO_SNDBUFFORCE:
2216 optname = SO_SNDBUFFORCE;
2217 break;
2218 case TARGET_SO_RCVBUF:
2219 optname = SO_RCVBUF;
2220 break;
2221 case TARGET_SO_RCVBUFFORCE:
2222 optname = SO_RCVBUFFORCE;
2223 break;
2224 case TARGET_SO_KEEPALIVE:
2225 optname = SO_KEEPALIVE;
2226 break;
2227 case TARGET_SO_OOBINLINE:
2228 optname = SO_OOBINLINE;
2229 break;
2230 case TARGET_SO_NO_CHECK:
2231 optname = SO_NO_CHECK;
2232 break;
2233 case TARGET_SO_PRIORITY:
2234 optname = SO_PRIORITY;
2235 break;
2236 #ifdef SO_BSDCOMPAT
2237 case TARGET_SO_BSDCOMPAT:
2238 optname = SO_BSDCOMPAT;
2239 break;
2240 #endif
2241 case TARGET_SO_PASSCRED:
2242 optname = SO_PASSCRED;
2243 break;
2244 case TARGET_SO_PASSSEC:
2245 optname = SO_PASSSEC;
2246 break;
2247 case TARGET_SO_TIMESTAMP:
2248 optname = SO_TIMESTAMP;
2249 break;
2250 case TARGET_SO_RCVLOWAT:
2251 optname = SO_RCVLOWAT;
2252 break;
2253 default:
2254 goto unimplemented;
2256 if (optlen < sizeof(uint32_t))
2257 return -TARGET_EINVAL;
2259 if (get_user_u32(val, optval_addr))
2260 return -TARGET_EFAULT;
2261 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2262 break;
2263 #ifdef SOL_NETLINK
2264 case SOL_NETLINK:
2265 switch (optname) {
2266 case NETLINK_PKTINFO:
2267 case NETLINK_ADD_MEMBERSHIP:
2268 case NETLINK_DROP_MEMBERSHIP:
2269 case NETLINK_BROADCAST_ERROR:
2270 case NETLINK_NO_ENOBUFS:
2271 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2272 case NETLINK_LISTEN_ALL_NSID:
2273 case NETLINK_CAP_ACK:
2274 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2275 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2276 case NETLINK_EXT_ACK:
2277 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2278 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2279 case NETLINK_GET_STRICT_CHK:
2280 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2281 break;
2282 default:
2283 goto unimplemented;
2285 val = 0;
2286 if (optlen < sizeof(uint32_t)) {
2287 return -TARGET_EINVAL;
2289 if (get_user_u32(val, optval_addr)) {
2290 return -TARGET_EFAULT;
2292 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2293 sizeof(val)));
2294 break;
2295 #endif /* SOL_NETLINK */
2296 default:
2297 unimplemented:
2298 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2299 level, optname);
2300 ret = -TARGET_ENOPROTOOPT;
2302 return ret;
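/*
 * Illustrative sketch (comment only): the path a guest's SO_RCVTIMEO
 * takes through do_setsockopt() above.  The guest-layout struct
 * target_timeval is converted by copy_from_user_timeval() before the
 * host call is issued:
 *
 *     // guest: setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))
 *     // here:  optname TARGET_SO_RCVTIMEO -> SO_RCVTIMEO, tv converted,
 *     setsockopt(sockfd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */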
2305 /* do_getsockopt() must return target values and target errnos. */
2306 static abi_long do_getsockopt(int sockfd, int level, int optname,
2307 abi_ulong optval_addr, abi_ulong optlen)
2309 abi_long ret;
2310 int len, val;
2311 socklen_t lv;
2313 switch (level) {
2314 case TARGET_SOL_SOCKET:
2315 level = SOL_SOCKET;
2316 switch (optname) {
2317 /* These don't just return a single integer */
2318 case TARGET_SO_PEERNAME:
2319 goto unimplemented;
2320 case TARGET_SO_RCVTIMEO: {
2321 struct timeval tv;
2322 socklen_t tvlen;
2324 optname = SO_RCVTIMEO;
2326 get_timeout:
2327 if (get_user_u32(len, optlen)) {
2328 return -TARGET_EFAULT;
2330 if (len < 0) {
2331 return -TARGET_EINVAL;
2334 tvlen = sizeof(tv);
2335 ret = get_errno(getsockopt(sockfd, level, optname,
2336 &tv, &tvlen));
2337 if (ret < 0) {
2338 return ret;
2340 if (len > sizeof(struct target_timeval)) {
2341 len = sizeof(struct target_timeval);
2343 if (copy_to_user_timeval(optval_addr, &tv)) {
2344 return -TARGET_EFAULT;
2346 if (put_user_u32(len, optlen)) {
2347 return -TARGET_EFAULT;
2349 break;
2351 case TARGET_SO_SNDTIMEO:
2352 optname = SO_SNDTIMEO;
2353 goto get_timeout;
2354 case TARGET_SO_PEERCRED: {
2355 struct ucred cr;
2356 socklen_t crlen;
2357 struct target_ucred *tcr;
2359 if (get_user_u32(len, optlen)) {
2360 return -TARGET_EFAULT;
2362 if (len < 0) {
2363 return -TARGET_EINVAL;
2366 crlen = sizeof(cr);
2367 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2368 &cr, &crlen));
2369 if (ret < 0) {
2370 return ret;
2372 if (len > crlen) {
2373 len = crlen;
2375 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2376 return -TARGET_EFAULT;
2378 __put_user(cr.pid, &tcr->pid);
2379 __put_user(cr.uid, &tcr->uid);
2380 __put_user(cr.gid, &tcr->gid);
2381 unlock_user_struct(tcr, optval_addr, 1);
2382 if (put_user_u32(len, optlen)) {
2383 return -TARGET_EFAULT;
2385 break;
2387 case TARGET_SO_PEERSEC: {
2388 char *name;
2390 if (get_user_u32(len, optlen)) {
2391 return -TARGET_EFAULT;
2393 if (len < 0) {
2394 return -TARGET_EINVAL;
2396 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2397 if (!name) {
2398 return -TARGET_EFAULT;
2400 lv = len;
2401 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2402 name, &lv));
2403 if (put_user_u32(lv, optlen)) {
2404 ret = -TARGET_EFAULT;
2406 unlock_user(name, optval_addr, lv);
2407 break;
2409 case TARGET_SO_LINGER:
2411 struct linger lg;
2412 socklen_t lglen;
2413 struct target_linger *tlg;
2415 if (get_user_u32(len, optlen)) {
2416 return -TARGET_EFAULT;
2418 if (len < 0) {
2419 return -TARGET_EINVAL;
2422 lglen = sizeof(lg);
2423 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2424 &lg, &lglen));
2425 if (ret < 0) {
2426 return ret;
2428 if (len > lglen) {
2429 len = lglen;
2431 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2432 return -TARGET_EFAULT;
2434 __put_user(lg.l_onoff, &tlg->l_onoff);
2435 __put_user(lg.l_linger, &tlg->l_linger);
2436 unlock_user_struct(tlg, optval_addr, 1);
2437 if (put_user_u32(len, optlen)) {
2438 return -TARGET_EFAULT;
2440 break;
2442 /* Options with 'int' argument. */
2443 case TARGET_SO_DEBUG:
2444 optname = SO_DEBUG;
2445 goto int_case;
2446 case TARGET_SO_REUSEADDR:
2447 optname = SO_REUSEADDR;
2448 goto int_case;
2449 #ifdef SO_REUSEPORT
2450 case TARGET_SO_REUSEPORT:
2451 optname = SO_REUSEPORT;
2452 goto int_case;
2453 #endif
2454 case TARGET_SO_TYPE:
2455 optname = SO_TYPE;
2456 goto int_case;
2457 case TARGET_SO_ERROR:
2458 optname = SO_ERROR;
2459 goto int_case;
2460 case TARGET_SO_DONTROUTE:
2461 optname = SO_DONTROUTE;
2462 goto int_case;
2463 case TARGET_SO_BROADCAST:
2464 optname = SO_BROADCAST;
2465 goto int_case;
2466 case TARGET_SO_SNDBUF:
2467 optname = SO_SNDBUF;
2468 goto int_case;
2469 case TARGET_SO_RCVBUF:
2470 optname = SO_RCVBUF;
2471 goto int_case;
2472 case TARGET_SO_KEEPALIVE:
2473 optname = SO_KEEPALIVE;
2474 goto int_case;
2475 case TARGET_SO_OOBINLINE:
2476 optname = SO_OOBINLINE;
2477 goto int_case;
2478 case TARGET_SO_NO_CHECK:
2479 optname = SO_NO_CHECK;
2480 goto int_case;
2481 case TARGET_SO_PRIORITY:
2482 optname = SO_PRIORITY;
2483 goto int_case;
2484 #ifdef SO_BSDCOMPAT
2485 case TARGET_SO_BSDCOMPAT:
2486 optname = SO_BSDCOMPAT;
2487 goto int_case;
2488 #endif
2489 case TARGET_SO_PASSCRED:
2490 optname = SO_PASSCRED;
2491 goto int_case;
2492 case TARGET_SO_TIMESTAMP:
2493 optname = SO_TIMESTAMP;
2494 goto int_case;
2495 case TARGET_SO_RCVLOWAT:
2496 optname = SO_RCVLOWAT;
2497 goto int_case;
2498 case TARGET_SO_ACCEPTCONN:
2499 optname = SO_ACCEPTCONN;
2500 goto int_case;
2501 default:
2502 goto int_case;
2504 break;
2505 case SOL_TCP:
2506 /* TCP options all take an 'int' value. */
2507 int_case:
2508 if (get_user_u32(len, optlen))
2509 return -TARGET_EFAULT;
2510 if (len < 0)
2511 return -TARGET_EINVAL;
2512 lv = sizeof(lv);
2513 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2514 if (ret < 0)
2515 return ret;
2516 if (optname == SO_TYPE) {
2517 val = host_to_target_sock_type(val);
2519 if (len > lv)
2520 len = lv;
2521 if (len == 4) {
2522 if (put_user_u32(val, optval_addr))
2523 return -TARGET_EFAULT;
2524 } else {
2525 if (put_user_u8(val, optval_addr))
2526 return -TARGET_EFAULT;
2528 if (put_user_u32(len, optlen))
2529 return -TARGET_EFAULT;
2530 break;
2531 case SOL_IP:
2532 switch (optname) {
2533 case IP_TOS:
2534 case IP_TTL:
2535 case IP_HDRINCL:
2536 case IP_ROUTER_ALERT:
2537 case IP_RECVOPTS:
2538 case IP_RETOPTS:
2539 case IP_PKTINFO:
2540 case IP_MTU_DISCOVER:
2541 case IP_RECVERR:
2542 case IP_RECVTOS:
2543 #ifdef IP_FREEBIND
2544 case IP_FREEBIND:
2545 #endif
2546 case IP_MULTICAST_TTL:
2547 case IP_MULTICAST_LOOP:
2548 if (get_user_u32(len, optlen))
2549 return -TARGET_EFAULT;
2550 if (len < 0)
2551 return -TARGET_EINVAL;
2552 lv = sizeof(lv);
2553 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2554 if (ret < 0)
2555 return ret;
2556 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2557 len = 1;
2558 if (put_user_u32(len, optlen)
2559 || put_user_u8(val, optval_addr))
2560 return -TARGET_EFAULT;
2561 } else {
2562 if (len > sizeof(int))
2563 len = sizeof(int);
2564 if (put_user_u32(len, optlen)
2565 || put_user_u32(val, optval_addr))
2566 return -TARGET_EFAULT;
2568 break;
2569 default:
2570 ret = -TARGET_ENOPROTOOPT;
2571 break;
2573 break;
2574 case SOL_IPV6:
2575 switch (optname) {
2576 case IPV6_MTU_DISCOVER:
2577 case IPV6_MTU:
2578 case IPV6_V6ONLY:
2579 case IPV6_RECVPKTINFO:
2580 case IPV6_UNICAST_HOPS:
2581 case IPV6_MULTICAST_HOPS:
2582 case IPV6_MULTICAST_LOOP:
2583 case IPV6_RECVERR:
2584 case IPV6_RECVHOPLIMIT:
2585 case IPV6_2292HOPLIMIT:
2586 case IPV6_CHECKSUM:
2587 case IPV6_ADDRFORM:
2588 case IPV6_2292PKTINFO:
2589 case IPV6_RECVTCLASS:
2590 case IPV6_RECVRTHDR:
2591 case IPV6_2292RTHDR:
2592 case IPV6_RECVHOPOPTS:
2593 case IPV6_2292HOPOPTS:
2594 case IPV6_RECVDSTOPTS:
2595 case IPV6_2292DSTOPTS:
2596 case IPV6_TCLASS:
2597 #ifdef IPV6_RECVPATHMTU
2598 case IPV6_RECVPATHMTU:
2599 #endif
2600 #ifdef IPV6_TRANSPARENT
2601 case IPV6_TRANSPARENT:
2602 #endif
2603 #ifdef IPV6_FREEBIND
2604 case IPV6_FREEBIND:
2605 #endif
2606 #ifdef IPV6_RECVORIGDSTADDR
2607 case IPV6_RECVORIGDSTADDR:
2608 #endif
2609 if (get_user_u32(len, optlen))
2610 return -TARGET_EFAULT;
2611 if (len < 0)
2612 return -TARGET_EINVAL;
2613 lv = sizeof(lv);
2614 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2615 if (ret < 0)
2616 return ret;
2617 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2618 len = 1;
2619 if (put_user_u32(len, optlen)
2620 || put_user_u8(val, optval_addr))
2621 return -TARGET_EFAULT;
2622 } else {
2623 if (len > sizeof(int))
2624 len = sizeof(int);
2625 if (put_user_u32(len, optlen)
2626 || put_user_u32(val, optval_addr))
2627 return -TARGET_EFAULT;
2629 break;
2630 default:
2631 ret = -TARGET_ENOPROTOOPT;
2632 break;
2634 break;
2635 #ifdef SOL_NETLINK
2636 case SOL_NETLINK:
2637 switch (optname) {
2638 case NETLINK_PKTINFO:
2639 case NETLINK_BROADCAST_ERROR:
2640 case NETLINK_NO_ENOBUFS:
2641 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2642 case NETLINK_LISTEN_ALL_NSID:
2643 case NETLINK_CAP_ACK:
2644 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2645 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2646 case NETLINK_EXT_ACK:
2647 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2648 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2649 case NETLINK_GET_STRICT_CHK:
2650 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2651 if (get_user_u32(len, optlen)) {
2652 return -TARGET_EFAULT;
2654 if (len != sizeof(val)) {
2655 return -TARGET_EINVAL;
2657 lv = len;
2658 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2659 if (ret < 0) {
2660 return ret;
2662 if (put_user_u32(lv, optlen)
2663 || put_user_u32(val, optval_addr)) {
2664 return -TARGET_EFAULT;
2666 break;
2667 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2668 case NETLINK_LIST_MEMBERSHIPS:
2670 uint32_t *results;
2671 int i;
2672 if (get_user_u32(len, optlen)) {
2673 return -TARGET_EFAULT;
2675 if (len < 0) {
2676 return -TARGET_EINVAL;
2678 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2679 if (!results) {
2680 return -TARGET_EFAULT;
2682 lv = len;
2683 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2684 if (ret < 0) {
2685 unlock_user(results, optval_addr, 0);
2686 return ret;
2688 /* swap host endianness to target endianness. */
2689 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2690 results[i] = tswap32(results[i]);
2692 if (put_user_u32(lv, optlen)) {
2693 return -TARGET_EFAULT;
2695 unlock_user(results, optval_addr, 0);
2696 break;
2698 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2699 default:
2700 goto unimplemented;
2702 break;
2703 #endif /* SOL_NETLINK */
2704 default:
2705 unimplemented:
2706 qemu_log_mask(LOG_UNIMP,
2707 "getsockopt level=%d optname=%d not yet supported\n",
2708 level, optname);
2709 ret = -TARGET_EOPNOTSUPP;
2710 break;
2712 return ret;
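/*
 * Illustrative sketch (comment only) of the narrowing logic above: a
 * guest may legitimately read a small integer option into a 1-byte
 * buffer, e.g.
 *
 *     unsigned char v; socklen_t l = 1;
 *     getsockopt(s, SOL_IP, IP_MULTICAST_LOOP, &v, &l);
 *
 * The host value is fetched as a full int here and stored back with
 * put_user_u8(), mirroring the kernel's willingness to copy out fewer
 * than sizeof(int) bytes for such options.
 */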
2715 /* Convert a target low/high pair representing a file offset into the host
2716 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2717 * as the kernel doesn't handle them either.
2719 static void target_to_host_low_high(abi_ulong tlow,
2720 abi_ulong thigh,
2721 unsigned long *hlow,
2722 unsigned long *hhigh)
2724 uint64_t off = tlow |
2725 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2726 TARGET_LONG_BITS / 2;
2728 *hlow = off;
2729 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
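/*
 * Worked example (comment only): a 32-bit guest passing offset
 * 0x0123456789abcdef supplies tlow = 0x89abcdef, thigh = 0x01234567,
 * giving off = 0x0123456789abcdef.  A 64-bit host then gets
 * *hlow = 0x0123456789abcdef, *hhigh = 0; a 32-bit host gets
 * *hlow = 0x89abcdef, *hhigh = 0x01234567.  Each shift is split into
 * two half-width shifts so that no single shift ever equals the operand
 * width, which would be undefined behaviour in C.
 */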
2732 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2733 abi_ulong count, int copy)
2735 struct target_iovec *target_vec;
2736 struct iovec *vec;
2737 abi_ulong total_len, max_len;
2738 int i;
2739 int err = 0;
2740 bool bad_address = false;
2742 if (count == 0) {
2743 errno = 0;
2744 return NULL;
2746 if (count > IOV_MAX) {
2747 errno = EINVAL;
2748 return NULL;
2751 vec = g_try_new0(struct iovec, count);
2752 if (vec == NULL) {
2753 errno = ENOMEM;
2754 return NULL;
2757 target_vec = lock_user(VERIFY_READ, target_addr,
2758 count * sizeof(struct target_iovec), 1);
2759 if (target_vec == NULL) {
2760 err = EFAULT;
2761 goto fail2;
2764 /* ??? If host page size > target page size, this will result in a
2765 value larger than what we can actually support. */
2766 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2767 total_len = 0;
2769 for (i = 0; i < count; i++) {
2770 abi_ulong base = tswapal(target_vec[i].iov_base);
2771 abi_long len = tswapal(target_vec[i].iov_len);
2773 if (len < 0) {
2774 err = EINVAL;
2775 goto fail;
2776 } else if (len == 0) {
2777 /* The pointer of a zero-length buffer is ignored. */
2778 vec[i].iov_base = 0;
2779 } else {
2780 vec[i].iov_base = lock_user(type, base, len, copy);
2781 /* If the first buffer pointer is bad, this is a fault. But
2782 * subsequent bad buffers will result in a partial write; this
2783 * is realized by filling the vector with null pointers and
2784 * zero lengths. */
2785 if (!vec[i].iov_base) {
2786 if (i == 0) {
2787 err = EFAULT;
2788 goto fail;
2789 } else {
2790 bad_address = true;
2793 if (bad_address) {
2794 len = 0;
2796 if (len > max_len - total_len) {
2797 len = max_len - total_len;
2800 vec[i].iov_len = len;
2801 total_len += len;
2804 unlock_user(target_vec, target_addr, 0);
2805 return vec;
2807 fail:
2808 while (--i >= 0) {
2809 if (tswapal(target_vec[i].iov_len) > 0) {
2810 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2813 unlock_user(target_vec, target_addr, 0);
2814 fail2:
2815 g_free(vec);
2816 errno = err;
2817 return NULL;
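/*
 * Usage sketch (comment only; the host call shown is illustrative):
 * lock_iovec()/unlock_iovec() bracket the host I/O, as in a guest
 * writev:
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, guest_iov, cnt, 1);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno); // set by lock_iovec()
 *     }
 *     ret = get_errno(writev(fd, vec, cnt));
 *     unlock_iovec(vec, guest_iov, cnt, 0);    // 0: nothing copied back
 */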
2820 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2821 abi_ulong count, int copy)
2823 struct target_iovec *target_vec;
2824 int i;
2826 target_vec = lock_user(VERIFY_READ, target_addr,
2827 count * sizeof(struct target_iovec), 1);
2828 if (target_vec) {
2829 for (i = 0; i < count; i++) {
2830 abi_ulong base = tswapal(target_vec[i].iov_base);
2831 abi_long len = tswapal(target_vec[i].iov_len);
2832 if (len < 0) {
2833 break;
2835 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2837 unlock_user(target_vec, target_addr, 0);
2840 g_free(vec);
2843 static inline int target_to_host_sock_type(int *type)
2845 int host_type = 0;
2846 int target_type = *type;
2848 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2849 case TARGET_SOCK_DGRAM:
2850 host_type = SOCK_DGRAM;
2851 break;
2852 case TARGET_SOCK_STREAM:
2853 host_type = SOCK_STREAM;
2854 break;
2855 default:
2856 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2857 break;
2859 if (target_type & TARGET_SOCK_CLOEXEC) {
2860 #if defined(SOCK_CLOEXEC)
2861 host_type |= SOCK_CLOEXEC;
2862 #else
2863 return -TARGET_EINVAL;
2864 #endif
2866 if (target_type & TARGET_SOCK_NONBLOCK) {
2867 #if defined(SOCK_NONBLOCK)
2868 host_type |= SOCK_NONBLOCK;
2869 #elif !defined(O_NONBLOCK)
2870 return -TARGET_EINVAL;
2871 #endif
2873 *type = host_type;
2874 return 0;
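/*
 * Illustrative example (comment only): a guest
 * socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, 0) arrives with
 * type = TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC and maps to the
 * host's SOCK_STREAM | SOCK_CLOEXEC, or fails with -TARGET_EINVAL on
 * hosts that predate SOCK_CLOEXEC:
 *
 *     int type = TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC;
 *     if (target_to_host_sock_type(&type) == 0) {
 *         // type is now SOCK_STREAM | SOCK_CLOEXEC
 *     }
 */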
2877 /* Try to emulate socket type flags after socket creation. */
2878 static int sock_flags_fixup(int fd, int target_type)
2880 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2881 if (target_type & TARGET_SOCK_NONBLOCK) {
2882 int flags = fcntl(fd, F_GETFL);
2883 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2884 close(fd);
2885 return -TARGET_EINVAL;
2888 #endif
2889 return fd;
2892 /* do_socket() must return target values and target errnos. */
2893 static abi_long do_socket(int domain, int type, int protocol)
2895 int target_type = type;
2896 int ret;
2898 ret = target_to_host_sock_type(&type);
2899 if (ret) {
2900 return ret;
2903 if (domain == PF_NETLINK && !(
2904 #ifdef CONFIG_RTNETLINK
2905 protocol == NETLINK_ROUTE ||
2906 #endif
2907 protocol == NETLINK_KOBJECT_UEVENT ||
2908 protocol == NETLINK_AUDIT)) {
2909 return -EPFNOSUPPORT;
2912 if (domain == AF_PACKET ||
2913 (domain == AF_INET && type == SOCK_PACKET)) {
2914 protocol = tswap16(protocol);
2917 ret = get_errno(socket(domain, type, protocol));
2918 if (ret >= 0) {
2919 ret = sock_flags_fixup(ret, target_type);
2920 if (type == SOCK_PACKET) {
2921 /* Handle an obsolete case:
2922 * if the socket type is SOCK_PACKET, bind by name
2924 fd_trans_register(ret, &target_packet_trans);
2925 } else if (domain == PF_NETLINK) {
2926 switch (protocol) {
2927 #ifdef CONFIG_RTNETLINK
2928 case NETLINK_ROUTE:
2929 fd_trans_register(ret, &target_netlink_route_trans);
2930 break;
2931 #endif
2932 case NETLINK_KOBJECT_UEVENT:
2933 /* nothing to do: messages are strings */
2934 break;
2935 case NETLINK_AUDIT:
2936 fd_trans_register(ret, &target_netlink_audit_trans);
2937 break;
2938 default:
2939 g_assert_not_reached();
2943 return ret;
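/*
 * Note on the tswap16() above (comment only, rationale hedged): packet
 * sockets expect the protocol in network byte order, and the guest has
 * already applied its own htons(); tswap16() re-expresses that value
 * in the host's byte order, so e.g. a big-endian guest's
 * htons(ETH_P_ALL) is seen correctly by a little-endian host kernel.
 */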
2946 /* do_bind() must return target values and target errnos. */
2947 static abi_long do_bind(int sockfd, abi_ulong target_addr,
2948 socklen_t addrlen)
2950 void *addr;
2951 abi_long ret;
2953 if ((int)addrlen < 0) {
2954 return -TARGET_EINVAL;
2957 addr = alloca(addrlen+1);
2959 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2960 if (ret)
2961 return ret;
2963 return get_errno(bind(sockfd, addr, addrlen));
2966 /* do_connect() must return target values and target errnos. */
2967 static abi_long do_connect(int sockfd, abi_ulong target_addr,
2968 socklen_t addrlen)
2970 void *addr;
2971 abi_long ret;
2973 if ((int)addrlen < 0) {
2974 return -TARGET_EINVAL;
2977 addr = alloca(addrlen+1);
2979 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
2980 if (ret)
2981 return ret;
2983 return get_errno(safe_connect(sockfd, addr, addrlen));
2986 /* do_sendrecvmsg_locked() must return target values and target errnos. */
2987 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
2988 int flags, int send)
2990 abi_long ret, len;
2991 struct msghdr msg;
2992 abi_ulong count;
2993 struct iovec *vec;
2994 abi_ulong target_vec;
2996 if (msgp->msg_name) {
2997 msg.msg_namelen = tswap32(msgp->msg_namelen);
2998 msg.msg_name = alloca(msg.msg_namelen+1);
2999 ret = target_to_host_sockaddr(fd, msg.msg_name,
3000 tswapal(msgp->msg_name),
3001 msg.msg_namelen);
3002 if (ret == -TARGET_EFAULT) {
3003 /* For connected sockets msg_name and msg_namelen must
3004 * be ignored, so returning EFAULT immediately is wrong.
3005 * Instead, pass a bad msg_name to the host kernel, and
3006 * let it decide whether to return EFAULT or not.
3008 msg.msg_name = (void *)-1;
3009 } else if (ret) {
3010 goto out2;
3012 } else {
3013 msg.msg_name = NULL;
3014 msg.msg_namelen = 0;
3016 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3017 msg.msg_control = alloca(msg.msg_controllen);
3018 memset(msg.msg_control, 0, msg.msg_controllen);
3020 msg.msg_flags = tswap32(msgp->msg_flags);
3022 count = tswapal(msgp->msg_iovlen);
3023 target_vec = tswapal(msgp->msg_iov);
3025 if (count > IOV_MAX) {
3026 /* sendmsg/recvmsg return a different errno for this condition than
3027 * readv/writev, so we must catch it here before lock_iovec() does.
3029 ret = -TARGET_EMSGSIZE;
3030 goto out2;
3033 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3034 target_vec, count, send);
3035 if (vec == NULL) {
3036 ret = -host_to_target_errno(errno);
3037 goto out2;
3039 msg.msg_iovlen = count;
3040 msg.msg_iov = vec;
3042 if (send) {
3043 if (fd_trans_target_to_host_data(fd)) {
3044 void *host_msg;
3046 host_msg = g_malloc(msg.msg_iov->iov_len);
3047 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3048 ret = fd_trans_target_to_host_data(fd)(host_msg,
3049 msg.msg_iov->iov_len);
3050 if (ret >= 0) {
3051 msg.msg_iov->iov_base = host_msg;
3052 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3054 g_free(host_msg);
3055 } else {
3056 ret = target_to_host_cmsg(&msg, msgp);
3057 if (ret == 0) {
3058 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3061 } else {
3062 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3063 if (!is_error(ret)) {
3064 len = ret;
3065 if (fd_trans_host_to_target_data(fd)) {
3066 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3067 MIN(msg.msg_iov->iov_len, len));
3068 } else {
3069 ret = host_to_target_cmsg(msgp, &msg);
3071 if (!is_error(ret)) {
3072 msgp->msg_namelen = tswap32(msg.msg_namelen);
3073 msgp->msg_flags = tswap32(msg.msg_flags);
3074 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3075 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3076 msg.msg_name, msg.msg_namelen);
3077 if (ret) {
3078 goto out;
3082 ret = len;
3087 out:
3088 unlock_iovec(vec, target_vec, count, !send);
3089 out2:
3090 return ret;
3093 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3094 int flags, int send)
3096 abi_long ret;
3097 struct target_msghdr *msgp;
3099 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3100 msgp,
3101 target_msg,
3102 send ? 1 : 0)) {
3103 return -TARGET_EFAULT;
3105 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3106 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3107 return ret;
3110 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3111 * so it might not have this *mmsg-specific flag either.
3113 #ifndef MSG_WAITFORONE
3114 #define MSG_WAITFORONE 0x10000
3115 #endif
3117 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3118 unsigned int vlen, unsigned int flags,
3119 int send)
3121 struct target_mmsghdr *mmsgp;
3122 abi_long ret = 0;
3123 int i;
3125 if (vlen > UIO_MAXIOV) {
3126 vlen = UIO_MAXIOV;
3129 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3130 if (!mmsgp) {
3131 return -TARGET_EFAULT;
3134 for (i = 0; i < vlen; i++) {
3135 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3136 if (is_error(ret)) {
3137 break;
3139 mmsgp[i].msg_len = tswap32(ret);
3140 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3141 if (flags & MSG_WAITFORONE) {
3142 flags |= MSG_DONTWAIT;
3146 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3148 /* Return the number of datagrams transferred if any completed at all;
3149 * otherwise return the error.
3151 if (i) {
3152 return i;
3154 return ret;
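/*
 * The partial-success convention above mirrors the kernel (comment
 * only): if a later datagram fails after at least one succeeded, the
 * count of completed datagrams is returned and the error is dropped:
 *
 *     ret = do_sendrecvmmsg(fd, vec, 3, 0, 1);
 *     // ret == 2 if the third send failed; the guest will retry and
 *     // then observe the error itself
 */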
3157 /* do_accept4() must return target values and target errnos. */
3158 static abi_long do_accept4(int fd, abi_ulong target_addr,
3159 abi_ulong target_addrlen_addr, int flags)
3161 socklen_t addrlen, ret_addrlen;
3162 void *addr;
3163 abi_long ret;
3164 int host_flags;
3166 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3168 if (target_addr == 0) {
3169 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3172 /* Linux returns EINVAL if the addrlen pointer is invalid */
3173 if (get_user_u32(addrlen, target_addrlen_addr))
3174 return -TARGET_EINVAL;
3176 if ((int)addrlen < 0) {
3177 return -TARGET_EINVAL;
3180 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3181 return -TARGET_EINVAL;
3183 addr = alloca(addrlen);
3185 ret_addrlen = addrlen;
3186 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3187 if (!is_error(ret)) {
3188 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3189 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3190 ret = -TARGET_EFAULT;
3193 return ret;
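/*
 * Quirk worth keeping in mind (comment only): an unreadable addrlen
 * pointer yields -TARGET_EINVAL here rather than -TARGET_EFAULT,
 * matching what Linux itself does for accept(2):
 *
 *     accept(fd, &addr, (socklen_t *)1);   // bogus addrlen pointer
 *     // the guest sees EINVAL, not EFAULT
 */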
3196 /* do_getpeername() must return target values and target errnos. */
3197 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3198 abi_ulong target_addrlen_addr)
3200 socklen_t addrlen, ret_addrlen;
3201 void *addr;
3202 abi_long ret;
3204 if (get_user_u32(addrlen, target_addrlen_addr))
3205 return -TARGET_EFAULT;
3207 if ((int)addrlen < 0) {
3208 return -TARGET_EINVAL;
3211 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3212 return -TARGET_EFAULT;
3214 addr = alloca(addrlen);
3216 ret_addrlen = addrlen;
3217 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3218 if (!is_error(ret)) {
3219 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3220 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3221 ret = -TARGET_EFAULT;
3224 return ret;
3227 /* do_getsockname() must return target values and target errnos. */
3228 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3229 abi_ulong target_addrlen_addr)
3231 socklen_t addrlen, ret_addrlen;
3232 void *addr;
3233 abi_long ret;
3235 if (get_user_u32(addrlen, target_addrlen_addr))
3236 return -TARGET_EFAULT;
3238 if ((int)addrlen < 0) {
3239 return -TARGET_EINVAL;
3242 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3243 return -TARGET_EFAULT;
3245 addr = alloca(addrlen);
3247 ret_addrlen = addrlen;
3248 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3249 if (!is_error(ret)) {
3250 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3251 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3252 ret = -TARGET_EFAULT;
3255 return ret;
3258 /* do_socketpair() must return target values and target errnos. */
3259 static abi_long do_socketpair(int domain, int type, int protocol,
3260 abi_ulong target_tab_addr)
3262 int tab[2];
3263 abi_long ret;
3265 target_to_host_sock_type(&type);
3267 ret = get_errno(socketpair(domain, type, protocol, tab));
3268 if (!is_error(ret)) {
3269 if (put_user_s32(tab[0], target_tab_addr)
3270 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3271 ret = -TARGET_EFAULT;
3273 return ret;
3276 /* do_sendto() must return target values and target errnos. */
3277 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3278 abi_ulong target_addr, socklen_t addrlen)
3280 void *addr;
3281 void *host_msg;
3282 void *copy_msg = NULL;
3283 abi_long ret;
3285 if ((int)addrlen < 0) {
3286 return -TARGET_EINVAL;
3289 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3290 if (!host_msg)
3291 return -TARGET_EFAULT;
3292 if (fd_trans_target_to_host_data(fd)) {
3293 copy_msg = host_msg;
3294 host_msg = g_malloc(len);
3295 memcpy(host_msg, copy_msg, len);
3296 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3297 if (ret < 0) {
3298 goto fail;
3301 if (target_addr) {
3302 addr = alloca(addrlen+1);
3303 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3304 if (ret) {
3305 goto fail;
3307 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3308 } else {
3309 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3311 fail:
3312 if (copy_msg) {
3313 g_free(host_msg);
3314 host_msg = copy_msg;
3316 unlock_user(host_msg, msg, 0);
3317 return ret;
3320 /* do_recvfrom() must return target values and target errnos. */
3321 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3322 abi_ulong target_addr,
3323 abi_ulong target_addrlen)
3325 socklen_t addrlen, ret_addrlen;
3326 void *addr;
3327 void *host_msg;
3328 abi_long ret;
3330 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3331 if (!host_msg)
3332 return -TARGET_EFAULT;
3333 if (target_addr) {
3334 if (get_user_u32(addrlen, target_addrlen)) {
3335 ret = -TARGET_EFAULT;
3336 goto fail;
3338 if ((int)addrlen < 0) {
3339 ret = -TARGET_EINVAL;
3340 goto fail;
3342 addr = alloca(addrlen);
3343 ret_addrlen = addrlen;
3344 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3345 addr, &ret_addrlen));
3346 } else {
3347 addr = NULL; /* To keep compiler quiet. */
3348 addrlen = 0; /* To keep compiler quiet. */
3349 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3351 if (!is_error(ret)) {
3352 if (fd_trans_host_to_target_data(fd)) {
3353 abi_long trans;
3354 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3355 if (is_error(trans)) {
3356 ret = trans;
3357 goto fail;
3360 if (target_addr) {
3361 host_to_target_sockaddr(target_addr, addr,
3362 MIN(addrlen, ret_addrlen));
3363 if (put_user_u32(ret_addrlen, target_addrlen)) {
3364 ret = -TARGET_EFAULT;
3365 goto fail;
3368 unlock_user(host_msg, msg, len);
3369 } else {
3370 fail:
3371 unlock_user(host_msg, msg, 0);
3373 return ret;
3376 #ifdef TARGET_NR_socketcall
3377 /* do_socketcall() must return target values and target errnos. */
3378 static abi_long do_socketcall(int num, abi_ulong vptr)
3380 static const unsigned nargs[] = { /* number of arguments per operation */
3381 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3382 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3383 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3384 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3385 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3386 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3387 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3388 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3389 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3390 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3391 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3392 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3393 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3394 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3395 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3396 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3397 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3398 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3399 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3400 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3402 abi_long a[6]; /* max 6 args */
3403 unsigned i;
3405 /* check the range of the first argument num */
3406 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3407 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3408 return -TARGET_EINVAL;
3410 /* ensure we have space for args */
3411 if (nargs[num] > ARRAY_SIZE(a)) {
3412 return -TARGET_EINVAL;
3414 /* collect the arguments in a[] according to nargs[] */
3415 for (i = 0; i < nargs[num]; ++i) {
3416 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3417 return -TARGET_EFAULT;
3420 /* now when we have the args, invoke the appropriate underlying function */
3421 switch (num) {
3422 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3423 return do_socket(a[0], a[1], a[2]);
3424 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3425 return do_bind(a[0], a[1], a[2]);
3426 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3427 return do_connect(a[0], a[1], a[2]);
3428 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3429 return get_errno(listen(a[0], a[1]));
3430 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3431 return do_accept4(a[0], a[1], a[2], 0);
3432 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3433 return do_getsockname(a[0], a[1], a[2]);
3434 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3435 return do_getpeername(a[0], a[1], a[2]);
3436 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3437 return do_socketpair(a[0], a[1], a[2], a[3]);
3438 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3439 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3440 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3441 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3442 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3443 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3444 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3445 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3446 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3447 return get_errno(shutdown(a[0], a[1]));
3448 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3449 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3450 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3451 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3452 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3453 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3454 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3455 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3456 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3457 return do_accept4(a[0], a[1], a[2], a[3]);
3458 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3459 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3460 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3461 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3462 default:
3463 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3464 return -TARGET_EINVAL;
3467 #endif
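/*
 * Illustrative sketch (comment only): on targets using the multiplexed
 * socketcall ABI (e.g. 32-bit x86 guests), a guest connect(fd, addr,
 * len) arrives as socketcall(TARGET_SYS_CONNECT, vptr), with vptr
 * pointing at three abi_longs in guest memory:
 *
 *     // guest memory at vptr: { fd, addr, addrlen }
 *     // fetched via get_user_ual(), then dispatched as
 *     do_connect(a[0], a[1], a[2]);
 */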
3469 #define N_SHM_REGIONS 32
3471 static struct shm_region {
3472 abi_ulong start;
3473 abi_ulong size;
3474 bool in_use;
3475 } shm_regions[N_SHM_REGIONS];
3477 #ifndef TARGET_SEMID64_DS
3478 /* asm-generic version of this struct */
3479 struct target_semid64_ds
3481 struct target_ipc_perm sem_perm;
3482 abi_ulong sem_otime;
3483 #if TARGET_ABI_BITS == 32
3484 abi_ulong __unused1;
3485 #endif
3486 abi_ulong sem_ctime;
3487 #if TARGET_ABI_BITS == 32
3488 abi_ulong __unused2;
3489 #endif
3490 abi_ulong sem_nsems;
3491 abi_ulong __unused3;
3492 abi_ulong __unused4;
3494 #endif
3496 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3497 abi_ulong target_addr)
3499 struct target_ipc_perm *target_ip;
3500 struct target_semid64_ds *target_sd;
3502 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3503 return -TARGET_EFAULT;
3504 target_ip = &(target_sd->sem_perm);
3505 host_ip->__key = tswap32(target_ip->__key);
3506 host_ip->uid = tswap32(target_ip->uid);
3507 host_ip->gid = tswap32(target_ip->gid);
3508 host_ip->cuid = tswap32(target_ip->cuid);
3509 host_ip->cgid = tswap32(target_ip->cgid);
3510 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3511 host_ip->mode = tswap32(target_ip->mode);
3512 #else
3513 host_ip->mode = tswap16(target_ip->mode);
3514 #endif
3515 #if defined(TARGET_PPC)
3516 host_ip->__seq = tswap32(target_ip->__seq);
3517 #else
3518 host_ip->__seq = tswap16(target_ip->__seq);
3519 #endif
3520 unlock_user_struct(target_sd, target_addr, 0);
3521 return 0;
3524 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3525 struct ipc_perm *host_ip)
3527 struct target_ipc_perm *target_ip;
3528 struct target_semid64_ds *target_sd;
3530 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3531 return -TARGET_EFAULT;
3532 target_ip = &(target_sd->sem_perm);
3533 target_ip->__key = tswap32(host_ip->__key);
3534 target_ip->uid = tswap32(host_ip->uid);
3535 target_ip->gid = tswap32(host_ip->gid);
3536 target_ip->cuid = tswap32(host_ip->cuid);
3537 target_ip->cgid = tswap32(host_ip->cgid);
3538 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3539 target_ip->mode = tswap32(host_ip->mode);
3540 #else
3541 target_ip->mode = tswap16(host_ip->mode);
3542 #endif
3543 #if defined(TARGET_PPC)
3544 target_ip->__seq = tswap32(host_ip->__seq);
3545 #else
3546 target_ip->__seq = tswap16(host_ip->__seq);
3547 #endif
3548 unlock_user_struct(target_sd, target_addr, 1);
3549 return 0;
3552 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3553 abi_ulong target_addr)
3555 struct target_semid64_ds *target_sd;
3557 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3558 return -TARGET_EFAULT;
3559 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3560 return -TARGET_EFAULT;
3561 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3562 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3563 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3564 unlock_user_struct(target_sd, target_addr, 0);
3565 return 0;
3568 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3569 struct semid_ds *host_sd)
3571 struct target_semid64_ds *target_sd;
3573 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3574 return -TARGET_EFAULT;
3575 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3576 return -TARGET_EFAULT;
3577 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3578 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3579 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3580 unlock_user_struct(target_sd, target_addr, 1);
3581 return 0;
3584 struct target_seminfo {
3585 int semmap;
3586 int semmni;
3587 int semmns;
3588 int semmnu;
3589 int semmsl;
3590 int semopm;
3591 int semume;
3592 int semusz;
3593 int semvmx;
3594 int semaem;
3597 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3598 struct seminfo *host_seminfo)
3600 struct target_seminfo *target_seminfo;
3601 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3602 return -TARGET_EFAULT;
3603 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3604 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3605 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3606 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3607 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3608 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3609 __put_user(host_seminfo->semume, &target_seminfo->semume);
3610 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3611 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3612 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3613 unlock_user_struct(target_seminfo, target_addr, 1);
3614 return 0;
3617 union semun {
3618 int val;
3619 struct semid_ds *buf;
3620 unsigned short *array;
3621 struct seminfo *__buf;
3624 union target_semun {
3625 int val;
3626 abi_ulong buf;
3627 abi_ulong array;
3628 abi_ulong __buf;
3631 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3632 abi_ulong target_addr)
3634 int nsems;
3635 unsigned short *array;
3636 union semun semun;
3637 struct semid_ds semid_ds;
3638 int i, ret;
3640 semun.buf = &semid_ds;
3642 ret = semctl(semid, 0, IPC_STAT, semun);
3643 if (ret == -1)
3644 return get_errno(ret);
3646 nsems = semid_ds.sem_nsems;
3648 *host_array = g_try_new(unsigned short, nsems);
3649 if (!*host_array) {
3650 return -TARGET_ENOMEM;
3652 array = lock_user(VERIFY_READ, target_addr,
3653 nsems*sizeof(unsigned short), 1);
3654 if (!array) {
3655 g_free(*host_array);
3656 return -TARGET_EFAULT;
3659 for (i = 0; i < nsems; i++) {
3660 __get_user((*host_array)[i], &array[i]);
3662 unlock_user(array, target_addr, 0);
3664 return 0;
3667 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3668 unsigned short **host_array)
3670 int nsems;
3671 unsigned short *array;
3672 union semun semun;
3673 struct semid_ds semid_ds;
3674 int i, ret;
3676 semun.buf = &semid_ds;
3678 ret = semctl(semid, 0, IPC_STAT, semun);
3679 if (ret == -1)
3680 return get_errno(ret);
3682 nsems = semid_ds.sem_nsems;
3684 array = lock_user(VERIFY_WRITE, target_addr,
3685 nsems*sizeof(unsigned short), 0);
3686 if (!array)
3687 return -TARGET_EFAULT;
3689 for (i = 0; i < nsems; i++) {
3690 __put_user((*host_array)[i], &array[i]);
3692 g_free(*host_array);
3693 unlock_user(array, target_addr, 1);
3695 return 0;
3698 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3699 abi_ulong target_arg)
3701 union target_semun target_su = { .buf = target_arg };
3702 union semun arg;
3703 struct semid_ds dsarg;
3704 unsigned short *array = NULL;
3705 struct seminfo seminfo;
3706 abi_long ret = -TARGET_EINVAL;
3707 abi_long err;
3708 cmd &= 0xff;
3710 switch (cmd) {
3711 case GETVAL:
3712 case SETVAL:
3713 /* In 64 bit cross-endian situations, we will erroneously pick up
3714 * the wrong half of the union for the "val" element. To rectify
3715 * this, the entire 8-byte structure is byteswapped, followed by
3716 * a swap of the 4 byte val field. In other cases, the data is
3717 * already in proper host byte order. */
3718 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3719 target_su.buf = tswapal(target_su.buf);
3720 arg.val = tswap32(target_su.val);
3721 } else {
3722 arg.val = target_su.val;
3724 ret = get_errno(semctl(semid, semnum, cmd, arg));
3725 break;
3726 case GETALL:
3727 case SETALL:
3728 err = target_to_host_semarray(semid, &array, target_su.array);
3729 if (err)
3730 return err;
3731 arg.array = array;
3732 ret = get_errno(semctl(semid, semnum, cmd, arg));
3733 err = host_to_target_semarray(semid, target_su.array, &array);
3734 if (err)
3735 return err;
3736 break;
3737 case IPC_STAT:
3738 case IPC_SET:
3739 case SEM_STAT:
3740 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3741 if (err)
3742 return err;
3743 arg.buf = &dsarg;
3744 ret = get_errno(semctl(semid, semnum, cmd, arg));
3745 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3746 if (err)
3747 return err;
3748 break;
3749 case IPC_INFO:
3750 case SEM_INFO:
3751 arg.__buf = &seminfo;
3752 ret = get_errno(semctl(semid, semnum, cmd, arg));
3753 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3754 if (err)
3755 return err;
3756 break;
3757 case IPC_RMID:
3758 case GETPID:
3759 case GETNCNT:
3760 case GETZCNT:
3761 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3762 break;
3765 return ret;
3768 struct target_sembuf {
3769 unsigned short sem_num;
3770 short sem_op;
3771 short sem_flg;
3774 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3775 abi_ulong target_addr,
3776 unsigned nsops)
3778 struct target_sembuf *target_sembuf;
3779 int i;
3781 target_sembuf = lock_user(VERIFY_READ, target_addr,
3782 nsops*sizeof(struct target_sembuf), 1);
3783 if (!target_sembuf)
3784 return -TARGET_EFAULT;
3786 for (i = 0; i < nsops; i++) {
3787 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3788 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3789 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3792 unlock_user(target_sembuf, target_addr, 0);
3794 return 0;
3797 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
3799 struct sembuf sops[nsops];
3800 abi_long ret;
3802 if (target_to_host_sembuf(sops, ptr, nsops))
3803 return -TARGET_EFAULT;
3805 ret = -TARGET_ENOSYS;
3806 #ifdef __NR_semtimedop
3807 ret = get_errno(safe_semtimedop(semid, sops, nsops, NULL));
3808 #endif
3809 #ifdef __NR_ipc
3810 if (ret == -TARGET_ENOSYS) {
3811 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, 0));
3813 #endif
3814 return ret;
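/*
 * Illustrative note (comment only): hosts without a native semtimedop
 * syscall route all SysV IPC through ipc(2), so a guest
 * semop(semid, sops, 2) becomes one of:
 *
 *     safe_semtimedop(semid, sops, 2, NULL);            // direct syscall
 *     safe_ipc(IPCOP_semtimedop, semid, 2, 0, sops, 0); // ipc(2) fallback
 */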
3817 struct target_msqid_ds
3819 struct target_ipc_perm msg_perm;
3820 abi_ulong msg_stime;
3821 #if TARGET_ABI_BITS == 32
3822 abi_ulong __unused1;
3823 #endif
3824 abi_ulong msg_rtime;
3825 #if TARGET_ABI_BITS == 32
3826 abi_ulong __unused2;
3827 #endif
3828 abi_ulong msg_ctime;
3829 #if TARGET_ABI_BITS == 32
3830 abi_ulong __unused3;
3831 #endif
3832 abi_ulong __msg_cbytes;
3833 abi_ulong msg_qnum;
3834 abi_ulong msg_qbytes;
3835 abi_ulong msg_lspid;
3836 abi_ulong msg_lrpid;
3837 abi_ulong __unused4;
3838 abi_ulong __unused5;
3841 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3842 abi_ulong target_addr)
3844 struct target_msqid_ds *target_md;
3846 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3847 return -TARGET_EFAULT;
3848 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3849 return -TARGET_EFAULT;
3850 host_md->msg_stime = tswapal(target_md->msg_stime);
3851 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3852 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3853 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3854 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3855 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3856 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3857 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3858 unlock_user_struct(target_md, target_addr, 0);
3859 return 0;
3862 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3863 struct msqid_ds *host_md)
3865 struct target_msqid_ds *target_md;
3867 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3868 return -TARGET_EFAULT;
3869 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3870 return -TARGET_EFAULT;
3871 target_md->msg_stime = tswapal(host_md->msg_stime);
3872 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3873 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3874 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
3875 target_md->msg_qnum = tswapal(host_md->msg_qnum);
3876 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
3877 target_md->msg_lspid = tswapal(host_md->msg_lspid);
3878 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
3879 unlock_user_struct(target_md, target_addr, 1);
3880 return 0;
3883 struct target_msginfo {
3884 int msgpool;
3885 int msgmap;
3886 int msgmax;
3887 int msgmnb;
3888 int msgmni;
3889 int msgssz;
3890 int msgtql;
3891 unsigned short int msgseg;
3894 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
3895 struct msginfo *host_msginfo)
3897 struct target_msginfo *target_msginfo;
3898 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
3899 return -TARGET_EFAULT;
3900 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
3901 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
3902 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
3903 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
3904 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
3905 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
3906 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
3907 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
3908 unlock_user_struct(target_msginfo, target_addr, 1);
3909 return 0;
3912 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
3914 struct msqid_ds dsarg;
3915 struct msginfo msginfo;
3916 abi_long ret = -TARGET_EINVAL;
3918 cmd &= 0xff;
3920 switch (cmd) {
3921 case IPC_STAT:
3922 case IPC_SET:
3923 case MSG_STAT:
3924 if (target_to_host_msqid_ds(&dsarg, ptr))
3925 return -TARGET_EFAULT;
3926 ret = get_errno(msgctl(msgid, cmd, &dsarg));
3927 if (host_to_target_msqid_ds(ptr, &dsarg))
3928 return -TARGET_EFAULT;
3929 break;
3930 case IPC_RMID:
3931 ret = get_errno(msgctl(msgid, cmd, NULL));
3932 break;
3933 case IPC_INFO:
3934 case MSG_INFO:
3935 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
3936 if (host_to_target_msginfo(ptr, &msginfo))
3937 return -TARGET_EFAULT;
3938 break;
3941 return ret;
3944 struct target_msgbuf {
3945 abi_long mtype;
3946 char mtext[1];
3949 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
3950 ssize_t msgsz, int msgflg)
3952 struct target_msgbuf *target_mb;
3953 struct msgbuf *host_mb;
3954 abi_long ret = 0;
3956 if (msgsz < 0) {
3957 return -TARGET_EINVAL;
3960 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
3961 return -TARGET_EFAULT;
3962 host_mb = g_try_malloc(msgsz + sizeof(long));
3963 if (!host_mb) {
3964 unlock_user_struct(target_mb, msgp, 0);
3965 return -TARGET_ENOMEM;
3967 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
3968 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
3969 ret = -TARGET_ENOSYS;
3970 #ifdef __NR_msgsnd
3971 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
3972 #endif
3973 #ifdef __NR_ipc
3974 if (ret == -TARGET_ENOSYS) {
3975 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
3976 host_mb, 0));
3978 #endif
3979 g_free(host_mb);
3980 unlock_user_struct(target_mb, msgp, 0);
3982 return ret;
3985 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
3986 ssize_t msgsz, abi_long msgtyp,
3987 int msgflg)
3989 struct target_msgbuf *target_mb;
3990 char *target_mtext;
3991 struct msgbuf *host_mb;
3992 abi_long ret = 0;
3994 if (msgsz < 0) {
3995 return -TARGET_EINVAL;
3998 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
3999 return -TARGET_EFAULT;
4001 host_mb = g_try_malloc(msgsz + sizeof(long));
4002 if (!host_mb) {
4003 ret = -TARGET_ENOMEM;
4004 goto end;
4006 ret = -TARGET_ENOSYS;
4007 #ifdef __NR_msgrcv
4008 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4009 #endif
4010 #ifdef __NR_ipc
4011 if (ret == -TARGET_ENOSYS) {
4012 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4013 msgflg, host_mb, msgtyp));
4015 #endif
4017 if (ret > 0) {
4018 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4019 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4020 if (!target_mtext) {
4021 ret = -TARGET_EFAULT;
4022 goto end;
4024 memcpy(target_mb->mtext, host_mb->mtext, ret);
4025 unlock_user(target_mtext, target_mtext_addr, ret);
4028 target_mb->mtype = tswapal(host_mb->mtype);
4030 end:
4031 if (target_mb)
4032 unlock_user_struct(target_mb, msgp, 1);
4033 g_free(host_mb);
4034 return ret;
4037 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4038 abi_ulong target_addr)
4040 struct target_shmid_ds *target_sd;
4042 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4043 return -TARGET_EFAULT;
4044 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4045 return -TARGET_EFAULT;
4046 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4047 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4048 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4049 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4050 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4051 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4052 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4053 unlock_user_struct(target_sd, target_addr, 0);
4054 return 0;
4057 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4058 struct shmid_ds *host_sd)
4060 struct target_shmid_ds *target_sd;
4062 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4063 return -TARGET_EFAULT;
4064 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4065 return -TARGET_EFAULT;
4066 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4067 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4068 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4069 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4070 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4071 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4072 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4073 unlock_user_struct(target_sd, target_addr, 1);
4074 return 0;
4077 struct target_shminfo {
4078 abi_ulong shmmax;
4079 abi_ulong shmmin;
4080 abi_ulong shmmni;
4081 abi_ulong shmseg;
4082 abi_ulong shmall;
4085 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4086 struct shminfo *host_shminfo)
4088 struct target_shminfo *target_shminfo;
4089 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4090 return -TARGET_EFAULT;
4091 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4092 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4093 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4094 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4095 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4096 unlock_user_struct(target_shminfo, target_addr, 1);
4097 return 0;
4100 struct target_shm_info {
4101 int used_ids;
4102 abi_ulong shm_tot;
4103 abi_ulong shm_rss;
4104 abi_ulong shm_swp;
4105 abi_ulong swap_attempts;
4106 abi_ulong swap_successes;
4109 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4110 struct shm_info *host_shm_info)
4112 struct target_shm_info *target_shm_info;
4113 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4114 return -TARGET_EFAULT;
4115 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4116 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4117 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4118 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4119 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4120 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4121 unlock_user_struct(target_shm_info, target_addr, 1);
4122 return 0;
4125 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4127 struct shmid_ds dsarg;
4128 struct shminfo shminfo;
4129 struct shm_info shm_info;
4130 abi_long ret = -TARGET_EINVAL;
4132 cmd &= 0xff;
4134 switch(cmd) {
4135 case IPC_STAT:
4136 case IPC_SET:
4137 case SHM_STAT:
4138 if (target_to_host_shmid_ds(&dsarg, buf))
4139 return -TARGET_EFAULT;
4140 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4141 if (host_to_target_shmid_ds(buf, &dsarg))
4142 return -TARGET_EFAULT;
4143 break;
4144 case IPC_INFO:
4145 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4146 if (host_to_target_shminfo(buf, &shminfo))
4147 return -TARGET_EFAULT;
4148 break;
4149 case SHM_INFO:
4150 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4151 if (host_to_target_shm_info(buf, &shm_info))
4152 return -TARGET_EFAULT;
4153 break;
4154 case IPC_RMID:
4155 case SHM_LOCK:
4156 case SHM_UNLOCK:
4157 ret = get_errno(shmctl(shmid, cmd, NULL));
4158 break;
4161 return ret;
4164 #ifndef TARGET_FORCE_SHMLBA
4165 /* For most architectures, SHMLBA is the same as the page size;
4166 * some architectures have larger values, in which case they should
4167 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4168 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4169 * and defining its own value for SHMLBA.
4171 * The kernel also permits SHMLBA to be set by the architecture to a
4172 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4173 * this means that addresses are rounded to the large size if
4174 * SHM_RND is set but addresses not aligned to that size are not rejected
4175 * as long as they are at least page-aligned. Since the only architecture
4176 * which uses this is ia64, this code doesn't provide for that oddity.
4178 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4180 return TARGET_PAGE_SIZE;
4182 #endif
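/*
 * A sketch of how a TARGET_FORCE_SHMLBA port would override the default
 * above (not compiled; the 4-page figure is illustrative, not taken from
 * any real target):
 *
 *     In the target's headers:   #define TARGET_FORCE_SHMLBA 1
 *     ...and in the port's own code:
 */
#if 0
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return 4 * TARGET_PAGE_SIZE;   /* hypothetical arch-specific alignment */
}
#endif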
4184 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4185 int shmid, abi_ulong shmaddr, int shmflg)
4187 abi_long raddr;
4188 void *host_raddr;
4189 struct shmid_ds shm_info;
4190 int i, ret;
4191 abi_ulong shmlba;
4193 /* find out the length of the shared memory segment */
4194 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4195 if (is_error(ret)) {
4196 /* can't get length, bail out */
4197 return ret;
4200 shmlba = target_shmlba(cpu_env);
4202 if (shmaddr & (shmlba - 1)) {
4203 if (shmflg & SHM_RND) {
4204 shmaddr &= ~(shmlba - 1);
4205 } else {
4206 return -TARGET_EINVAL;
4209 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4210 return -TARGET_EINVAL;
4213 mmap_lock();
4215 if (shmaddr)
4216 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4217 else {
4218 abi_ulong mmap_start;
4220 /* In order to use the host shmat, we need to honor host SHMLBA. */
4221 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4223 if (mmap_start == -1) {
4224 errno = ENOMEM;
4225 host_raddr = (void *)-1;
4226 } else
4227 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4230 if (host_raddr == (void *)-1) {
4231 mmap_unlock();
4232 return get_errno((long)host_raddr);
4234 raddr = h2g((unsigned long)host_raddr);
4236 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4237 PAGE_VALID | PAGE_READ |
4238 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4240 for (i = 0; i < N_SHM_REGIONS; i++) {
4241 if (!shm_regions[i].in_use) {
4242 shm_regions[i].in_use = true;
4243 shm_regions[i].start = raddr;
4244 shm_regions[i].size = shm_info.shm_segsz;
4245 break;
4249 mmap_unlock();
4250 return raddr;
4254 static inline abi_long do_shmdt(abi_ulong shmaddr)
4256 int i;
4257 abi_long rv;
4259 mmap_lock();
4261 for (i = 0; i < N_SHM_REGIONS; ++i) {
4262 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4263 shm_regions[i].in_use = false;
4264 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4265 break;
4268 rv = get_errno(shmdt(g2h(shmaddr)));
4270 mmap_unlock();
4272 return rv;
4275 #ifdef TARGET_NR_ipc
4276 /* ??? This only works with linear mappings. */
4277 /* do_ipc() must return target values and target errnos. */
4278 static abi_long do_ipc(CPUArchState *cpu_env,
4279 unsigned int call, abi_long first,
4280 abi_long second, abi_long third,
4281 abi_long ptr, abi_long fifth)
4283 int version;
4284 abi_long ret = 0;
4286 version = call >> 16;
4287 call &= 0xffff;
4289 switch (call) {
4290 case IPCOP_semop:
4291 ret = do_semop(first, ptr, second);
4292 break;
4294 case IPCOP_semget:
4295 ret = get_errno(semget(first, second, third));
4296 break;
4298 case IPCOP_semctl: {
4299 /* The semun argument to semctl is passed by value, so dereference the
4300 * ptr argument. */
4301 abi_ulong atptr;
4302 get_user_ual(atptr, ptr);
4303 ret = do_semctl(first, second, third, atptr);
4304 break;
4307 case IPCOP_msgget:
4308 ret = get_errno(msgget(first, second));
4309 break;
4311 case IPCOP_msgsnd:
4312 ret = do_msgsnd(first, ptr, second, third);
4313 break;
4315 case IPCOP_msgctl:
4316 ret = do_msgctl(first, second, ptr);
4317 break;
4319 case IPCOP_msgrcv:
4320 switch (version) {
4321 case 0:
4323 struct target_ipc_kludge {
4324 abi_long msgp;
4325 abi_long msgtyp;
4326 } *tmp;
4328 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4329 ret = -TARGET_EFAULT;
4330 break;
4333 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4335 unlock_user_struct(tmp, ptr, 0);
4336 break;
4338 default:
4339 ret = do_msgrcv(first, ptr, second, fifth, third);
4341 break;
4343 case IPCOP_shmat:
4344 switch (version) {
4345 default:
4347 abi_ulong raddr;
4348 raddr = do_shmat(cpu_env, first, ptr, second);
4349 if (is_error(raddr))
4350 return get_errno(raddr);
4351 if (put_user_ual(raddr, third))
4352 return -TARGET_EFAULT;
4353 break;
4355 case 1:
4356 ret = -TARGET_EINVAL;
4357 break;
4359 break;
4360 case IPCOP_shmdt:
4361 ret = do_shmdt(ptr);
4362 break;
4364 case IPCOP_shmget:
4365 /* IPC_* flag values are the same on all linux platforms */
4366 ret = get_errno(shmget(first, second, third));
4367 break;
4369 /* IPC_* and SHM_* command values are the same on all linux platforms */
4370 case IPCOP_shmctl:
4371 ret = do_shmctl(first, second, ptr);
4372 break;
4373 default:
4374 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4375 call, version);
4376 ret = -TARGET_ENOSYS;
4377 break;
4379 return ret;
4381 #endif
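/*
 * Illustrative decode of the multiplexed call word handled by do_ipc()
 * above, assuming IPCOP_CALL(version, op) packs as (version << 16) | op,
 * which is exactly what the "call >> 16" / "call & 0xffff" split undoes:
 */
#if 0
unsigned int call = IPCOP_CALL(1, IPCOP_msgrcv);
int version = call >> 16;        /* 1: "new style" argument layout */
int op      = call & 0xffff;     /* IPCOP_msgrcv                   */
#endif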
4383 /* kernel structure types definitions */
4385 #define STRUCT(name, ...) STRUCT_ ## name,
4386 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4387 enum {
4388 #include "syscall_types.h"
4389 STRUCT_MAX
4391 #undef STRUCT
4392 #undef STRUCT_SPECIAL
4394 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4395 #define STRUCT_SPECIAL(name)
4396 #include "syscall_types.h"
4397 #undef STRUCT
4398 #undef STRUCT_SPECIAL
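/*
 * The two passes over syscall_types.h above are an X-macro: the first
 * builds an enum of STRUCT_<name> tags, the second emits one argtype
 * array per struct.  For a hypothetical entry
 * STRUCT(foo, TYPE_INT, TYPE_SHORT) the passes would expand to roughly:
 */
#if 0
/* pass 1 (enum member):  */  STRUCT_foo,
/* pass 2 (type descr.):  */  static const argtype struct_foo_def[] = {
                                  TYPE_INT, TYPE_SHORT, TYPE_NULL };
#endif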
4400 typedef struct IOCTLEntry IOCTLEntry;
4402 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4403 int fd, int cmd, abi_long arg);
4405 struct IOCTLEntry {
4406 int target_cmd;
4407 unsigned int host_cmd;
4408 const char *name;
4409 int access;
4410 do_ioctl_fn *do_ioctl;
4411 const argtype arg_type[5];
4414 #define IOC_R 0x0001
4415 #define IOC_W 0x0002
4416 #define IOC_RW (IOC_R | IOC_W)
4418 #define MAX_STRUCT_SIZE 4096
4420 #ifdef CONFIG_FIEMAP
4421 /* So fiemap access checks don't overflow on 32-bit systems.
4422 * This is very slightly smaller than the limit imposed by
4423 * the underlying kernel.
4425 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4426 / sizeof(struct fiemap_extent))
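/*
 * Illustrative arithmetic for the cap above: with fm_extent_count no
 * larger than FIEMAP_MAX_EXTENTS, the buffer size computed later in
 * do_ioctl_fs_ioc_fiemap() stays at or below UINT_MAX, so the 32-bit
 * computation cannot wrap:
 */
#if 0
uint32_t outbufsz = sizeof(struct fiemap)
                    + fm_extent_count * sizeof(struct fiemap_extent);
#endif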
4428 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4429 int fd, int cmd, abi_long arg)
4431 /* The parameter for this ioctl is a struct fiemap followed
4432 * by an array of struct fiemap_extent whose size is set
4433 * in fiemap->fm_extent_count. The array is filled in by the
4434 * ioctl.
4436 int target_size_in, target_size_out;
4437 struct fiemap *fm;
4438 const argtype *arg_type = ie->arg_type;
4439 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4440 void *argptr, *p;
4441 abi_long ret;
4442 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4443 uint32_t outbufsz;
4444 int free_fm = 0;
4446 assert(arg_type[0] == TYPE_PTR);
4447 assert(ie->access == IOC_RW);
4448 arg_type++;
4449 target_size_in = thunk_type_size(arg_type, 0);
4450 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4451 if (!argptr) {
4452 return -TARGET_EFAULT;
4454 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4455 unlock_user(argptr, arg, 0);
4456 fm = (struct fiemap *)buf_temp;
4457 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4458 return -TARGET_EINVAL;
4461 outbufsz = sizeof (*fm) +
4462 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4464 if (outbufsz > MAX_STRUCT_SIZE) {
4465 /* We can't fit all the extents into the fixed size buffer.
4466 * Allocate one that is large enough and use it instead.
4468 fm = g_try_malloc(outbufsz);
4469 if (!fm) {
4470 return -TARGET_ENOMEM;
4472 memcpy(fm, buf_temp, sizeof(struct fiemap));
4473 free_fm = 1;
4475 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4476 if (!is_error(ret)) {
4477 target_size_out = target_size_in;
4478 /* An extent_count of 0 means we were only counting the extents,
4479 * so there are no structs to copy
4481 if (fm->fm_extent_count != 0) {
4482 target_size_out += fm->fm_mapped_extents * extent_size;
4484 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4485 if (!argptr) {
4486 ret = -TARGET_EFAULT;
4487 } else {
4488 /* Convert the struct fiemap */
4489 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4490 if (fm->fm_extent_count != 0) {
4491 p = argptr + target_size_in;
4492 /* ...and then all the struct fiemap_extents */
4493 for (i = 0; i < fm->fm_mapped_extents; i++) {
4494 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4495 THUNK_TARGET);
4496 p += extent_size;
4499 unlock_user(argptr, arg, target_size_out);
4502 if (free_fm) {
4503 g_free(fm);
4505 return ret;
4507 #endif
4509 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4510 int fd, int cmd, abi_long arg)
4512 const argtype *arg_type = ie->arg_type;
4513 int target_size;
4514 void *argptr;
4515 int ret;
4516 struct ifconf *host_ifconf;
4517 uint32_t outbufsz;
4518 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4519 int target_ifreq_size;
4520 int nb_ifreq;
4521 int free_buf = 0;
4522 int i;
4523 int target_ifc_len;
4524 abi_long target_ifc_buf;
4525 int host_ifc_len;
4526 char *host_ifc_buf;
4528 assert(arg_type[0] == TYPE_PTR);
4529 assert(ie->access == IOC_RW);
4531 arg_type++;
4532 target_size = thunk_type_size(arg_type, 0);
4534 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4535 if (!argptr)
4536 return -TARGET_EFAULT;
4537 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4538 unlock_user(argptr, arg, 0);
4540 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4541 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4542 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4544 if (target_ifc_buf != 0) {
4545 target_ifc_len = host_ifconf->ifc_len;
4546 nb_ifreq = target_ifc_len / target_ifreq_size;
4547 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4549 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4550 if (outbufsz > MAX_STRUCT_SIZE) {
4552 * We can't fit all the ifreq entries into the fixed size buffer.
4553 * Allocate one that is large enough and use it instead.
4555 host_ifconf = g_try_malloc(outbufsz);
4556 if (!host_ifconf) {
4557 return -TARGET_ENOMEM;
4559 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4560 free_buf = 1;
4562 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4564 host_ifconf->ifc_len = host_ifc_len;
4565 } else {
4566 host_ifc_buf = NULL;
4568 host_ifconf->ifc_buf = host_ifc_buf;
4570 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4571 if (!is_error(ret)) {
4572 /* convert host ifc_len to target ifc_len */
4574 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4575 target_ifc_len = nb_ifreq * target_ifreq_size;
4576 host_ifconf->ifc_len = target_ifc_len;
4578 /* restore target ifc_buf */
4580 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4582 /* copy struct ifconf to target user */
4584 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4585 if (!argptr)
4586 return -TARGET_EFAULT;
4587 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4588 unlock_user(argptr, arg, target_size);
4590 if (target_ifc_buf != 0) {
4591 /* copy ifreq[] to target user */
4592 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4593 for (i = 0; i < nb_ifreq ; i++) {
4594 thunk_convert(argptr + i * target_ifreq_size,
4595 host_ifc_buf + i * sizeof(struct ifreq),
4596 ifreq_arg_type, THUNK_TARGET);
4598 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4602 if (free_buf) {
4603 g_free(host_ifconf);
4606 return ret;
4609 #if defined(CONFIG_USBFS)
4610 #if HOST_LONG_BITS > 64
4611 #error USBDEVFS thunks do not support >64 bit hosts yet.
4612 #endif
4613 struct live_urb {
4614 uint64_t target_urb_adr;
4615 uint64_t target_buf_adr;
4616 char *target_buf_ptr;
4617 struct usbdevfs_urb host_urb;
4620 static GHashTable *usbdevfs_urb_hashtable(void)
4622 static GHashTable *urb_hashtable;
4624 if (!urb_hashtable) {
4625 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4627 return urb_hashtable;
4630 static void urb_hashtable_insert(struct live_urb *urb)
4632 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4633 g_hash_table_insert(urb_hashtable, urb, urb);
4636 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4638 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4639 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4642 static void urb_hashtable_remove(struct live_urb *urb)
4644 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4645 g_hash_table_remove(urb_hashtable, urb);
4648 static abi_long
4649 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4650 int fd, int cmd, abi_long arg)
4652 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4653 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4654 struct live_urb *lurb;
4655 void *argptr;
4656 uint64_t hurb;
4657 int target_size;
4658 uintptr_t target_urb_adr;
4659 abi_long ret;
4661 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4663 memset(buf_temp, 0, sizeof(uint64_t));
4664 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4665 if (is_error(ret)) {
4666 return ret;
4669 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4670 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4671 if (!lurb->target_urb_adr) {
4672 return -TARGET_EFAULT;
4674 urb_hashtable_remove(lurb);
4675 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4676 lurb->host_urb.buffer_length);
4677 lurb->target_buf_ptr = NULL;
4679 /* restore the guest buffer pointer */
4680 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4682 /* update the guest urb struct */
4683 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4684 if (!argptr) {
4685 g_free(lurb);
4686 return -TARGET_EFAULT;
4688 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4689 unlock_user(argptr, lurb->target_urb_adr, target_size);
4691 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4692 /* write back the urb handle */
4693 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4694 if (!argptr) {
4695 g_free(lurb);
4696 return -TARGET_EFAULT;
4699 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4700 target_urb_adr = lurb->target_urb_adr;
4701 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4702 unlock_user(argptr, arg, target_size);
4704 g_free(lurb);
4705 return ret;
4708 static abi_long
4709 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4710 uint8_t *buf_temp __attribute__((unused)),
4711 int fd, int cmd, abi_long arg)
4713 struct live_urb *lurb;
4715 /* map target address back to host URB with metadata. */
4716 lurb = urb_hashtable_lookup(arg);
4717 if (!lurb) {
4718 return -TARGET_EFAULT;
4720 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4723 static abi_long
4724 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4725 int fd, int cmd, abi_long arg)
4727 const argtype *arg_type = ie->arg_type;
4728 int target_size;
4729 abi_long ret;
4730 void *argptr;
4731 int rw_dir;
4732 struct live_urb *lurb;
4735 * each submitted URB needs to map to a unique ID for the
4736 * kernel, and that unique ID needs to be a pointer to
4737 * host memory. Hence, we need to malloc for each URB.
4738 * Isochronous transfers have a variable-length struct.
4740 arg_type++;
4741 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4743 /* construct host copy of urb and metadata */
4744 lurb = g_try_malloc0(sizeof(struct live_urb));
4745 if (!lurb) {
4746 return -TARGET_ENOMEM;
4749 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4750 if (!argptr) {
4751 g_free(lurb);
4752 return -TARGET_EFAULT;
4754 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4755 unlock_user(argptr, arg, 0);
4757 lurb->target_urb_adr = arg;
4758 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4760 /* buffer space used depends on endpoint type so lock the entire buffer */
4761 /* control type urbs should check the buffer contents for true direction */
4762 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4763 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4764 lurb->host_urb.buffer_length, 1);
4765 if (lurb->target_buf_ptr == NULL) {
4766 g_free(lurb);
4767 return -TARGET_EFAULT;
4770 /* update buffer pointer in host copy */
4771 lurb->host_urb.buffer = lurb->target_buf_ptr;
4773 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4774 if (is_error(ret)) {
4775 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4776 g_free(lurb);
4777 } else {
4778 urb_hashtable_insert(lurb);
4781 return ret;
4783 #endif /* CONFIG_USBFS */
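/*
 * A standalone sketch of the offsetof() recovery used by
 * do_ioctl_usbdevfs_reapurb() above: because every usbdevfs_urb handed
 * to the kernel is embedded in a live_urb, the kernel's pointer can be
 * walked back to the enclosing bookkeeping struct (the classic
 * container_of idiom; not compiled):
 */
#if 0
struct usbdevfs_urb *hurb = /* pointer returned by the reap ioctl */;
struct live_urb *lurb =
    (struct live_urb *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
#endif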
4785 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4786 int cmd, abi_long arg)
4788 void *argptr;
4789 struct dm_ioctl *host_dm;
4790 abi_long guest_data;
4791 uint32_t guest_data_size;
4792 int target_size;
4793 const argtype *arg_type = ie->arg_type;
4794 abi_long ret;
4795 void *big_buf = NULL;
4796 char *host_data;
4798 arg_type++;
4799 target_size = thunk_type_size(arg_type, 0);
4800 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4801 if (!argptr) {
4802 ret = -TARGET_EFAULT;
4803 goto out;
4805 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4806 unlock_user(argptr, arg, 0);
4808 /* buf_temp is too small, so fetch things into a bigger buffer */
4809 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4810 memcpy(big_buf, buf_temp, target_size);
4811 buf_temp = big_buf;
4812 host_dm = big_buf;
4814 guest_data = arg + host_dm->data_start;
4815 if ((guest_data - arg) < 0) {
4816 ret = -TARGET_EINVAL;
4817 goto out;
4819 guest_data_size = host_dm->data_size - host_dm->data_start;
4820 host_data = (char*)host_dm + host_dm->data_start;
4822 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4823 if (!argptr) {
4824 ret = -TARGET_EFAULT;
4825 goto out;
4828 switch (ie->host_cmd) {
4829 case DM_REMOVE_ALL:
4830 case DM_LIST_DEVICES:
4831 case DM_DEV_CREATE:
4832 case DM_DEV_REMOVE:
4833 case DM_DEV_SUSPEND:
4834 case DM_DEV_STATUS:
4835 case DM_DEV_WAIT:
4836 case DM_TABLE_STATUS:
4837 case DM_TABLE_CLEAR:
4838 case DM_TABLE_DEPS:
4839 case DM_LIST_VERSIONS:
4840 /* no input data */
4841 break;
4842 case DM_DEV_RENAME:
4843 case DM_DEV_SET_GEOMETRY:
4844 /* data contains only strings */
4845 memcpy(host_data, argptr, guest_data_size);
4846 break;
4847 case DM_TARGET_MSG:
4848 memcpy(host_data, argptr, guest_data_size);
4849 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4850 break;
4851 case DM_TABLE_LOAD:
4853 void *gspec = argptr;
4854 void *cur_data = host_data;
4855 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4856 int spec_size = thunk_type_size(arg_type, 0);
4857 int i;
4859 for (i = 0; i < host_dm->target_count; i++) {
4860 struct dm_target_spec *spec = cur_data;
4861 uint32_t next;
4862 int slen;
4864 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
4865 slen = strlen((char*)gspec + spec_size) + 1;
4866 next = spec->next;
4867 spec->next = sizeof(*spec) + slen;
4868 strcpy((char*)&spec[1], gspec + spec_size);
4869 gspec += next;
4870 cur_data += spec->next;
4872 break;
4874 default:
4875 ret = -TARGET_EINVAL;
4876 unlock_user(argptr, guest_data, 0);
4877 goto out;
4879 unlock_user(argptr, guest_data, 0);
4881 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4882 if (!is_error(ret)) {
4883 guest_data = arg + host_dm->data_start;
4884 guest_data_size = host_dm->data_size - host_dm->data_start;
4885 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
4886 switch (ie->host_cmd) {
4887 case DM_REMOVE_ALL:
4888 case DM_DEV_CREATE:
4889 case DM_DEV_REMOVE:
4890 case DM_DEV_RENAME:
4891 case DM_DEV_SUSPEND:
4892 case DM_DEV_STATUS:
4893 case DM_TABLE_LOAD:
4894 case DM_TABLE_CLEAR:
4895 case DM_TARGET_MSG:
4896 case DM_DEV_SET_GEOMETRY:
4897 /* no return data */
4898 break;
4899 case DM_LIST_DEVICES:
4901 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
4902 uint32_t remaining_data = guest_data_size;
4903 void *cur_data = argptr;
4904 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
4905 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
4907 while (1) {
4908 uint32_t next = nl->next;
4909 if (next) {
4910 nl->next = nl_size + (strlen(nl->name) + 1);
4912 if (remaining_data < nl->next) {
4913 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4914 break;
4916 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
4917 strcpy(cur_data + nl_size, nl->name);
4918 cur_data += nl->next;
4919 remaining_data -= nl->next;
4920 if (!next) {
4921 break;
4923 nl = (void*)nl + next;
4925 break;
4927 case DM_DEV_WAIT:
4928 case DM_TABLE_STATUS:
4930 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
4931 void *cur_data = argptr;
4932 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4933 int spec_size = thunk_type_size(arg_type, 0);
4934 int i;
4936 for (i = 0; i < host_dm->target_count; i++) {
4937 uint32_t next = spec->next;
4938 int slen = strlen((char*)&spec[1]) + 1;
4939 spec->next = (cur_data - argptr) + spec_size + slen;
4940 if (guest_data_size < spec->next) {
4941 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4942 break;
4944 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
4945 strcpy(cur_data + spec_size, (char*)&spec[1]);
4946 cur_data = argptr + spec->next;
4947 spec = (void*)host_dm + host_dm->data_start + next;
4949 break;
4951 case DM_TABLE_DEPS:
4953 void *hdata = (void*)host_dm + host_dm->data_start;
4954 int count = *(uint32_t*)hdata;
4955 uint64_t *hdev = hdata + 8;
4956 uint64_t *gdev = argptr + 8;
4957 int i;
4959 *(uint32_t*)argptr = tswap32(count);
4960 for (i = 0; i < count; i++) {
4961 *gdev = tswap64(*hdev);
4962 gdev++;
4963 hdev++;
4965 break;
4967 case DM_LIST_VERSIONS:
4969 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
4970 uint32_t remaining_data = guest_data_size;
4971 void *cur_data = argptr;
4972 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
4973 int vers_size = thunk_type_size(arg_type, 0);
4975 while (1) {
4976 uint32_t next = vers->next;
4977 if (next) {
4978 vers->next = vers_size + (strlen(vers->name) + 1);
4980 if (remaining_data < vers->next) {
4981 host_dm->flags |= DM_BUFFER_FULL_FLAG;
4982 break;
4984 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
4985 strcpy(cur_data + vers_size, vers->name);
4986 cur_data += vers->next;
4987 remaining_data -= vers->next;
4988 if (!next) {
4989 break;
4991 vers = (void*)vers + next;
4993 break;
4995 default:
4996 unlock_user(argptr, guest_data, 0);
4997 ret = -TARGET_EINVAL;
4998 goto out;
5000 unlock_user(argptr, guest_data, guest_data_size);
5002 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5003 if (!argptr) {
5004 ret = -TARGET_EFAULT;
5005 goto out;
5007 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5008 unlock_user(argptr, arg, target_size);
5010 out:
5011 g_free(big_buf);
5012 return ret;
5015 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5016 int cmd, abi_long arg)
5018 void *argptr;
5019 int target_size;
5020 const argtype *arg_type = ie->arg_type;
5021 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5022 abi_long ret;
5024 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5025 struct blkpg_partition host_part;
5027 /* Read and convert blkpg */
5028 arg_type++;
5029 target_size = thunk_type_size(arg_type, 0);
5030 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5031 if (!argptr) {
5032 ret = -TARGET_EFAULT;
5033 goto out;
5035 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5036 unlock_user(argptr, arg, 0);
5038 switch (host_blkpg->op) {
5039 case BLKPG_ADD_PARTITION:
5040 case BLKPG_DEL_PARTITION:
5041 /* payload is struct blkpg_partition */
5042 break;
5043 default:
5044 /* Unknown opcode */
5045 ret = -TARGET_EINVAL;
5046 goto out;
5049 /* Read and convert blkpg->data */
5050 arg = (abi_long)(uintptr_t)host_blkpg->data;
5051 target_size = thunk_type_size(part_arg_type, 0);
5052 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5053 if (!argptr) {
5054 ret = -TARGET_EFAULT;
5055 goto out;
5057 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5058 unlock_user(argptr, arg, 0);
5060 /* Swizzle the data pointer to our local copy and call! */
5061 host_blkpg->data = &host_part;
5062 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5064 out:
5065 return ret;
5068 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5069 int fd, int cmd, abi_long arg)
5071 const argtype *arg_type = ie->arg_type;
5072 const StructEntry *se;
5073 const argtype *field_types;
5074 const int *dst_offsets, *src_offsets;
5075 int target_size;
5076 void *argptr;
5077 abi_ulong *target_rt_dev_ptr = NULL;
5078 unsigned long *host_rt_dev_ptr = NULL;
5079 abi_long ret;
5080 int i;
5082 assert(ie->access == IOC_W);
5083 assert(*arg_type == TYPE_PTR);
5084 arg_type++;
5085 assert(*arg_type == TYPE_STRUCT);
5086 target_size = thunk_type_size(arg_type, 0);
5087 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5088 if (!argptr) {
5089 return -TARGET_EFAULT;
5091 arg_type++;
5092 assert(*arg_type == (int)STRUCT_rtentry);
5093 se = struct_entries + *arg_type++;
5094 assert(se->convert[0] == NULL);
5095 /* Convert the struct here so that we can catch the rt_dev string. */
5096 field_types = se->field_types;
5097 dst_offsets = se->field_offsets[THUNK_HOST];
5098 src_offsets = se->field_offsets[THUNK_TARGET];
5099 for (i = 0; i < se->nb_fields; i++) {
5100 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5101 assert(*field_types == TYPE_PTRVOID);
5102 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5103 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5104 if (*target_rt_dev_ptr != 0) {
5105 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5106 tswapal(*target_rt_dev_ptr));
5107 if (!*host_rt_dev_ptr) {
5108 unlock_user(argptr, arg, 0);
5109 return -TARGET_EFAULT;
5111 } else {
5112 *host_rt_dev_ptr = 0;
5114 field_types++;
5115 continue;
5117 field_types = thunk_convert(buf_temp + dst_offsets[i],
5118 argptr + src_offsets[i],
5119 field_types, THUNK_HOST);
5121 unlock_user(argptr, arg, 0);
5123 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5125 assert(host_rt_dev_ptr != NULL);
5126 assert(target_rt_dev_ptr != NULL);
5127 if (*host_rt_dev_ptr != 0) {
5128 unlock_user((void *)*host_rt_dev_ptr,
5129 *target_rt_dev_ptr, 0);
5131 return ret;
5134 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5135 int fd, int cmd, abi_long arg)
5137 int sig = target_to_host_signal(arg);
5138 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5141 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5142 int fd, int cmd, abi_long arg)
5144 struct timeval tv;
5145 abi_long ret;
5147 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5148 if (is_error(ret)) {
5149 return ret;
5152 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5153 if (copy_to_user_timeval(arg, &tv)) {
5154 return -TARGET_EFAULT;
5156 } else {
5157 if (copy_to_user_timeval64(arg, &tv)) {
5158 return -TARGET_EFAULT;
5162 return ret;
5165 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5166 int fd, int cmd, abi_long arg)
5168 struct timespec ts;
5169 abi_long ret;
5171 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5172 if (is_error(ret)) {
5173 return ret;
5176 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5177 if (host_to_target_timespec(arg, &ts)) {
5178 return -TARGET_EFAULT;
5180 } else {
5181 if (host_to_target_timespec64(arg, &ts)) {
5182 return -TARGET_EFAULT;
5186 return ret;
5189 #ifdef TIOCGPTPEER
5190 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5191 int fd, int cmd, abi_long arg)
5193 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5194 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5196 #endif
5198 static IOCTLEntry ioctl_entries[] = {
5199 #define IOCTL(cmd, access, ...) \
5200 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5201 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5202 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5203 #define IOCTL_IGNORE(cmd) \
5204 { TARGET_ ## cmd, 0, #cmd },
5205 #include "ioctls.h"
5206 { 0, 0, },
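/*
 * Illustrative expansion of one ioctls.h entry under the macros above;
 * the specific command is a representative example, not a quote of the
 * actual table:
 *
 *   IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))
 *
 * becomes roughly:
 */
#if 0
{ TARGET_BLKROSET, BLKROSET, "BLKROSET", IOC_W, 0, { MK_PTR(TYPE_INT) } },
#endif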
5209 /* ??? Implement proper locking for ioctls. */
5210 /* do_ioctl() must return target values and target errnos. */
5211 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5213 const IOCTLEntry *ie;
5214 const argtype *arg_type;
5215 abi_long ret;
5216 uint8_t buf_temp[MAX_STRUCT_SIZE];
5217 int target_size;
5218 void *argptr;
5220 ie = ioctl_entries;
5221 for(;;) {
5222 if (ie->target_cmd == 0) {
5223 qemu_log_mask(
5224 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5225 return -TARGET_ENOSYS;
5227 if (ie->target_cmd == cmd)
5228 break;
5229 ie++;
5231 arg_type = ie->arg_type;
5232 if (ie->do_ioctl) {
5233 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5234 } else if (!ie->host_cmd) {
5235 /* Some architectures define BSD ioctls in their headers
5236 that are not implemented in Linux. */
5237 return -TARGET_ENOSYS;
5240 switch(arg_type[0]) {
5241 case TYPE_NULL:
5242 /* no argument */
5243 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5244 break;
5245 case TYPE_PTRVOID:
5246 case TYPE_INT:
5247 case TYPE_LONG:
5248 case TYPE_ULONG:
5249 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5250 break;
5251 case TYPE_PTR:
5252 arg_type++;
5253 target_size = thunk_type_size(arg_type, 0);
5254 switch(ie->access) {
5255 case IOC_R:
5256 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5257 if (!is_error(ret)) {
5258 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5259 if (!argptr)
5260 return -TARGET_EFAULT;
5261 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5262 unlock_user(argptr, arg, target_size);
5264 break;
5265 case IOC_W:
5266 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5267 if (!argptr)
5268 return -TARGET_EFAULT;
5269 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5270 unlock_user(argptr, arg, 0);
5271 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5272 break;
5273 default:
5274 case IOC_RW:
5275 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5276 if (!argptr)
5277 return -TARGET_EFAULT;
5278 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5279 unlock_user(argptr, arg, 0);
5280 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5281 if (!is_error(ret)) {
5282 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5283 if (!argptr)
5284 return -TARGET_EFAULT;
5285 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5286 unlock_user(argptr, arg, target_size);
5288 break;
5290 break;
5291 default:
5292 qemu_log_mask(LOG_UNIMP,
5293 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5294 (long)cmd, arg_type[0]);
5295 ret = -TARGET_ENOSYS;
5296 break;
5298 return ret;
5301 static const bitmask_transtbl iflag_tbl[] = {
5302 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5303 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5304 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5305 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5306 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5307 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5308 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5309 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5310 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5311 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5312 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5313 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5314 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5315 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5316 { 0, 0, 0, 0 }
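/*
 * How a bitmask_transtbl row is consumed (a sketch of the contract; the
 * real target_to_host_bitmask() helper lives elsewhere in linux-user and
 * its field names may differ):
 */
#if 0
unsigned int host_flags = 0;
for (const bitmask_transtbl *t = iflag_tbl; t->target_mask; t++) {
    if ((target_flags & t->target_mask) == t->target_bits) {
        host_flags |= t->host_bits;    /* matched row: emit host encoding */
    }
}
#endif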
5319 static const bitmask_transtbl oflag_tbl[] = {
5320 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5321 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5322 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5323 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5324 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5325 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5326 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5327 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5328 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5329 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5330 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5331 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5332 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5333 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5334 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5335 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5336 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5337 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5338 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5339 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5340 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5341 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5342 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5343 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5344 { 0, 0, 0, 0 }
5347 static const bitmask_transtbl cflag_tbl[] = {
5348 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5349 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5350 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5351 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5352 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5353 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5354 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5355 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5356 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5357 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5358 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5359 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5360 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5361 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5362 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5363 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5364 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5365 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5366 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5367 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5368 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5369 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5370 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5371 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5372 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5373 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5374 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5375 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5376 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5377 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5378 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5379 { 0, 0, 0, 0 }
5382 static const bitmask_transtbl lflag_tbl[] = {
5383 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5384 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5385 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5386 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5387 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5388 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5389 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5390 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5391 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5392 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5393 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5394 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5395 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5396 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5397 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5398 { 0, 0, 0, 0 }
5401 static void target_to_host_termios (void *dst, const void *src)
5403 struct host_termios *host = dst;
5404 const struct target_termios *target = src;
5406 host->c_iflag =
5407 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5408 host->c_oflag =
5409 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5410 host->c_cflag =
5411 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5412 host->c_lflag =
5413 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5414 host->c_line = target->c_line;
5416 memset(host->c_cc, 0, sizeof(host->c_cc));
5417 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5418 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5419 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5420 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5421 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5422 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5423 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5424 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5425 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5426 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5427 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5428 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5429 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5430 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5431 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5432 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5433 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5436 static void host_to_target_termios (void *dst, const void *src)
5438 struct target_termios *target = dst;
5439 const struct host_termios *host = src;
5441 target->c_iflag =
5442 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5443 target->c_oflag =
5444 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5445 target->c_cflag =
5446 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5447 target->c_lflag =
5448 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5449 target->c_line = host->c_line;
5451 memset(target->c_cc, 0, sizeof(target->c_cc));
5452 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5453 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5454 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5455 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5456 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5457 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5458 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5459 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5460 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5461 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5462 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5463 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5464 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5465 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5466 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5467 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5468 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5471 static const StructEntry struct_termios_def = {
5472 .convert = { host_to_target_termios, target_to_host_termios },
5473 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5474 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
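/*
 * Because the termios flag encodings differ bit-for-bit between target
 * and host, struct_termios_def supplies explicit .convert callbacks
 * instead of relying on the generic field-by-field thunk.  A sketch of
 * how the thunk layer would dispatch on them (illustrative, not the
 * real dispatch code in thunk.c):
 */
#if 0
if (se->convert[0] != NULL) {
    se->convert[to_host](dst, src);   /* e.g. target_to_host_termios */
} else {
    /* generic per-field conversion driven by se->field_types */
}
#endif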
5477 static bitmask_transtbl mmap_flags_tbl[] = {
5478 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5479 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5480 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5481 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5482 MAP_ANONYMOUS, MAP_ANONYMOUS },
5483 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5484 MAP_GROWSDOWN, MAP_GROWSDOWN },
5485 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5486 MAP_DENYWRITE, MAP_DENYWRITE },
5487 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5488 MAP_EXECUTABLE, MAP_EXECUTABLE },
5489 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5490 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5491 MAP_NORESERVE, MAP_NORESERVE },
5492 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5493 /* MAP_STACK had been ignored by the kernel for quite some time.
5494 Recognize it for the target insofar as we do not want to pass
5495 it through to the host. */
5496 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5497 { 0, 0, 0, 0 }
5500 #if defined(TARGET_I386)
5502 /* NOTE: there is really one LDT for all the threads */
5503 static uint8_t *ldt_table;
5505 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5507 int size;
5508 void *p;
5510 if (!ldt_table)
5511 return 0;
5512 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5513 if (size > bytecount)
5514 size = bytecount;
5515 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5516 if (!p)
5517 return -TARGET_EFAULT;
5518 /* ??? Should this be byteswapped? */
5519 memcpy(p, ldt_table, size);
5520 unlock_user(p, ptr, size);
5521 return size;
5524 /* XXX: add locking support */
5525 static abi_long write_ldt(CPUX86State *env,
5526 abi_ulong ptr, unsigned long bytecount, int oldmode)
5528 struct target_modify_ldt_ldt_s ldt_info;
5529 struct target_modify_ldt_ldt_s *target_ldt_info;
5530 int seg_32bit, contents, read_exec_only, limit_in_pages;
5531 int seg_not_present, useable, lm;
5532 uint32_t *lp, entry_1, entry_2;
5534 if (bytecount != sizeof(ldt_info))
5535 return -TARGET_EINVAL;
5536 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5537 return -TARGET_EFAULT;
5538 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5539 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5540 ldt_info.limit = tswap32(target_ldt_info->limit);
5541 ldt_info.flags = tswap32(target_ldt_info->flags);
5542 unlock_user_struct(target_ldt_info, ptr, 0);
5544 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5545 return -TARGET_EINVAL;
5546 seg_32bit = ldt_info.flags & 1;
5547 contents = (ldt_info.flags >> 1) & 3;
5548 read_exec_only = (ldt_info.flags >> 3) & 1;
5549 limit_in_pages = (ldt_info.flags >> 4) & 1;
5550 seg_not_present = (ldt_info.flags >> 5) & 1;
5551 useable = (ldt_info.flags >> 6) & 1;
5552 #ifdef TARGET_ABI32
5553 lm = 0;
5554 #else
5555 lm = (ldt_info.flags >> 7) & 1;
5556 #endif
5557 if (contents == 3) {
5558 if (oldmode)
5559 return -TARGET_EINVAL;
5560 if (seg_not_present == 0)
5561 return -TARGET_EINVAL;
5563 /* allocate the LDT */
5564 if (!ldt_table) {
5565 env->ldt.base = target_mmap(0,
5566 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5567 PROT_READ|PROT_WRITE,
5568 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5569 if (env->ldt.base == -1)
5570 return -TARGET_ENOMEM;
5571 memset(g2h(env->ldt.base), 0,
5572 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5573 env->ldt.limit = 0xffff;
5574 ldt_table = g2h(env->ldt.base);
5577 /* NOTE: same code as Linux kernel */
5578 /* Allow LDTs to be cleared by the user. */
5579 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5580 if (oldmode ||
5581 (contents == 0 &&
5582 read_exec_only == 1 &&
5583 seg_32bit == 0 &&
5584 limit_in_pages == 0 &&
5585 seg_not_present == 1 &&
5586 useable == 0 )) {
5587 entry_1 = 0;
5588 entry_2 = 0;
5589 goto install;
5593 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5594 (ldt_info.limit & 0x0ffff);
5595 entry_2 = (ldt_info.base_addr & 0xff000000) |
5596 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5597 (ldt_info.limit & 0xf0000) |
5598 ((read_exec_only ^ 1) << 9) |
5599 (contents << 10) |
5600 ((seg_not_present ^ 1) << 15) |
5601 (seg_32bit << 22) |
5602 (limit_in_pages << 23) |
5603 (lm << 21) |
5604 0x7000;
5605 if (!oldmode)
5606 entry_2 |= (useable << 20);
5608 /* Install the new entry ... */
5609 install:
5610 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5611 lp[0] = tswap32(entry_1);
5612 lp[1] = tswap32(entry_2);
5613 return 0;
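/*
 * Layout of the two descriptor words assembled above (illustrative
 * summary; the same packing recurs in do_set_thread_area() below, and
 * do_get_thread_area() performs the inverse decode):
 *
 *   entry_1: [31:16] base[15:0]      [15:0] limit[15:0]
 *   entry_2: [31:24] base[31:24]     [23] limit_in_pages  [22] seg_32bit
 *            [21] lm  [20] useable   [19:16] limit[19:16] [15] present
 *            [14:13] dpl=3 and [12] S=1 (the 0x7000)
 *            [11:9]  type bits from contents/read_exec_only
 *            [7:0]   base[23:16]
 */
#if 0
uint32_t base = (entry_1 >> 16)
                | ((entry_2 & 0xff) << 16)
                | (entry_2 & 0xff000000);   /* reassemble the base address */
#endif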
5616 /* specific and weird i386 syscalls */
5617 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5618 unsigned long bytecount)
5620 abi_long ret;
5622 switch (func) {
5623 case 0:
5624 ret = read_ldt(ptr, bytecount);
5625 break;
5626 case 1:
5627 ret = write_ldt(env, ptr, bytecount, 1);
5628 break;
5629 case 0x11:
5630 ret = write_ldt(env, ptr, bytecount, 0);
5631 break;
5632 default:
5633 ret = -TARGET_ENOSYS;
5634 break;
5636 return ret;
5639 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5640 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5642 uint64_t *gdt_table = g2h(env->gdt.base);
5643 struct target_modify_ldt_ldt_s ldt_info;
5644 struct target_modify_ldt_ldt_s *target_ldt_info;
5645 int seg_32bit, contents, read_exec_only, limit_in_pages;
5646 int seg_not_present, useable, lm;
5647 uint32_t *lp, entry_1, entry_2;
5648 int i;
5650 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5651 if (!target_ldt_info)
5652 return -TARGET_EFAULT;
5653 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5654 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5655 ldt_info.limit = tswap32(target_ldt_info->limit);
5656 ldt_info.flags = tswap32(target_ldt_info->flags);
5657 if (ldt_info.entry_number == -1) {
5658 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5659 if (gdt_table[i] == 0) {
5660 ldt_info.entry_number = i;
5661 target_ldt_info->entry_number = tswap32(i);
5662 break;
5666 unlock_user_struct(target_ldt_info, ptr, 1);
5668 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5669 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5670 return -TARGET_EINVAL;
5671 seg_32bit = ldt_info.flags & 1;
5672 contents = (ldt_info.flags >> 1) & 3;
5673 read_exec_only = (ldt_info.flags >> 3) & 1;
5674 limit_in_pages = (ldt_info.flags >> 4) & 1;
5675 seg_not_present = (ldt_info.flags >> 5) & 1;
5676 useable = (ldt_info.flags >> 6) & 1;
5677 #ifdef TARGET_ABI32
5678 lm = 0;
5679 #else
5680 lm = (ldt_info.flags >> 7) & 1;
5681 #endif
5683 if (contents == 3) {
5684 if (seg_not_present == 0)
5685 return -TARGET_EINVAL;
5688 /* NOTE: same code as Linux kernel */
5689 /* Allow TLS entries to be cleared by the user. */
5690 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5691 if ((contents == 0 &&
5692 read_exec_only == 1 &&
5693 seg_32bit == 0 &&
5694 limit_in_pages == 0 &&
5695 seg_not_present == 1 &&
5696 useable == 0 )) {
5697 entry_1 = 0;
5698 entry_2 = 0;
5699 goto install;
5703 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5704 (ldt_info.limit & 0x0ffff);
5705 entry_2 = (ldt_info.base_addr & 0xff000000) |
5706 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5707 (ldt_info.limit & 0xf0000) |
5708 ((read_exec_only ^ 1) << 9) |
5709 (contents << 10) |
5710 ((seg_not_present ^ 1) << 15) |
5711 (seg_32bit << 22) |
5712 (limit_in_pages << 23) |
5713 (useable << 20) |
5714 (lm << 21) |
5715 0x7000;
5717 /* Install the new entry ... */
5718 install:
5719 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5720 lp[0] = tswap32(entry_1);
5721 lp[1] = tswap32(entry_2);
5722 return 0;
5725 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5727 struct target_modify_ldt_ldt_s *target_ldt_info;
5728 uint64_t *gdt_table = g2h(env->gdt.base);
5729 uint32_t base_addr, limit, flags;
5730 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5731 int seg_not_present, useable, lm;
5732 uint32_t *lp, entry_1, entry_2;
5734 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5735 if (!target_ldt_info)
5736 return -TARGET_EFAULT;
5737 idx = tswap32(target_ldt_info->entry_number);
5738 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5739 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5740 unlock_user_struct(target_ldt_info, ptr, 1);
5741 return -TARGET_EINVAL;
5743 lp = (uint32_t *)(gdt_table + idx);
5744 entry_1 = tswap32(lp[0]);
5745 entry_2 = tswap32(lp[1]);
5747 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5748 contents = (entry_2 >> 10) & 3;
5749 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5750 seg_32bit = (entry_2 >> 22) & 1;
5751 limit_in_pages = (entry_2 >> 23) & 1;
5752 useable = (entry_2 >> 20) & 1;
5753 #ifdef TARGET_ABI32
5754 lm = 0;
5755 #else
5756 lm = (entry_2 >> 21) & 1;
5757 #endif
5758 flags = (seg_32bit << 0) | (contents << 1) |
5759 (read_exec_only << 3) | (limit_in_pages << 4) |
5760 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5761 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5762 base_addr = (entry_1 >> 16) |
5763 (entry_2 & 0xff000000) |
5764 ((entry_2 & 0xff) << 16);
5765 target_ldt_info->base_addr = tswapal(base_addr);
5766 target_ldt_info->limit = tswap32(limit);
5767 target_ldt_info->flags = tswap32(flags);
5768 unlock_user_struct(target_ldt_info, ptr, 1);
5769 return 0;
5771 #endif /* TARGET_I386 && TARGET_ABI32 */
5773 #ifndef TARGET_ABI32
5774 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5776 abi_long ret = 0;
5777 abi_ulong val;
5778 int idx;
5780 switch(code) {
5781 case TARGET_ARCH_SET_GS:
5782 case TARGET_ARCH_SET_FS:
5783 if (code == TARGET_ARCH_SET_GS)
5784 idx = R_GS;
5785 else
5786 idx = R_FS;
5787 cpu_x86_load_seg(env, idx, 0);
5788 env->segs[idx].base = addr;
5789 break;
5790 case TARGET_ARCH_GET_GS:
5791 case TARGET_ARCH_GET_FS:
5792 if (code == TARGET_ARCH_GET_GS)
5793 idx = R_GS;
5794 else
5795 idx = R_FS;
5796 val = env->segs[idx].base;
5797 if (put_user(val, addr, abi_ulong))
5798 ret = -TARGET_EFAULT;
5799 break;
5800 default:
5801 ret = -TARGET_EINVAL;
5802 break;
5804 return ret;
5806 #endif
5808 #endif /* defined(TARGET_I386) */
5810 #define NEW_STACK_SIZE 0x40000
5813 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
5814 typedef struct {
5815 CPUArchState *env;
5816 pthread_mutex_t mutex;
5817 pthread_cond_t cond;
5818 pthread_t thread;
5819 uint32_t tid;
5820 abi_ulong child_tidptr;
5821 abi_ulong parent_tidptr;
5822 sigset_t sigmask;
5823 } new_thread_info;
5825 static void *clone_func(void *arg)
5827 new_thread_info *info = arg;
5828 CPUArchState *env;
5829 CPUState *cpu;
5830 TaskState *ts;
5832 rcu_register_thread();
5833 tcg_register_thread();
5834 env = info->env;
5835 cpu = env_cpu(env);
5836 thread_cpu = cpu;
5837 ts = (TaskState *)cpu->opaque;
5838 info->tid = sys_gettid();
5839 task_settid(ts);
5840 if (info->child_tidptr)
5841 put_user_u32(info->tid, info->child_tidptr);
5842 if (info->parent_tidptr)
5843 put_user_u32(info->tid, info->parent_tidptr);
5844 qemu_guest_random_seed_thread_part2(cpu->random_seed);
5845 /* Enable signals. */
5846 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5847 /* Signal to the parent that we're ready. */
5848 pthread_mutex_lock(&info->mutex);
5849 pthread_cond_broadcast(&info->cond);
5850 pthread_mutex_unlock(&info->mutex);
5851 /* Wait until the parent has finished initializing the tls state. */
5852 pthread_mutex_lock(&clone_lock);
5853 pthread_mutex_unlock(&clone_lock);
5854 cpu_loop(env);
5855 /* never exits */
5856 return NULL;
5859 /* do_fork() must return host values and target errnos (unlike most
5860 do_*() functions). */
5861 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
5862 abi_ulong parent_tidptr, target_ulong newtls,
5863 abi_ulong child_tidptr)
5865 CPUState *cpu = env_cpu(env);
5866 int ret;
5867 TaskState *ts;
5868 CPUState *new_cpu;
5869 CPUArchState *new_env;
5870 sigset_t sigmask;
5872 flags &= ~CLONE_IGNORED_FLAGS;
5874 /* Emulate vfork() with fork() */
5875 if (flags & CLONE_VFORK)
5876 flags &= ~(CLONE_VFORK | CLONE_VM);
5878 if (flags & CLONE_VM) {
5879 TaskState *parent_ts = (TaskState *)cpu->opaque;
5880 new_thread_info info;
5881 pthread_attr_t attr;
5883 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
5884 (flags & CLONE_INVALID_THREAD_FLAGS)) {
5885 return -TARGET_EINVAL;
5888 ts = g_new0(TaskState, 1);
5889 init_task_state(ts);
5891 /* Grab a mutex so that thread setup appears atomic. */
5892 pthread_mutex_lock(&clone_lock);
5894 /* we create a new CPU instance. */
5895 new_env = cpu_copy(env);
5896 /* Init regs that differ from the parent. */
5897 cpu_clone_regs_child(new_env, newsp, flags);
5898 cpu_clone_regs_parent(env, flags);
5899 new_cpu = env_cpu(new_env);
5900 new_cpu->opaque = ts;
5901 ts->bprm = parent_ts->bprm;
5902 ts->info = parent_ts->info;
5903 ts->signal_mask = parent_ts->signal_mask;
5905 if (flags & CLONE_CHILD_CLEARTID) {
5906 ts->child_tidptr = child_tidptr;
5909 if (flags & CLONE_SETTLS) {
5910 cpu_set_tls (new_env, newtls);
5913 memset(&info, 0, sizeof(info));
5914 pthread_mutex_init(&info.mutex, NULL);
5915 pthread_mutex_lock(&info.mutex);
5916 pthread_cond_init(&info.cond, NULL);
5917 info.env = new_env;
5918 if (flags & CLONE_CHILD_SETTID) {
5919 info.child_tidptr = child_tidptr;
5921 if (flags & CLONE_PARENT_SETTID) {
5922 info.parent_tidptr = parent_tidptr;
5925 ret = pthread_attr_init(&attr);
5926 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
5927 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
5928 /* It is not safe to deliver signals until the child has finished
5929 initializing, so temporarily block all signals. */
5930 sigfillset(&sigmask);
5931 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
5932 cpu->random_seed = qemu_guest_random_seed_thread_part1();
5934 /* If this is our first additional thread, we need to ensure we
5935 * generate code for parallel execution and flush old translations.
5936 */
5937 if (!parallel_cpus) {
5938 parallel_cpus = true;
5939 tb_flush(cpu);
5942 ret = pthread_create(&info.thread, &attr, clone_func, &info);
5943 /* TODO: Free new CPU state if thread creation failed. */
5945 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
5946 pthread_attr_destroy(&attr);
5947 if (ret == 0) {
5948 /* Wait for the child to initialize. */
5949 pthread_cond_wait(&info.cond, &info.mutex);
5950 ret = info.tid;
5951 } else {
5952 ret = -1;
5954 pthread_mutex_unlock(&info.mutex);
5955 pthread_cond_destroy(&info.cond);
5956 pthread_mutex_destroy(&info.mutex);
5957 pthread_mutex_unlock(&clone_lock);
5958 } else {
5959 /* if CLONE_VM is not set, we treat it as a fork */
5960 if (flags & CLONE_INVALID_FORK_FLAGS) {
5961 return -TARGET_EINVAL;
5964 /* We can't support custom termination signals */
5965 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
5966 return -TARGET_EINVAL;
5969 if (block_signals()) {
5970 return -TARGET_ERESTARTSYS;
5973 fork_start();
5974 ret = fork();
5975 if (ret == 0) {
5976 /* Child Process. */
5977 cpu_clone_regs_child(env, newsp, flags);
5978 fork_end(1);
5979 /* There is a race condition here. The parent process could
5980 theoretically read the TID in the child process before the child
5981 tid is set. This would require using either ptrace
5982 (not implemented) or having *_tidptr to point at a shared memory
5983 mapping. We can't repeat the spinlock hack used above because
5984 the child process gets its own copy of the lock. */
5985 if (flags & CLONE_CHILD_SETTID)
5986 put_user_u32(sys_gettid(), child_tidptr);
5987 if (flags & CLONE_PARENT_SETTID)
5988 put_user_u32(sys_gettid(), parent_tidptr);
5989 ts = (TaskState *)cpu->opaque;
5990 if (flags & CLONE_SETTLS)
5991 cpu_set_tls (env, newtls);
5992 if (flags & CLONE_CHILD_CLEARTID)
5993 ts->child_tidptr = child_tidptr;
5994 } else {
5995 cpu_clone_regs_parent(env, flags);
5996 fork_end(0);
5999 return ret;
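/*
 * Editor's illustration (not part of the original source): the parent/child
 * synchronisation used above, reduced to a standalone sketch. The parent
 * holds the mutex across pthread_create(), so the child's broadcast cannot
 * be missed; names prefixed example_ are hypothetical.
 */
#if 0 /* example only */
#include <pthread.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int tid;
} example_info;

static void *example_child(void *arg)
{
    example_info *info = arg;
    info->tid = 42;                       /* publish result before signalling */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);  /* wake the waiting parent */
    pthread_mutex_unlock(&info->mutex);
    return NULL;
}

static int example_parent(void)
{
    example_info info = { PTHREAD_MUTEX_INITIALIZER,
                          PTHREAD_COND_INITIALIZER, 0 };
    pthread_t thr;

    pthread_mutex_lock(&info.mutex);            /* lock before create */
    pthread_create(&thr, NULL, example_child, &info);
    pthread_cond_wait(&info.cond, &info.mutex); /* atomically releases mutex */
    pthread_mutex_unlock(&info.mutex);
    return info.tid;                            /* now guaranteed set */
}
#endif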
6002 /* warning: doesn't handle Linux-specific flags... */
6003 static int target_to_host_fcntl_cmd(int cmd)
6005 int ret;
6007 switch(cmd) {
6008 case TARGET_F_DUPFD:
6009 case TARGET_F_GETFD:
6010 case TARGET_F_SETFD:
6011 case TARGET_F_GETFL:
6012 case TARGET_F_SETFL:
6013 ret = cmd;
6014 break;
6015 case TARGET_F_GETLK:
6016 ret = F_GETLK64;
6017 break;
6018 case TARGET_F_SETLK:
6019 ret = F_SETLK64;
6020 break;
6021 case TARGET_F_SETLKW:
6022 ret = F_SETLKW64;
6023 break;
6024 case TARGET_F_GETOWN:
6025 ret = F_GETOWN;
6026 break;
6027 case TARGET_F_SETOWN:
6028 ret = F_SETOWN;
6029 break;
6030 case TARGET_F_GETSIG:
6031 ret = F_GETSIG;
6032 break;
6033 case TARGET_F_SETSIG:
6034 ret = F_SETSIG;
6035 break;
6036 #if TARGET_ABI_BITS == 32
6037 case TARGET_F_GETLK64:
6038 ret = F_GETLK64;
6039 break;
6040 case TARGET_F_SETLK64:
6041 ret = F_SETLK64;
6042 break;
6043 case TARGET_F_SETLKW64:
6044 ret = F_SETLKW64;
6045 break;
6046 #endif
6047 case TARGET_F_SETLEASE:
6048 ret = F_SETLEASE;
6049 break;
6050 case TARGET_F_GETLEASE:
6051 ret = F_GETLEASE;
6052 break;
6053 #ifdef F_DUPFD_CLOEXEC
6054 case TARGET_F_DUPFD_CLOEXEC:
6055 ret = F_DUPFD_CLOEXEC;
6056 break;
6057 #endif
6058 case TARGET_F_NOTIFY:
6059 ret = F_NOTIFY;
6060 break;
6061 #ifdef F_GETOWN_EX
6062 case TARGET_F_GETOWN_EX:
6063 ret = F_GETOWN_EX;
6064 break;
6065 #endif
6066 #ifdef F_SETOWN_EX
6067 case TARGET_F_SETOWN_EX:
6068 ret = F_SETOWN_EX;
6069 break;
6070 #endif
6071 #ifdef F_SETPIPE_SZ
6072 case TARGET_F_SETPIPE_SZ:
6073 ret = F_SETPIPE_SZ;
6074 break;
6075 case TARGET_F_GETPIPE_SZ:
6076 ret = F_GETPIPE_SZ;
6077 break;
6078 #endif
6079 default:
6080 ret = -TARGET_EINVAL;
6081 break;
6084 #if defined(__powerpc64__)
6085 /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6086 * the kernel does not support. glibc's fcntl() adjusts them to 5, 6
6087 * and 7 before making the syscall. Since we make the syscall directly,
6088 * adjust to what the kernel supports.
6089 */
6090 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6091 ret -= F_GETLK64 - 5;
6093 #endif
6095 return ret;
6098 #define FLOCK_TRANSTBL \
6099 switch (type) { \
6100 TRANSTBL_CONVERT(F_RDLCK); \
6101 TRANSTBL_CONVERT(F_WRLCK); \
6102 TRANSTBL_CONVERT(F_UNLCK); \
6103 TRANSTBL_CONVERT(F_EXLCK); \
6104 TRANSTBL_CONVERT(F_SHLCK); \
6107 static int target_to_host_flock(int type)
6109 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6110 FLOCK_TRANSTBL
6111 #undef TRANSTBL_CONVERT
6112 return -TARGET_EINVAL;
6115 static int host_to_target_flock(int type)
6117 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6118 FLOCK_TRANSTBL
6119 #undef TRANSTBL_CONVERT
6120 /* if we don't know how to convert the value coming
6121 * from the host, we copy it to the target field as-is
6122 */
6123 return type;
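/*
 * Editor's illustration (not part of the original source): what the
 * TRANSTBL_CONVERT X-macro expands FLOCK_TRANSTBL into, written out by
 * hand for the target-to-host direction:
 */
#if 0 /* example only */
static int example_target_to_host_flock(int type)
{
    switch (type) {
    case TARGET_F_RDLCK: return F_RDLCK;
    case TARGET_F_WRLCK: return F_WRLCK;
    case TARGET_F_UNLCK: return F_UNLCK;
    case TARGET_F_EXLCK: return F_EXLCK;
    case TARGET_F_SHLCK: return F_SHLCK;
    }
    return -TARGET_EINVAL;
}
#endif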
6126 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6127 abi_ulong target_flock_addr)
6129 struct target_flock *target_fl;
6130 int l_type;
6132 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6133 return -TARGET_EFAULT;
6136 __get_user(l_type, &target_fl->l_type);
6137 l_type = target_to_host_flock(l_type);
6138 if (l_type < 0) {
6139 return l_type;
6141 fl->l_type = l_type;
6142 __get_user(fl->l_whence, &target_fl->l_whence);
6143 __get_user(fl->l_start, &target_fl->l_start);
6144 __get_user(fl->l_len, &target_fl->l_len);
6145 __get_user(fl->l_pid, &target_fl->l_pid);
6146 unlock_user_struct(target_fl, target_flock_addr, 0);
6147 return 0;
6150 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6151 const struct flock64 *fl)
6153 struct target_flock *target_fl;
6154 short l_type;
6156 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6157 return -TARGET_EFAULT;
6160 l_type = host_to_target_flock(fl->l_type);
6161 __put_user(l_type, &target_fl->l_type);
6162 __put_user(fl->l_whence, &target_fl->l_whence);
6163 __put_user(fl->l_start, &target_fl->l_start);
6164 __put_user(fl->l_len, &target_fl->l_len);
6165 __put_user(fl->l_pid, &target_fl->l_pid);
6166 unlock_user_struct(target_fl, target_flock_addr, 1);
6167 return 0;
6170 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6171 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6173 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6174 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6175 abi_ulong target_flock_addr)
6177 struct target_oabi_flock64 *target_fl;
6178 int l_type;
6180 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6181 return -TARGET_EFAULT;
6184 __get_user(l_type, &target_fl->l_type);
6185 l_type = target_to_host_flock(l_type);
6186 if (l_type < 0) {
6187 return l_type;
6189 fl->l_type = l_type;
6190 __get_user(fl->l_whence, &target_fl->l_whence);
6191 __get_user(fl->l_start, &target_fl->l_start);
6192 __get_user(fl->l_len, &target_fl->l_len);
6193 __get_user(fl->l_pid, &target_fl->l_pid);
6194 unlock_user_struct(target_fl, target_flock_addr, 0);
6195 return 0;
6198 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6199 const struct flock64 *fl)
6201 struct target_oabi_flock64 *target_fl;
6202 short l_type;
6204 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6205 return -TARGET_EFAULT;
6208 l_type = host_to_target_flock(fl->l_type);
6209 __put_user(l_type, &target_fl->l_type);
6210 __put_user(fl->l_whence, &target_fl->l_whence);
6211 __put_user(fl->l_start, &target_fl->l_start);
6212 __put_user(fl->l_len, &target_fl->l_len);
6213 __put_user(fl->l_pid, &target_fl->l_pid);
6214 unlock_user_struct(target_fl, target_flock_addr, 1);
6215 return 0;
6217 #endif
6219 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6220 abi_ulong target_flock_addr)
6222 struct target_flock64 *target_fl;
6223 int l_type;
6225 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6226 return -TARGET_EFAULT;
6229 __get_user(l_type, &target_fl->l_type);
6230 l_type = target_to_host_flock(l_type);
6231 if (l_type < 0) {
6232 return l_type;
6234 fl->l_type = l_type;
6235 __get_user(fl->l_whence, &target_fl->l_whence);
6236 __get_user(fl->l_start, &target_fl->l_start);
6237 __get_user(fl->l_len, &target_fl->l_len);
6238 __get_user(fl->l_pid, &target_fl->l_pid);
6239 unlock_user_struct(target_fl, target_flock_addr, 0);
6240 return 0;
6243 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6244 const struct flock64 *fl)
6246 struct target_flock64 *target_fl;
6247 short l_type;
6249 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6250 return -TARGET_EFAULT;
6253 l_type = host_to_target_flock(fl->l_type);
6254 __put_user(l_type, &target_fl->l_type);
6255 __put_user(fl->l_whence, &target_fl->l_whence);
6256 __put_user(fl->l_start, &target_fl->l_start);
6257 __put_user(fl->l_len, &target_fl->l_len);
6258 __put_user(fl->l_pid, &target_fl->l_pid);
6259 unlock_user_struct(target_fl, target_flock_addr, 1);
6260 return 0;
6263 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6265 struct flock64 fl64;
6266 #ifdef F_GETOWN_EX
6267 struct f_owner_ex fox;
6268 struct target_f_owner_ex *target_fox;
6269 #endif
6270 abi_long ret;
6271 int host_cmd = target_to_host_fcntl_cmd(cmd);
6273 if (host_cmd == -TARGET_EINVAL)
6274 return host_cmd;
6276 switch(cmd) {
6277 case TARGET_F_GETLK:
6278 ret = copy_from_user_flock(&fl64, arg);
6279 if (ret) {
6280 return ret;
6282 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6283 if (ret == 0) {
6284 ret = copy_to_user_flock(arg, &fl64);
6286 break;
6288 case TARGET_F_SETLK:
6289 case TARGET_F_SETLKW:
6290 ret = copy_from_user_flock(&fl64, arg);
6291 if (ret) {
6292 return ret;
6294 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6295 break;
6297 case TARGET_F_GETLK64:
6298 ret = copy_from_user_flock64(&fl64, arg);
6299 if (ret) {
6300 return ret;
6302 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6303 if (ret == 0) {
6304 ret = copy_to_user_flock64(arg, &fl64);
6306 break;
6307 case TARGET_F_SETLK64:
6308 case TARGET_F_SETLKW64:
6309 ret = copy_from_user_flock64(&fl64, arg);
6310 if (ret) {
6311 return ret;
6313 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6314 break;
6316 case TARGET_F_GETFL:
6317 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6318 if (ret >= 0) {
6319 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6321 break;
6323 case TARGET_F_SETFL:
6324 ret = get_errno(safe_fcntl(fd, host_cmd,
6325 target_to_host_bitmask(arg,
6326 fcntl_flags_tbl)));
6327 break;
6329 #ifdef F_GETOWN_EX
6330 case TARGET_F_GETOWN_EX:
6331 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6332 if (ret >= 0) {
6333 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6334 return -TARGET_EFAULT;
6335 target_fox->type = tswap32(fox.type);
6336 target_fox->pid = tswap32(fox.pid);
6337 unlock_user_struct(target_fox, arg, 1);
6339 break;
6340 #endif
6342 #ifdef F_SETOWN_EX
6343 case TARGET_F_SETOWN_EX:
6344 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6345 return -TARGET_EFAULT;
6346 fox.type = tswap32(target_fox->type);
6347 fox.pid = tswap32(target_fox->pid);
6348 unlock_user_struct(target_fox, arg, 0);
6349 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6350 break;
6351 #endif
6353 case TARGET_F_SETOWN:
6354 case TARGET_F_GETOWN:
6355 case TARGET_F_SETSIG:
6356 case TARGET_F_GETSIG:
6357 case TARGET_F_SETLEASE:
6358 case TARGET_F_GETLEASE:
6359 case TARGET_F_SETPIPE_SZ:
6360 case TARGET_F_GETPIPE_SZ:
6361 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6362 break;
6364 default:
6365 ret = get_errno(safe_fcntl(fd, cmd, arg));
6366 break;
6368 return ret;
6371 #ifdef USE_UID16
6373 static inline int high2lowuid(int uid)
6375 if (uid > 65535)
6376 return 65534;
6377 else
6378 return uid;
6381 static inline int high2lowgid(int gid)
6383 if (gid > 65535)
6384 return 65534;
6385 else
6386 return gid;
6389 static inline int low2highuid(int uid)
6391 if ((int16_t)uid == -1)
6392 return -1;
6393 else
6394 return uid;
6397 static inline int low2highgid(int gid)
6399 if ((int16_t)gid == -1)
6400 return -1;
6401 else
6402 return gid;
6404 static inline int tswapid(int id)
6406 return tswap16(id);
6409 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6411 #else /* !USE_UID16 */
6412 static inline int high2lowuid(int uid)
6414 return uid;
6416 static inline int high2lowgid(int gid)
6418 return gid;
6420 static inline int low2highuid(int uid)
6422 return uid;
6424 static inline int low2highgid(int gid)
6426 return gid;
6428 static inline int tswapid(int id)
6430 return tswap32(id);
6433 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6435 #endif /* USE_UID16 */
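/*
 * Editor's illustration (not part of the original source): the intended
 * behaviour of the helpers above when USE_UID16 is defined:
 */
#if 0 /* example only */
#include <assert.h>
static void example_uid16_mapping(void)
{
    assert(high2lowuid(1000)   == 1000);   /* fits in 16 bits: unchanged   */
    assert(high2lowuid(100000) == 65534);  /* too large: clamp to overflow */
    assert(low2highuid(0xffff) == -1);     /* 16-bit -1 means "no change"  */
    assert(low2highuid(1000)   == 1000);
}
#endif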
6437 /* We must do direct syscalls for setting UID/GID, because we want to
6438 * implement the Linux system call semantics of "change only for this thread",
6439 * not the libc/POSIX semantics of "change for all threads in process".
6440 * (See http://ewontfix.com/17/ for more details.)
6441 * We use the 32-bit version of the syscalls if present; if it is not
6442 * then either the host architecture supports 32-bit UIDs natively with
6443 * the standard syscall, or the 16-bit UID is the best we can do.
6444 */
6445 #ifdef __NR_setuid32
6446 #define __NR_sys_setuid __NR_setuid32
6447 #else
6448 #define __NR_sys_setuid __NR_setuid
6449 #endif
6450 #ifdef __NR_setgid32
6451 #define __NR_sys_setgid __NR_setgid32
6452 #else
6453 #define __NR_sys_setgid __NR_setgid
6454 #endif
6455 #ifdef __NR_setresuid32
6456 #define __NR_sys_setresuid __NR_setresuid32
6457 #else
6458 #define __NR_sys_setresuid __NR_setresuid
6459 #endif
6460 #ifdef __NR_setresgid32
6461 #define __NR_sys_setresgid __NR_setresgid32
6462 #else
6463 #define __NR_sys_setresgid __NR_setresgid
6464 #endif
6466 _syscall1(int, sys_setuid, uid_t, uid)
6467 _syscall1(int, sys_setgid, gid_t, gid)
6468 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6469 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
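/*
 * Editor's illustration (not part of the original source): the _syscall
 * wrappers above are equivalent to issuing the raw syscall yourself, which
 * changes credentials only for the calling thread; glibc's setuid() would
 * instead broadcast the change to every thread in the process:
 */
#if 0 /* example only */
#include <sys/syscall.h>
#include <unistd.h>

static int example_setuid_this_thread(uid_t uid)
{
    /* __NR_sys_setuid is defined above to the 32-bit variant if present */
    return syscall(__NR_sys_setuid, uid);
}
#endif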
6471 void syscall_init(void)
6473 IOCTLEntry *ie;
6474 const argtype *arg_type;
6475 int size;
6476 int i;
6478 thunk_init(STRUCT_MAX);
6480 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6481 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6482 #include "syscall_types.h"
6483 #undef STRUCT
6484 #undef STRUCT_SPECIAL
6486 /* Build the target_to_host_errno_table[] from
6487 * host_to_target_errno_table[]. */
6488 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6489 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6492 /* we patch the ioctl size if necessary. We rely on the fact that
6493 no ioctl has all the bits at '1' in the size field */
6494 ie = ioctl_entries;
6495 while (ie->target_cmd != 0) {
6496 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6497 TARGET_IOC_SIZEMASK) {
6498 arg_type = ie->arg_type;
6499 if (arg_type[0] != TYPE_PTR) {
6500 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6501 ie->target_cmd);
6502 exit(1);
6504 arg_type++;
6505 size = thunk_type_size(arg_type, 0);
6506 ie->target_cmd = (ie->target_cmd &
6507 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6508 (size << TARGET_IOC_SIZESHIFT);
6511 /* automatic consistency check if same arch */
6512 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6513 (defined(__x86_64__) && defined(TARGET_X86_64))
6514 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6515 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6516 ie->name, ie->target_cmd, ie->host_cmd);
6518 #endif
6519 ie++;
6523 #if TARGET_ABI_BITS == 32
6524 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
6526 #ifdef TARGET_WORDS_BIGENDIAN
6527 return ((uint64_t)word0 << 32) | word1;
6528 #else
6529 return ((uint64_t)word1 << 32) | word0;
6530 #endif
6532 #else /* TARGET_ABI_BITS == 32 */
6533 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
6535 return word0;
6537 #endif /* TARGET_ABI_BITS != 32 */
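/*
 * Editor's illustration (not part of the original source): on a 32-bit ABI
 * a 64-bit file offset arrives as a register pair whose order depends on
 * guest endianness; a worked value for the little-endian branch above:
 */
#if 0 /* example only */
#include <assert.h>
static void example_offset64(void)
{
    /* word0 = low half, word1 = high half on a little-endian guest */
    assert(target_offset64(0x89abcdef, 0x01234567) == 0x0123456789abcdefULL);
}
#endif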
6539 #ifdef TARGET_NR_truncate64
6540 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6541 abi_long arg2,
6542 abi_long arg3,
6543 abi_long arg4)
6545 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6546 arg2 = arg3;
6547 arg3 = arg4;
6549 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6551 #endif
6553 #ifdef TARGET_NR_ftruncate64
6554 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6555 abi_long arg2,
6556 abi_long arg3,
6557 abi_long arg4)
6559 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6560 arg2 = arg3;
6561 arg3 = arg4;
6563 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6565 #endif
6567 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6568 abi_ulong target_addr)
6570 struct target_itimerspec *target_itspec;
6572 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6573 return -TARGET_EFAULT;
6576 host_itspec->it_interval.tv_sec =
6577 tswapal(target_itspec->it_interval.tv_sec);
6578 host_itspec->it_interval.tv_nsec =
6579 tswapal(target_itspec->it_interval.tv_nsec);
6580 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6581 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6583 unlock_user_struct(target_itspec, target_addr, 1);
6584 return 0;
6587 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6588 struct itimerspec *host_its)
6590 struct target_itimerspec *target_itspec;
6592 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6593 return -TARGET_EFAULT;
6596 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6597 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6599 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6600 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6602 unlock_user_struct(target_itspec, target_addr, 0);
6603 return 0;
6606 static inline abi_long target_to_host_timex(struct timex *host_tx,
6607 abi_long target_addr)
6609 struct target_timex *target_tx;
6611 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6612 return -TARGET_EFAULT;
6615 __get_user(host_tx->modes, &target_tx->modes);
6616 __get_user(host_tx->offset, &target_tx->offset);
6617 __get_user(host_tx->freq, &target_tx->freq);
6618 __get_user(host_tx->maxerror, &target_tx->maxerror);
6619 __get_user(host_tx->esterror, &target_tx->esterror);
6620 __get_user(host_tx->status, &target_tx->status);
6621 __get_user(host_tx->constant, &target_tx->constant);
6622 __get_user(host_tx->precision, &target_tx->precision);
6623 __get_user(host_tx->tolerance, &target_tx->tolerance);
6624 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6625 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6626 __get_user(host_tx->tick, &target_tx->tick);
6627 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6628 __get_user(host_tx->jitter, &target_tx->jitter);
6629 __get_user(host_tx->shift, &target_tx->shift);
6630 __get_user(host_tx->stabil, &target_tx->stabil);
6631 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6632 __get_user(host_tx->calcnt, &target_tx->calcnt);
6633 __get_user(host_tx->errcnt, &target_tx->errcnt);
6634 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6635 __get_user(host_tx->tai, &target_tx->tai);
6637 unlock_user_struct(target_tx, target_addr, 0);
6638 return 0;
6641 static inline abi_long host_to_target_timex(abi_long target_addr,
6642 struct timex *host_tx)
6644 struct target_timex *target_tx;
6646 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6647 return -TARGET_EFAULT;
6650 __put_user(host_tx->modes, &target_tx->modes);
6651 __put_user(host_tx->offset, &target_tx->offset);
6652 __put_user(host_tx->freq, &target_tx->freq);
6653 __put_user(host_tx->maxerror, &target_tx->maxerror);
6654 __put_user(host_tx->esterror, &target_tx->esterror);
6655 __put_user(host_tx->status, &target_tx->status);
6656 __put_user(host_tx->constant, &target_tx->constant);
6657 __put_user(host_tx->precision, &target_tx->precision);
6658 __put_user(host_tx->tolerance, &target_tx->tolerance);
6659 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6660 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6661 __put_user(host_tx->tick, &target_tx->tick);
6662 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6663 __put_user(host_tx->jitter, &target_tx->jitter);
6664 __put_user(host_tx->shift, &target_tx->shift);
6665 __put_user(host_tx->stabil, &target_tx->stabil);
6666 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6667 __put_user(host_tx->calcnt, &target_tx->calcnt);
6668 __put_user(host_tx->errcnt, &target_tx->errcnt);
6669 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6670 __put_user(host_tx->tai, &target_tx->tai);
6672 unlock_user_struct(target_tx, target_addr, 1);
6673 return 0;
6677 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6678 abi_ulong target_addr)
6680 struct target_sigevent *target_sevp;
6682 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6683 return -TARGET_EFAULT;
6686 /* This union is awkward on 64 bit systems because it has a 32 bit
6687 * integer and a pointer in it; we follow the conversion approach
6688 * used for handling sigval types in signal.c so the guest should get
6689 * the correct value back even if we did a 64 bit byteswap and it's
6690 * using the 32 bit integer.
6691 */
6692 host_sevp->sigev_value.sival_ptr =
6693 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6694 host_sevp->sigev_signo =
6695 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6696 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6697 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6699 unlock_user_struct(target_sevp, target_addr, 1);
6700 return 0;
6703 #if defined(TARGET_NR_mlockall)
6704 static inline int target_to_host_mlockall_arg(int arg)
6706 int result = 0;
6708 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6709 result |= MCL_CURRENT;
6711 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6712 result |= MCL_FUTURE;
6714 return result;
6716 #endif
6718 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
6719 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
6720 defined(TARGET_NR_newfstatat))
6721 static inline abi_long host_to_target_stat64(void *cpu_env,
6722 abi_ulong target_addr,
6723 struct stat *host_st)
6725 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6726 if (((CPUARMState *)cpu_env)->eabi) {
6727 struct target_eabi_stat64 *target_st;
6729 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6730 return -TARGET_EFAULT;
6731 memset(target_st, 0, sizeof(struct target_eabi_stat64));
6732 __put_user(host_st->st_dev, &target_st->st_dev);
6733 __put_user(host_st->st_ino, &target_st->st_ino);
6734 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6735 __put_user(host_st->st_ino, &target_st->__st_ino);
6736 #endif
6737 __put_user(host_st->st_mode, &target_st->st_mode);
6738 __put_user(host_st->st_nlink, &target_st->st_nlink);
6739 __put_user(host_st->st_uid, &target_st->st_uid);
6740 __put_user(host_st->st_gid, &target_st->st_gid);
6741 __put_user(host_st->st_rdev, &target_st->st_rdev);
6742 __put_user(host_st->st_size, &target_st->st_size);
6743 __put_user(host_st->st_blksize, &target_st->st_blksize);
6744 __put_user(host_st->st_blocks, &target_st->st_blocks);
6745 __put_user(host_st->st_atime, &target_st->target_st_atime);
6746 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6747 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6748 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6749 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6750 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6751 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6752 #endif
6753 unlock_user_struct(target_st, target_addr, 1);
6754 } else
6755 #endif
6757 #if defined(TARGET_HAS_STRUCT_STAT64)
6758 struct target_stat64 *target_st;
6759 #else
6760 struct target_stat *target_st;
6761 #endif
6763 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
6764 return -TARGET_EFAULT;
6765 memset(target_st, 0, sizeof(*target_st));
6766 __put_user(host_st->st_dev, &target_st->st_dev);
6767 __put_user(host_st->st_ino, &target_st->st_ino);
6768 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6769 __put_user(host_st->st_ino, &target_st->__st_ino);
6770 #endif
6771 __put_user(host_st->st_mode, &target_st->st_mode);
6772 __put_user(host_st->st_nlink, &target_st->st_nlink);
6773 __put_user(host_st->st_uid, &target_st->st_uid);
6774 __put_user(host_st->st_gid, &target_st->st_gid);
6775 __put_user(host_st->st_rdev, &target_st->st_rdev);
6776 /* XXX: better use of kernel struct */
6777 __put_user(host_st->st_size, &target_st->st_size);
6778 __put_user(host_st->st_blksize, &target_st->st_blksize);
6779 __put_user(host_st->st_blocks, &target_st->st_blocks);
6780 __put_user(host_st->st_atime, &target_st->target_st_atime);
6781 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
6782 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
6783 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
6784 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
6785 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
6786 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
6787 #endif
6788 unlock_user_struct(target_st, target_addr, 1);
6791 return 0;
6793 #endif
6795 #if defined(TARGET_NR_statx) && defined(__NR_statx)
6796 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
6797 abi_ulong target_addr)
6799 struct target_statx *target_stx;
6801 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
6802 return -TARGET_EFAULT;
6804 memset(target_stx, 0, sizeof(*target_stx));
6806 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
6807 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
6808 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
6809 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
6810 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
6811 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
6812 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
6813 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
6814 __put_user(host_stx->stx_size, &target_stx->stx_size);
6815 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
6816 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
6817 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
6818 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
6819 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
6820 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
6821 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
6822 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
6823 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
6824 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
6825 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
6826 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
6827 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
6828 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
6830 unlock_user_struct(target_stx, target_addr, 1);
6832 return 0;
6834 #endif
6837 /* ??? Using host futex calls even when target atomic operations
6838 are not really atomic probably breaks things. However, implementing
6839 futexes locally would make futexes shared between multiple processes
6840 tricky. They are probably useless anyway, because guest atomic
6841 operations won't work either. */
6842 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6843 target_ulong uaddr2, int val3)
6845 struct timespec ts, *pts;
6846 int base_op;
6848 /* ??? We assume FUTEX_* constants are the same on both host
6849 and target. */
6850 #ifdef FUTEX_CMD_MASK
6851 base_op = op & FUTEX_CMD_MASK;
6852 #else
6853 base_op = op;
6854 #endif
6855 switch (base_op) {
6856 case FUTEX_WAIT:
6857 case FUTEX_WAIT_BITSET:
6858 if (timeout) {
6859 pts = &ts;
6860 target_to_host_timespec(pts, timeout);
6861 } else {
6862 pts = NULL;
6864 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6865 pts, NULL, val3));
6866 case FUTEX_WAKE:
6867 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6868 case FUTEX_FD:
6869 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6870 case FUTEX_REQUEUE:
6871 case FUTEX_CMP_REQUEUE:
6872 case FUTEX_WAKE_OP:
6873 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6874 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6875 But the prototype takes a `struct timespec *'; insert casts
6876 to satisfy the compiler. We do not need to tswap TIMEOUT
6877 since it's not compared to guest memory. */
6878 pts = (struct timespec *)(uintptr_t) timeout;
6879 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6880 g2h(uaddr2),
6881 (base_op == FUTEX_CMP_REQUEUE
6882 ? tswap32(val3)
6883 : val3)));
6884 default:
6885 return -TARGET_ENOSYS;
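/*
 * Editor's illustration (not part of the original source): why val is
 * byteswapped for FUTEX_WAIT but not FUTEX_WAKE above. FUTEX_WAIT compares
 * val against the guest-endian word at uaddr, so val must match guest byte
 * order; FUTEX_WAKE's val is only a waiter count interpreted by the host:
 */
#if 0 /* example only */
#include <linux/futex.h>
#include <sys/syscall.h>

static long example_futex_wake(uint32_t *host_addr, int nwaiters)
{
    /* plain host integer, no tswap32() needed */
    return syscall(__NR_futex, host_addr, FUTEX_WAKE, nwaiters,
                   NULL, NULL, 0);
}
#endif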
6888 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6889 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
6890 abi_long handle, abi_long mount_id,
6891 abi_long flags)
6893 struct file_handle *target_fh;
6894 struct file_handle *fh;
6895 int mid = 0;
6896 abi_long ret;
6897 char *name;
6898 unsigned int size, total_size;
6900 if (get_user_s32(size, handle)) {
6901 return -TARGET_EFAULT;
6904 name = lock_user_string(pathname);
6905 if (!name) {
6906 return -TARGET_EFAULT;
6909 total_size = sizeof(struct file_handle) + size;
6910 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
6911 if (!target_fh) {
6912 unlock_user(name, pathname, 0);
6913 return -TARGET_EFAULT;
6916 fh = g_malloc0(total_size);
6917 fh->handle_bytes = size;
6919 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
6920 unlock_user(name, pathname, 0);
6922 /* man name_to_handle_at(2):
6923 * Other than the use of the handle_bytes field, the caller should treat
6924 * the file_handle structure as an opaque data type
6925 */
6927 memcpy(target_fh, fh, total_size);
6928 target_fh->handle_bytes = tswap32(fh->handle_bytes);
6929 target_fh->handle_type = tswap32(fh->handle_type);
6930 g_free(fh);
6931 unlock_user(target_fh, handle, total_size);
6933 if (put_user_s32(mid, mount_id)) {
6934 return -TARGET_EFAULT;
6937 return ret;
6940 #endif
6942 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
6943 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
6944 abi_long flags)
6946 struct file_handle *target_fh;
6947 struct file_handle *fh;
6948 unsigned int size, total_size;
6949 abi_long ret;
6951 if (get_user_s32(size, handle)) {
6952 return -TARGET_EFAULT;
6955 total_size = sizeof(struct file_handle) + size;
6956 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
6957 if (!target_fh) {
6958 return -TARGET_EFAULT;
6961 fh = g_memdup(target_fh, total_size);
6962 fh->handle_bytes = size;
6963 fh->handle_type = tswap32(target_fh->handle_type);
6965 ret = get_errno(open_by_handle_at(mount_fd, fh,
6966 target_to_host_bitmask(flags, fcntl_flags_tbl)));
6968 g_free(fh);
6970 unlock_user(target_fh, handle, total_size);
6972 return ret;
6974 #endif
6976 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6978 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
6980 int host_flags;
6981 target_sigset_t *target_mask;
6982 sigset_t host_mask;
6983 abi_long ret;
6985 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
6986 return -TARGET_EINVAL;
6988 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
6989 return -TARGET_EFAULT;
6992 target_to_host_sigset(&host_mask, target_mask);
6994 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
6996 ret = get_errno(signalfd(fd, &host_mask, host_flags));
6997 if (ret >= 0) {
6998 fd_trans_register(ret, &target_signalfd_trans);
7001 unlock_user_struct(target_mask, mask, 0);
7003 return ret;
7005 #endif
7007 /* Map host to target signal numbers for the wait family of syscalls.
7008 Assume all other status bits are the same. */
7009 int host_to_target_waitstatus(int status)
7011 if (WIFSIGNALED(status)) {
7012 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7014 if (WIFSTOPPED(status)) {
7015 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7016 | (status & 0xff);
7018 return status;
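/*
 * Editor's illustration (not part of the original source): the wait status
 * encoding assumed above. Only the embedded signal number needs host-to-
 * target translation; the marker bits stay where they are:
 */
#if 0 /* example only */
#include <assert.h>
static void example_waitstatus(void)
{
    int status = (SIGTSTP << 8) | 0x7f;   /* "stopped by SIGTSTP" */
    assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGTSTP);
}
#endif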
7021 static int open_self_cmdline(void *cpu_env, int fd)
7023 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7024 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7025 int i;
7027 for (i = 0; i < bprm->argc; i++) {
7028 size_t len = strlen(bprm->argv[i]) + 1;
7030 if (write(fd, bprm->argv[i], len) != len) {
7031 return -1;
7035 return 0;
7038 static int open_self_maps(void *cpu_env, int fd)
7040 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7041 TaskState *ts = cpu->opaque;
7042 FILE *fp;
7043 char *line = NULL;
7044 size_t len = 0;
7045 ssize_t read;
7047 fp = fopen("/proc/self/maps", "r");
7048 if (fp == NULL) {
7049 return -1;
7052 while ((read = getline(&line, &len, fp)) != -1) {
7053 int fields, dev_maj, dev_min, inode;
7054 uint64_t min, max, offset;
7055 char flag_r, flag_w, flag_x, flag_p;
7056 char path[512] = "";
7057 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7058 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7059 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7061 if ((fields < 10) || (fields > 11)) {
7062 continue;
7064 if (h2g_valid(min)) {
7065 int flags = page_get_flags(h2g(min));
7066 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX) + 1;
7067 if (page_check_range(h2g(min), max - min, flags) == -1) {
7068 continue;
7070 if (h2g(min) == ts->info->stack_limit) {
7071 pstrcpy(path, sizeof(path), " [stack]");
7073 dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7074 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7075 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7076 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7077 path[0] ? " " : "", path);
7081 free(line);
7082 fclose(fp);
7084 return 0;
7087 static int open_self_stat(void *cpu_env, int fd)
7089 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7090 TaskState *ts = cpu->opaque;
7091 abi_ulong start_stack = ts->info->start_stack;
7092 int i;
7094 for (i = 0; i < 44; i++) {
7095 char buf[128];
7096 int len;
7097 uint64_t val = 0;
7099 if (i == 0) {
7100 /* pid */
7101 val = getpid();
7102 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7103 } else if (i == 1) {
7104 /* app name */
7105 snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
7106 } else if (i == 27) {
7107 /* stack bottom */
7108 val = start_stack;
7109 snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
7110 } else {
7111 /* for the rest, there is MasterCard (i.e. just report zero) */
7112 snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
7115 len = strlen(buf);
7116 if (write(fd, buf, len) != len) {
7117 return -1;
7121 return 0;
7124 static int open_self_auxv(void *cpu_env, int fd)
7126 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7127 TaskState *ts = cpu->opaque;
7128 abi_ulong auxv = ts->info->saved_auxv;
7129 abi_ulong len = ts->info->auxv_len;
7130 char *ptr;
7132 /*
7133 * The auxiliary vector is stored on the target process stack;
7134 * read in the whole auxv vector and copy it to the file.
7135 */
7136 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7137 if (ptr != NULL) {
7138 while (len > 0) {
7139 ssize_t r;
7140 r = write(fd, ptr, len);
7141 if (r <= 0) {
7142 break;
7144 len -= r;
7145 ptr += r;
7147 lseek(fd, 0, SEEK_SET);
7148 unlock_user(ptr, auxv, len);
7151 return 0;
7154 static int is_proc_myself(const char *filename, const char *entry)
7156 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7157 filename += strlen("/proc/");
7158 if (!strncmp(filename, "self/", strlen("self/"))) {
7159 filename += strlen("self/");
7160 } else if (*filename >= '1' && *filename <= '9') {
7161 char myself[80];
7162 snprintf(myself, sizeof(myself), "%d/", getpid());
7163 if (!strncmp(filename, myself, strlen(myself))) {
7164 filename += strlen(myself);
7165 } else {
7166 return 0;
7168 } else {
7169 return 0;
7171 if (!strcmp(filename, entry)) {
7172 return 1;
7175 return 0;
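/*
 * Editor's illustration (not part of the original source): the paths
 * is_proc_myself() accepts, assuming a hypothetical getpid() of 1234:
 */
#if 0 /* example only */
#include <assert.h>
static void example_proc_matching(void)
{
    assert(is_proc_myself("/proc/self/maps", "maps") == 1);
    assert(is_proc_myself("/proc/1234/maps", "maps") == 1);  /* own pid   */
    assert(is_proc_myself("/proc/5678/maps", "maps") == 0);  /* other pid */
    assert(is_proc_myself("/etc/maps", "maps")       == 0);  /* not /proc */
}
#endif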
7178 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7179 defined(TARGET_SPARC) || defined(TARGET_M68K)
7180 static int is_proc(const char *filename, const char *entry)
7182 return strcmp(filename, entry) == 0;
7184 #endif
7186 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7187 static int open_net_route(void *cpu_env, int fd)
7189 FILE *fp;
7190 char *line = NULL;
7191 size_t len = 0;
7192 ssize_t read;
7194 fp = fopen("/proc/net/route", "r");
7195 if (fp == NULL) {
7196 return -1;
7199 /* read header */
7201 read = getline(&line, &len, fp);
7202 dprintf(fd, "%s", line);
7204 /* read routes */
7206 while ((read = getline(&line, &len, fp)) != -1) {
7207 char iface[16];
7208 uint32_t dest, gw, mask;
7209 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7210 int fields;
7212 fields = sscanf(line,
7213 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7214 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7215 &mask, &mtu, &window, &irtt);
7216 if (fields != 11) {
7217 continue;
7219 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7220 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7221 metric, tswap32(mask), mtu, window, irtt);
7224 free(line);
7225 fclose(fp);
7227 return 0;
7229 #endif
7231 #if defined(TARGET_SPARC)
7232 static int open_cpuinfo(void *cpu_env, int fd)
7234 dprintf(fd, "type\t\t: sun4u\n");
7235 return 0;
7237 #endif
7239 #if defined(TARGET_M68K)
7240 static int open_hardware(void *cpu_env, int fd)
7242 dprintf(fd, "Model:\t\tqemu-m68k\n");
7243 return 0;
7245 #endif
7247 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7249 struct fake_open {
7250 const char *filename;
7251 int (*fill)(void *cpu_env, int fd);
7252 int (*cmp)(const char *s1, const char *s2);
7254 const struct fake_open *fake_open;
7255 static const struct fake_open fakes[] = {
7256 { "maps", open_self_maps, is_proc_myself },
7257 { "stat", open_self_stat, is_proc_myself },
7258 { "auxv", open_self_auxv, is_proc_myself },
7259 { "cmdline", open_self_cmdline, is_proc_myself },
7260 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7261 { "/proc/net/route", open_net_route, is_proc },
7262 #endif
7263 #if defined(TARGET_SPARC)
7264 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7265 #endif
7266 #if defined(TARGET_M68K)
7267 { "/proc/hardware", open_hardware, is_proc },
7268 #endif
7269 { NULL, NULL, NULL }
7272 if (is_proc_myself(pathname, "exe")) {
7273 int execfd = qemu_getauxval(AT_EXECFD);
7274 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7277 for (fake_open = fakes; fake_open->filename; fake_open++) {
7278 if (fake_open->cmp(pathname, fake_open->filename)) {
7279 break;
7283 if (fake_open->filename) {
7284 const char *tmpdir;
7285 char filename[PATH_MAX];
7286 int fd, r;
7288 /* create temporary file to map stat to */
7289 tmpdir = getenv("TMPDIR");
7290 if (!tmpdir)
7291 tmpdir = "/tmp";
7292 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7293 fd = mkstemp(filename);
7294 if (fd < 0) {
7295 return fd;
7297 unlink(filename);
7299 if ((r = fake_open->fill(cpu_env, fd))) {
7300 int e = errno;
7301 close(fd);
7302 errno = e;
7303 return r;
7305 lseek(fd, 0, SEEK_SET);
7307 return fd;
7310 return safe_openat(dirfd, path(pathname), flags, mode);
7313 #define TIMER_MAGIC 0x0caf0000
7314 #define TIMER_MAGIC_MASK 0xffff0000
7316 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
7317 static target_timer_t get_timer_id(abi_long arg)
7319 target_timer_t timerid = arg;
7321 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7322 return -TARGET_EINVAL;
7325 timerid &= 0xffff;
7327 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7328 return -TARGET_EINVAL;
7331 return timerid;
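/*
 * Editor's illustration (not part of the original source): timer IDs handed
 * to the guest are an internal table index tagged with TIMER_MAGIC in the
 * top 16 bits; get_timer_id() validates the tag before stripping it:
 */
#if 0 /* example only */
#include <assert.h>
static void example_timer_id(void)
{
    assert(get_timer_id(TIMER_MAGIC | 3) == 3);
    assert(get_timer_id(0xdead0003) == -TARGET_EINVAL);  /* wrong magic */
}
#endif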
7334 static int target_to_host_cpu_mask(unsigned long *host_mask,
7335 size_t host_size,
7336 abi_ulong target_addr,
7337 size_t target_size)
7339 unsigned target_bits = sizeof(abi_ulong) * 8;
7340 unsigned host_bits = sizeof(*host_mask) * 8;
7341 abi_ulong *target_mask;
7342 unsigned i, j;
7344 assert(host_size >= target_size);
7346 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7347 if (!target_mask) {
7348 return -TARGET_EFAULT;
7350 memset(host_mask, 0, host_size);
7352 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7353 unsigned bit = i * target_bits;
7354 abi_ulong val;
7356 __get_user(val, &target_mask[i]);
7357 for (j = 0; j < target_bits; j++, bit++) {
7358 if (val & (1UL << j)) {
7359 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7364 unlock_user(target_mask, target_addr, 0);
7365 return 0;
7368 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7369 size_t host_size,
7370 abi_ulong target_addr,
7371 size_t target_size)
7373 unsigned target_bits = sizeof(abi_ulong) * 8;
7374 unsigned host_bits = sizeof(*host_mask) * 8;
7375 abi_ulong *target_mask;
7376 unsigned i, j;
7378 assert(host_size >= target_size);
7380 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7381 if (!target_mask) {
7382 return -TARGET_EFAULT;
7385 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7386 unsigned bit = i * target_bits;
7387 abi_ulong val = 0;
7389 for (j = 0; j < target_bits; j++, bit++) {
7390 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7391 val |= 1UL << j;
7394 __put_user(val, &target_mask[i]);
7397 unlock_user(target_mask, target_addr, target_size);
7398 return 0;
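/*
 * Editor's illustration (not part of the original source): the bit-by-bit
 * copy above keeps CPU numbering stable even when abi_ulong and the host's
 * unsigned long differ in width. For a 32-bit guest on a 64-bit host:
 *
 *   guest word 1, bit 2  ->  CPU number = 1*32 + 2 = 34
 *   host_mask[34 / 64] |= 1UL << (34 % 64)
 *
 * so guest CPU 34 lands in bit 34 of host word 0, not bit 2 of word 1.
 */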
7401 /* This is an internal helper for do_syscall so that it is easier
7402 * to have a single return point, and so that actions such as
7403 * logging of syscall results can be performed.
7404 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7405 */
7406 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7407 abi_long arg2, abi_long arg3, abi_long arg4,
7408 abi_long arg5, abi_long arg6, abi_long arg7,
7409 abi_long arg8)
7411 CPUState *cpu = env_cpu(cpu_env);
7412 abi_long ret;
7413 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7414 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7415 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7416 || defined(TARGET_NR_statx)
7417 struct stat st;
7418 #endif
7419 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7420 || defined(TARGET_NR_fstatfs)
7421 struct statfs stfs;
7422 #endif
7423 void *p;
7425 switch(num) {
7426 case TARGET_NR_exit:
7427 /* In old applications this may be used to implement _exit(2).
7428 However, in threaded applications it is used for thread termination,
7429 and _exit_group is used for application termination.
7430 Do thread termination if we have more than one thread. */
7432 if (block_signals()) {
7433 return -TARGET_ERESTARTSYS;
7436 cpu_list_lock();
7438 if (CPU_NEXT(first_cpu)) {
7439 TaskState *ts;
7441 /* Remove the CPU from the list. */
7442 QTAILQ_REMOVE_RCU(&cpus, cpu, node);
7444 cpu_list_unlock();
7446 ts = cpu->opaque;
7447 if (ts->child_tidptr) {
7448 put_user_u32(0, ts->child_tidptr);
7449 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7450 NULL, NULL, 0);
7452 thread_cpu = NULL;
7453 object_unref(OBJECT(cpu));
7454 g_free(ts);
7455 rcu_unregister_thread();
7456 pthread_exit(NULL);
7459 cpu_list_unlock();
7460 preexit_cleanup(cpu_env, arg1);
7461 _exit(arg1);
7462 return 0; /* avoid warning */
7463 case TARGET_NR_read:
7464 if (arg2 == 0 && arg3 == 0) {
7465 return get_errno(safe_read(arg1, 0, 0));
7466 } else {
7467 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7468 return -TARGET_EFAULT;
7469 ret = get_errno(safe_read(arg1, p, arg3));
7470 if (ret >= 0 &&
7471 fd_trans_host_to_target_data(arg1)) {
7472 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7474 unlock_user(p, arg2, ret);
7476 return ret;
7477 case TARGET_NR_write:
7478 if (arg2 == 0 && arg3 == 0) {
7479 return get_errno(safe_write(arg1, 0, 0));
7481 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7482 return -TARGET_EFAULT;
7483 if (fd_trans_target_to_host_data(arg1)) {
7484 void *copy = g_malloc(arg3);
7485 memcpy(copy, p, arg3);
7486 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7487 if (ret >= 0) {
7488 ret = get_errno(safe_write(arg1, copy, ret));
7490 g_free(copy);
7491 } else {
7492 ret = get_errno(safe_write(arg1, p, arg3));
7494 unlock_user(p, arg2, 0);
7495 return ret;
7497 #ifdef TARGET_NR_open
7498 case TARGET_NR_open:
7499 if (!(p = lock_user_string(arg1)))
7500 return -TARGET_EFAULT;
7501 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7502 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7503 arg3));
7504 fd_trans_unregister(ret);
7505 unlock_user(p, arg1, 0);
7506 return ret;
7507 #endif
7508 case TARGET_NR_openat:
7509 if (!(p = lock_user_string(arg2)))
7510 return -TARGET_EFAULT;
7511 ret = get_errno(do_openat(cpu_env, arg1, p,
7512 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7513 arg4));
7514 fd_trans_unregister(ret);
7515 unlock_user(p, arg2, 0);
7516 return ret;
7517 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7518 case TARGET_NR_name_to_handle_at:
7519 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7520 return ret;
7521 #endif
7522 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7523 case TARGET_NR_open_by_handle_at:
7524 ret = do_open_by_handle_at(arg1, arg2, arg3);
7525 fd_trans_unregister(ret);
7526 return ret;
7527 #endif
7528 case TARGET_NR_close:
7529 fd_trans_unregister(arg1);
7530 return get_errno(close(arg1));
7532 case TARGET_NR_brk:
7533 return do_brk(arg1);
7534 #ifdef TARGET_NR_fork
7535 case TARGET_NR_fork:
7536 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7537 #endif
7538 #ifdef TARGET_NR_waitpid
7539 case TARGET_NR_waitpid:
7541 int status;
7542 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7543 if (!is_error(ret) && arg2 && ret
7544 && put_user_s32(host_to_target_waitstatus(status), arg2))
7545 return -TARGET_EFAULT;
7547 return ret;
7548 #endif
7549 #ifdef TARGET_NR_waitid
7550 case TARGET_NR_waitid:
7552 siginfo_t info;
7553 info.si_pid = 0;
7554 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7555 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7556 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7557 return -TARGET_EFAULT;
7558 host_to_target_siginfo(p, &info);
7559 unlock_user(p, arg3, sizeof(target_siginfo_t));
7562 return ret;
7563 #endif
7564 #ifdef TARGET_NR_creat /* not on alpha */
7565 case TARGET_NR_creat:
7566 if (!(p = lock_user_string(arg1)))
7567 return -TARGET_EFAULT;
7568 ret = get_errno(creat(p, arg2));
7569 fd_trans_unregister(ret);
7570 unlock_user(p, arg1, 0);
7571 return ret;
7572 #endif
7573 #ifdef TARGET_NR_link
7574 case TARGET_NR_link:
7576 void * p2;
7577 p = lock_user_string(arg1);
7578 p2 = lock_user_string(arg2);
7579 if (!p || !p2)
7580 ret = -TARGET_EFAULT;
7581 else
7582 ret = get_errno(link(p, p2));
7583 unlock_user(p2, arg2, 0);
7584 unlock_user(p, arg1, 0);
7586 return ret;
7587 #endif
7588 #if defined(TARGET_NR_linkat)
7589 case TARGET_NR_linkat:
7591 void * p2 = NULL;
7592 if (!arg2 || !arg4)
7593 return -TARGET_EFAULT;
7594 p = lock_user_string(arg2);
7595 p2 = lock_user_string(arg4);
7596 if (!p || !p2)
7597 ret = -TARGET_EFAULT;
7598 else
7599 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7600 unlock_user(p, arg2, 0);
7601 unlock_user(p2, arg4, 0);
7603 return ret;
7604 #endif
7605 #ifdef TARGET_NR_unlink
7606 case TARGET_NR_unlink:
7607 if (!(p = lock_user_string(arg1)))
7608 return -TARGET_EFAULT;
7609 ret = get_errno(unlink(p));
7610 unlock_user(p, arg1, 0);
7611 return ret;
7612 #endif
7613 #if defined(TARGET_NR_unlinkat)
7614 case TARGET_NR_unlinkat:
7615 if (!(p = lock_user_string(arg2)))
7616 return -TARGET_EFAULT;
7617 ret = get_errno(unlinkat(arg1, p, arg3));
7618 unlock_user(p, arg2, 0);
7619 return ret;
7620 #endif
7621 case TARGET_NR_execve:
7623 char **argp, **envp;
7624 int argc, envc;
7625 abi_ulong gp;
7626 abi_ulong guest_argp;
7627 abi_ulong guest_envp;
7628 abi_ulong addr;
7629 char **q;
7630 int total_size = 0;
7632 argc = 0;
7633 guest_argp = arg2;
7634 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7635 if (get_user_ual(addr, gp))
7636 return -TARGET_EFAULT;
7637 if (!addr)
7638 break;
7639 argc++;
7641 envc = 0;
7642 guest_envp = arg3;
7643 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7644 if (get_user_ual(addr, gp))
7645 return -TARGET_EFAULT;
7646 if (!addr)
7647 break;
7648 envc++;
7651 argp = g_new0(char *, argc + 1);
7652 envp = g_new0(char *, envc + 1);
7654 for (gp = guest_argp, q = argp; gp;
7655 gp += sizeof(abi_ulong), q++) {
7656 if (get_user_ual(addr, gp))
7657 goto execve_efault;
7658 if (!addr)
7659 break;
7660 if (!(*q = lock_user_string(addr)))
7661 goto execve_efault;
7662 total_size += strlen(*q) + 1;
7664 *q = NULL;
7666 for (gp = guest_envp, q = envp; gp;
7667 gp += sizeof(abi_ulong), q++) {
7668 if (get_user_ual(addr, gp))
7669 goto execve_efault;
7670 if (!addr)
7671 break;
7672 if (!(*q = lock_user_string(addr)))
7673 goto execve_efault;
7674 total_size += strlen(*q) + 1;
7676 *q = NULL;
7678 if (!(p = lock_user_string(arg1)))
7679 goto execve_efault;
7680 /* Although execve() is not an interruptible syscall it is
7681 * a special case where we must use the safe_syscall wrapper:
7682 * if we allow a signal to happen before we make the host
7683 * syscall then we will 'lose' it, because at the point of
7684 * execve the process leaves QEMU's control. So we use the
7685 * safe syscall wrapper to ensure that we either take the
7686 * signal as a guest signal, or else it does not happen
7687 * before the execve completes and makes it the other
7688 * program's problem.
7689 */
7690 ret = get_errno(safe_execve(p, argp, envp));
7691 unlock_user(p, arg1, 0);
7693 goto execve_end;
7695 execve_efault:
7696 ret = -TARGET_EFAULT;
7698 execve_end:
7699 for (gp = guest_argp, q = argp; *q;
7700 gp += sizeof(abi_ulong), q++) {
7701 if (get_user_ual(addr, gp)
7702 || !addr)
7703 break;
7704 unlock_user(*q, addr, 0);
7706 for (gp = guest_envp, q = envp; *q;
7707 gp += sizeof(abi_ulong), q++) {
7708 if (get_user_ual(addr, gp)
7709 || !addr)
7710 break;
7711 unlock_user(*q, addr, 0);
7714 g_free(argp);
7715 g_free(envp);
7717 return ret;
7718 case TARGET_NR_chdir:
7719 if (!(p = lock_user_string(arg1)))
7720 return -TARGET_EFAULT;
7721 ret = get_errno(chdir(p));
7722 unlock_user(p, arg1, 0);
7723 return ret;
7724 #ifdef TARGET_NR_time
7725 case TARGET_NR_time:
7727 time_t host_time;
7728 ret = get_errno(time(&host_time));
7729 if (!is_error(ret)
7730 && arg1
7731 && put_user_sal(host_time, arg1))
7732 return -TARGET_EFAULT;
7734 return ret;
7735 #endif
7736 #ifdef TARGET_NR_mknod
7737 case TARGET_NR_mknod:
7738 if (!(p = lock_user_string(arg1)))
7739 return -TARGET_EFAULT;
7740 ret = get_errno(mknod(p, arg2, arg3));
7741 unlock_user(p, arg1, 0);
7742 return ret;
7743 #endif
7744 #if defined(TARGET_NR_mknodat)
7745 case TARGET_NR_mknodat:
7746 if (!(p = lock_user_string(arg2)))
7747 return -TARGET_EFAULT;
7748 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7749 unlock_user(p, arg2, 0);
7750 return ret;
7751 #endif
7752 #ifdef TARGET_NR_chmod
7753 case TARGET_NR_chmod:
7754 if (!(p = lock_user_string(arg1)))
7755 return -TARGET_EFAULT;
7756 ret = get_errno(chmod(p, arg2));
7757 unlock_user(p, arg1, 0);
7758 return ret;
7759 #endif
7760 #ifdef TARGET_NR_lseek
7761 case TARGET_NR_lseek:
7762 return get_errno(lseek(arg1, arg2, arg3));
7763 #endif
7764 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7765 /* Alpha specific */
7766 case TARGET_NR_getxpid:
7767 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7768 return get_errno(getpid());
7769 #endif
7770 #ifdef TARGET_NR_getpid
7771 case TARGET_NR_getpid:
7772 return get_errno(getpid());
7773 #endif
7774 case TARGET_NR_mount:
7776 /* need to look at the data field */
7777 void *p2, *p3;
7779 if (arg1) {
7780 p = lock_user_string(arg1);
7781 if (!p) {
7782 return -TARGET_EFAULT;
7784 } else {
7785 p = NULL;
7788 p2 = lock_user_string(arg2);
7789 if (!p2) {
7790 if (arg1) {
7791 unlock_user(p, arg1, 0);
7793 return -TARGET_EFAULT;
7796 if (arg3) {
7797 p3 = lock_user_string(arg3);
7798 if (!p3) {
7799 if (arg1) {
7800 unlock_user(p, arg1, 0);
7801 }
7802 unlock_user(p2, arg2, 0);
7803 return -TARGET_EFAULT;
7804 }
7805 } else {
7806 p3 = NULL;
7807 }
7809 /* FIXME - arg5 should be locked, but it isn't clear how to
7810 * do that since it's not guaranteed to be a NULL-terminated
7811 * string.
7812 */
7813 if (!arg5) {
7814 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7815 } else {
7816 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7817 }
7818 ret = get_errno(ret);
7820 if (arg1) {
7821 unlock_user(p, arg1, 0);
7822 }
7823 unlock_user(p2, arg2, 0);
7824 if (arg3) {
7825 unlock_user(p3, arg3, 0);
7826 }
7827 }
7828 return ret;
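/*
 * All of the string-taking cases above share the lock_user_string() /
 * unlock_user() discipline. A much-simplified sketch of the contract,
 * assuming the "guest address = host address + fixed offset" memory
 * layout (the demo_* names are illustrative, not QEMU's implementation,
 * which also validates page permissions before touching the memory):
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static char *demo_lock_user_string(uintptr_t guest_base, unsigned long gaddr)
{
    const char *host = (const char *)(guest_base + gaddr);
    return strdup(host);        /* stable private copy for the host call */
}

static void demo_unlock_user_string(char *p)
{
    free(p);                    /* read-only use: nothing to copy back */
}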
7829 #ifdef TARGET_NR_umount
7830 case TARGET_NR_umount:
7831 if (!(p = lock_user_string(arg1)))
7832 return -TARGET_EFAULT;
7833 ret = get_errno(umount(p));
7834 unlock_user(p, arg1, 0);
7835 return ret;
7836 #endif
7837 #ifdef TARGET_NR_stime /* not on alpha */
7838 case TARGET_NR_stime:
7839 {
7840 struct timespec ts;
7841 ts.tv_nsec = 0;
7842 if (get_user_sal(ts.tv_sec, arg1)) {
7843 return -TARGET_EFAULT;
7844 }
7845 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
7846 }
7847 #endif
7848 #ifdef TARGET_NR_alarm /* not on alpha */
7849 case TARGET_NR_alarm:
7850 return alarm(arg1);
7851 #endif
7852 #ifdef TARGET_NR_pause /* not on alpha */
7853 case TARGET_NR_pause:
7854 if (!block_signals()) {
7855 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7856 }
7857 return -TARGET_EINTR;
7858 #endif
7859 #ifdef TARGET_NR_utime
7860 case TARGET_NR_utime:
7861 {
7862 struct utimbuf tbuf, *host_tbuf;
7863 struct target_utimbuf *target_tbuf;
7864 if (arg2) {
7865 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7866 return -TARGET_EFAULT;
7867 tbuf.actime = tswapal(target_tbuf->actime);
7868 tbuf.modtime = tswapal(target_tbuf->modtime);
7869 unlock_user_struct(target_tbuf, arg2, 0);
7870 host_tbuf = &tbuf;
7871 } else {
7872 host_tbuf = NULL;
7873 }
7874 if (!(p = lock_user_string(arg1)))
7875 return -TARGET_EFAULT;
7876 ret = get_errno(utime(p, host_tbuf));
7877 unlock_user(p, arg1, 0);
7878 }
7879 return ret;
7880 #endif
7881 #ifdef TARGET_NR_utimes
7882 case TARGET_NR_utimes:
7883 {
7884 struct timeval *tvp, tv[2];
7885 if (arg2) {
7886 if (copy_from_user_timeval(&tv[0], arg2)
7887 || copy_from_user_timeval(&tv[1],
7888 arg2 + sizeof(struct target_timeval)))
7889 return -TARGET_EFAULT;
7890 tvp = tv;
7891 } else {
7892 tvp = NULL;
7893 }
7894 if (!(p = lock_user_string(arg1)))
7895 return -TARGET_EFAULT;
7896 ret = get_errno(utimes(p, tvp));
7897 unlock_user(p, arg1, 0);
7898 }
7899 return ret;
7900 #endif
7901 #if defined(TARGET_NR_futimesat)
7902 case TARGET_NR_futimesat:
7903 {
7904 struct timeval *tvp, tv[2];
7905 if (arg3) {
7906 if (copy_from_user_timeval(&tv[0], arg3)
7907 || copy_from_user_timeval(&tv[1],
7908 arg3 + sizeof(struct target_timeval)))
7909 return -TARGET_EFAULT;
7910 tvp = tv;
7911 } else {
7912 tvp = NULL;
7913 }
7914 if (!(p = lock_user_string(arg2))) {
7915 return -TARGET_EFAULT;
7916 }
7917 ret = get_errno(futimesat(arg1, path(p), tvp));
7918 unlock_user(p, arg2, 0);
7919 }
7920 return ret;
7921 #endif
7922 #ifdef TARGET_NR_access
7923 case TARGET_NR_access:
7924 if (!(p = lock_user_string(arg1))) {
7925 return -TARGET_EFAULT;
7926 }
7927 ret = get_errno(access(path(p), arg2));
7928 unlock_user(p, arg1, 0);
7929 return ret;
7930 #endif
7931 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7932 case TARGET_NR_faccessat:
7933 if (!(p = lock_user_string(arg2))) {
7934 return -TARGET_EFAULT;
7935 }
7936 ret = get_errno(faccessat(arg1, p, arg3, 0));
7937 unlock_user(p, arg2, 0);
7938 return ret;
7939 #endif
7940 #ifdef TARGET_NR_nice /* not on alpha */
7941 case TARGET_NR_nice:
7942 return get_errno(nice(arg1));
7943 #endif
7944 case TARGET_NR_sync:
7945 sync();
7946 return 0;
7947 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
7948 case TARGET_NR_syncfs:
7949 return get_errno(syncfs(arg1));
7950 #endif
7951 case TARGET_NR_kill:
7952 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7953 #ifdef TARGET_NR_rename
7954 case TARGET_NR_rename:
7955 {
7956 void *p2;
7957 p = lock_user_string(arg1);
7958 p2 = lock_user_string(arg2);
7959 if (!p || !p2)
7960 ret = -TARGET_EFAULT;
7961 else
7962 ret = get_errno(rename(p, p2));
7963 unlock_user(p2, arg2, 0);
7964 unlock_user(p, arg1, 0);
7965 }
7966 return ret;
7967 #endif
7968 #if defined(TARGET_NR_renameat)
7969 case TARGET_NR_renameat:
7970 {
7971 void *p2;
7972 p = lock_user_string(arg2);
7973 p2 = lock_user_string(arg4);
7974 if (!p || !p2)
7975 ret = -TARGET_EFAULT;
7976 else
7977 ret = get_errno(renameat(arg1, p, arg3, p2));
7978 unlock_user(p2, arg4, 0);
7979 unlock_user(p, arg2, 0);
7980 }
7981 return ret;
7982 #endif
7983 #if defined(TARGET_NR_renameat2)
7984 case TARGET_NR_renameat2:
7985 {
7986 void *p2;
7987 p = lock_user_string(arg2);
7988 p2 = lock_user_string(arg4);
7989 if (!p || !p2) {
7990 ret = -TARGET_EFAULT;
7991 } else {
7992 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
7993 }
7994 unlock_user(p2, arg4, 0);
7995 unlock_user(p, arg2, 0);
7996 }
7997 return ret;
7998 #endif
7999 #ifdef TARGET_NR_mkdir
8000 case TARGET_NR_mkdir:
8001 if (!(p = lock_user_string(arg1)))
8002 return -TARGET_EFAULT;
8003 ret = get_errno(mkdir(p, arg2));
8004 unlock_user(p, arg1, 0);
8005 return ret;
8006 #endif
8007 #if defined(TARGET_NR_mkdirat)
8008 case TARGET_NR_mkdirat:
8009 if (!(p = lock_user_string(arg2)))
8010 return -TARGET_EFAULT;
8011 ret = get_errno(mkdirat(arg1, p, arg3));
8012 unlock_user(p, arg2, 0);
8013 return ret;
8014 #endif
8015 #ifdef TARGET_NR_rmdir
8016 case TARGET_NR_rmdir:
8017 if (!(p = lock_user_string(arg1)))
8018 return -TARGET_EFAULT;
8019 ret = get_errno(rmdir(p));
8020 unlock_user(p, arg1, 0);
8021 return ret;
8022 #endif
8023 case TARGET_NR_dup:
8024 ret = get_errno(dup(arg1));
8025 if (ret >= 0) {
8026 fd_trans_dup(arg1, ret);
8027 }
8028 return ret;
8029 #ifdef TARGET_NR_pipe
8030 case TARGET_NR_pipe:
8031 return do_pipe(cpu_env, arg1, 0, 0);
8032 #endif
8033 #ifdef TARGET_NR_pipe2
8034 case TARGET_NR_pipe2:
8035 return do_pipe(cpu_env, arg1,
8036 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8037 #endif
8038 case TARGET_NR_times:
8039 {
8040 struct target_tms *tmsp;
8041 struct tms tms;
8042 ret = get_errno(times(&tms));
8043 if (arg1) {
8044 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8045 if (!tmsp)
8046 return -TARGET_EFAULT;
8047 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8048 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8049 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8050 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8051 }
8052 if (!is_error(ret))
8053 ret = host_to_target_clock_t(ret);
8054 }
8055 return ret;
8056 case TARGET_NR_acct:
8057 if (arg1 == 0) {
8058 ret = get_errno(acct(NULL));
8059 } else {
8060 if (!(p = lock_user_string(arg1))) {
8061 return -TARGET_EFAULT;
8062 }
8063 ret = get_errno(acct(path(p)));
8064 unlock_user(p, arg1, 0);
8065 }
8066 return ret;
8067 #ifdef TARGET_NR_umount2
8068 case TARGET_NR_umount2:
8069 if (!(p = lock_user_string(arg1)))
8070 return -TARGET_EFAULT;
8071 ret = get_errno(umount2(p, arg2));
8072 unlock_user(p, arg1, 0);
8073 return ret;
8074 #endif
8075 case TARGET_NR_ioctl:
8076 return do_ioctl(arg1, arg2, arg3);
8077 #ifdef TARGET_NR_fcntl
8078 case TARGET_NR_fcntl:
8079 return do_fcntl(arg1, arg2, arg3);
8080 #endif
8081 case TARGET_NR_setpgid:
8082 return get_errno(setpgid(arg1, arg2));
8083 case TARGET_NR_umask:
8084 return get_errno(umask(arg1));
8085 case TARGET_NR_chroot:
8086 if (!(p = lock_user_string(arg1)))
8087 return -TARGET_EFAULT;
8088 ret = get_errno(chroot(p));
8089 unlock_user(p, arg1, 0);
8090 return ret;
8091 #ifdef TARGET_NR_dup2
8092 case TARGET_NR_dup2:
8093 ret = get_errno(dup2(arg1, arg2));
8094 if (ret >= 0) {
8095 fd_trans_dup(arg1, arg2);
8096 }
8097 return ret;
8098 #endif
8099 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8100 case TARGET_NR_dup3:
8101 {
8102 int host_flags;
8104 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8105 return -EINVAL;
8106 }
8107 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8108 ret = get_errno(dup3(arg1, arg2, host_flags));
8109 if (ret >= 0) {
8110 fd_trans_dup(arg1, arg2);
8111 }
8112 return ret;
8113 }
8114 #endif
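/*
 * dup3() and pipe2() push their flag words through
 * target_to_host_bitmask() with fcntl_flags_tbl. A sketch of that kind
 * of table-driven remapping; the rows and the entry type below are
 * illustrative examples, not the real fcntl_flags_tbl contents:
 */
#include <fcntl.h>

struct demo_flag_map {
    unsigned target_bit;
    unsigned host_bit;
};

static const struct demo_flag_map demo_fcntl_flags[] = {
    { 0x0001, O_WRONLY },       /* example: generic Linux value */
    { 0x0800, O_NONBLOCK },     /* differs on e.g. alpha/sparc targets */
    { 0, 0 },
};

static unsigned demo_target_to_host_bitmask(unsigned target_flags)
{
    unsigned host_flags = 0;
    const struct demo_flag_map *m;

    for (m = demo_fcntl_flags; m->target_bit; m++) {
        if (target_flags & m->target_bit) {
            host_flags |= m->host_bit;
        }
    }
    return host_flags;
}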
8115 #ifdef TARGET_NR_getppid /* not on alpha */
8116 case TARGET_NR_getppid:
8117 return get_errno(getppid());
8118 #endif
8119 #ifdef TARGET_NR_getpgrp
8120 case TARGET_NR_getpgrp:
8121 return get_errno(getpgrp());
8122 #endif
8123 case TARGET_NR_setsid:
8124 return get_errno(setsid());
8125 #ifdef TARGET_NR_sigaction
8126 case TARGET_NR_sigaction:
8127 {
8128 #if defined(TARGET_ALPHA)
8129 struct target_sigaction act, oact, *pact = 0;
8130 struct target_old_sigaction *old_act;
8131 if (arg2) {
8132 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8133 return -TARGET_EFAULT;
8134 act._sa_handler = old_act->_sa_handler;
8135 target_siginitset(&act.sa_mask, old_act->sa_mask);
8136 act.sa_flags = old_act->sa_flags;
8137 act.sa_restorer = 0;
8138 unlock_user_struct(old_act, arg2, 0);
8139 pact = &act;
8140 }
8141 ret = get_errno(do_sigaction(arg1, pact, &oact));
8142 if (!is_error(ret) && arg3) {
8143 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8144 return -TARGET_EFAULT;
8145 old_act->_sa_handler = oact._sa_handler;
8146 old_act->sa_mask = oact.sa_mask.sig[0];
8147 old_act->sa_flags = oact.sa_flags;
8148 unlock_user_struct(old_act, arg3, 1);
8149 }
8150 #elif defined(TARGET_MIPS)
8151 struct target_sigaction act, oact, *pact, *old_act;
8153 if (arg2) {
8154 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8155 return -TARGET_EFAULT;
8156 act._sa_handler = old_act->_sa_handler;
8157 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8158 act.sa_flags = old_act->sa_flags;
8159 unlock_user_struct(old_act, arg2, 0);
8160 pact = &act;
8161 } else {
8162 pact = NULL;
8163 }
8165 ret = get_errno(do_sigaction(arg1, pact, &oact));
8167 if (!is_error(ret) && arg3) {
8168 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8169 return -TARGET_EFAULT;
8170 old_act->_sa_handler = oact._sa_handler;
8171 old_act->sa_flags = oact.sa_flags;
8172 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8173 old_act->sa_mask.sig[1] = 0;
8174 old_act->sa_mask.sig[2] = 0;
8175 old_act->sa_mask.sig[3] = 0;
8176 unlock_user_struct(old_act, arg3, 1);
8177 }
8178 #else
8179 struct target_old_sigaction *old_act;
8180 struct target_sigaction act, oact, *pact;
8181 if (arg2) {
8182 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8183 return -TARGET_EFAULT;
8184 act._sa_handler = old_act->_sa_handler;
8185 target_siginitset(&act.sa_mask, old_act->sa_mask);
8186 act.sa_flags = old_act->sa_flags;
8187 act.sa_restorer = old_act->sa_restorer;
8188 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8189 act.ka_restorer = 0;
8190 #endif
8191 unlock_user_struct(old_act, arg2, 0);
8192 pact = &act;
8193 } else {
8194 pact = NULL;
8195 }
8196 ret = get_errno(do_sigaction(arg1, pact, &oact));
8197 if (!is_error(ret) && arg3) {
8198 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8199 return -TARGET_EFAULT;
8200 old_act->_sa_handler = oact._sa_handler;
8201 old_act->sa_mask = oact.sa_mask.sig[0];
8202 old_act->sa_flags = oact.sa_flags;
8203 old_act->sa_restorer = oact.sa_restorer;
8204 unlock_user_struct(old_act, arg3, 1);
8205 }
8206 #endif
8207 }
8208 return ret;
8209 #endif
8210 case TARGET_NR_rt_sigaction:
8211 {
8212 #if defined(TARGET_ALPHA)
8213 /* For Alpha and SPARC this is a 5 argument syscall, with
8214 * a 'restorer' parameter which must be copied into the
8215 * sa_restorer field of the sigaction struct.
8216 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8217 * and arg5 is the sigsetsize.
8218 * Alpha also has a separate rt_sigaction struct that it uses
8219 * here; SPARC uses the usual sigaction struct.
8220 */
8221 struct target_rt_sigaction *rt_act;
8222 struct target_sigaction act, oact, *pact = 0;
8224 if (arg4 != sizeof(target_sigset_t)) {
8225 return -TARGET_EINVAL;
8226 }
8227 if (arg2) {
8228 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8229 return -TARGET_EFAULT;
8230 act._sa_handler = rt_act->_sa_handler;
8231 act.sa_mask = rt_act->sa_mask;
8232 act.sa_flags = rt_act->sa_flags;
8233 act.sa_restorer = arg5;
8234 unlock_user_struct(rt_act, arg2, 0);
8235 pact = &act;
8236 }
8237 ret = get_errno(do_sigaction(arg1, pact, &oact));
8238 if (!is_error(ret) && arg3) {
8239 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8240 return -TARGET_EFAULT;
8241 rt_act->_sa_handler = oact._sa_handler;
8242 rt_act->sa_mask = oact.sa_mask;
8243 rt_act->sa_flags = oact.sa_flags;
8244 unlock_user_struct(rt_act, arg3, 1);
8245 }
8246 #else
8247 #ifdef TARGET_SPARC
8248 target_ulong restorer = arg4;
8249 target_ulong sigsetsize = arg5;
8250 #else
8251 target_ulong sigsetsize = arg4;
8252 #endif
8253 struct target_sigaction *act;
8254 struct target_sigaction *oact;
8256 if (sigsetsize != sizeof(target_sigset_t)) {
8257 return -TARGET_EINVAL;
8258 }
8259 if (arg2) {
8260 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8261 return -TARGET_EFAULT;
8262 }
8263 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8264 act->ka_restorer = restorer;
8265 #endif
8266 } else {
8267 act = NULL;
8268 }
8269 if (arg3) {
8270 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8271 ret = -TARGET_EFAULT;
8272 goto rt_sigaction_fail;
8273 }
8274 } else
8275 oact = NULL;
8276 ret = get_errno(do_sigaction(arg1, act, oact));
8277 rt_sigaction_fail:
8278 if (act)
8279 unlock_user_struct(act, arg2, 0);
8280 if (oact)
8281 unlock_user_struct(oact, arg3, 1);
8282 #endif
8283 }
8284 return ret;
8285 #ifdef TARGET_NR_sgetmask /* not on alpha */
8286 case TARGET_NR_sgetmask:
8287 {
8288 sigset_t cur_set;
8289 abi_ulong target_set;
8290 ret = do_sigprocmask(0, NULL, &cur_set);
8291 if (!ret) {
8292 host_to_target_old_sigset(&target_set, &cur_set);
8293 ret = target_set;
8294 }
8295 }
8296 return ret;
8297 #endif
8298 #ifdef TARGET_NR_ssetmask /* not on alpha */
8299 case TARGET_NR_ssetmask:
8300 {
8301 sigset_t set, oset;
8302 abi_ulong target_set = arg1;
8303 target_to_host_old_sigset(&set, &target_set);
8304 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8305 if (!ret) {
8306 host_to_target_old_sigset(&target_set, &oset);
8307 ret = target_set;
8308 }
8309 }
8310 return ret;
8311 #endif
8312 #ifdef TARGET_NR_sigprocmask
8313 case TARGET_NR_sigprocmask:
8314 {
8315 #if defined(TARGET_ALPHA)
8316 sigset_t set, oldset;
8317 abi_ulong mask;
8318 int how;
8320 switch (arg1) {
8321 case TARGET_SIG_BLOCK:
8322 how = SIG_BLOCK;
8323 break;
8324 case TARGET_SIG_UNBLOCK:
8325 how = SIG_UNBLOCK;
8326 break;
8327 case TARGET_SIG_SETMASK:
8328 how = SIG_SETMASK;
8329 break;
8330 default:
8331 return -TARGET_EINVAL;
8332 }
8333 mask = arg2;
8334 target_to_host_old_sigset(&set, &mask);
8336 ret = do_sigprocmask(how, &set, &oldset);
8337 if (!is_error(ret)) {
8338 host_to_target_old_sigset(&mask, &oldset);
8339 ret = mask;
8340 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8341 }
8342 #else
8343 sigset_t set, oldset, *set_ptr;
8344 int how;
8346 if (arg2) {
8347 switch (arg1) {
8348 case TARGET_SIG_BLOCK:
8349 how = SIG_BLOCK;
8350 break;
8351 case TARGET_SIG_UNBLOCK:
8352 how = SIG_UNBLOCK;
8353 break;
8354 case TARGET_SIG_SETMASK:
8355 how = SIG_SETMASK;
8356 break;
8357 default:
8358 return -TARGET_EINVAL;
8359 }
8360 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8361 return -TARGET_EFAULT;
8362 target_to_host_old_sigset(&set, p);
8363 unlock_user(p, arg2, 0);
8364 set_ptr = &set;
8365 } else {
8366 how = 0;
8367 set_ptr = NULL;
8368 }
8369 ret = do_sigprocmask(how, set_ptr, &oldset);
8370 if (!is_error(ret) && arg3) {
8371 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8372 return -TARGET_EFAULT;
8373 host_to_target_old_sigset(p, &oldset);
8374 unlock_user(p, arg3, sizeof(target_sigset_t));
8375 }
8376 #endif
8377 }
8378 return ret;
8379 #endif
8380 case TARGET_NR_rt_sigprocmask:
8381 {
8382 int how = arg1;
8383 sigset_t set, oldset, *set_ptr;
8385 if (arg4 != sizeof(target_sigset_t)) {
8386 return -TARGET_EINVAL;
8387 }
8389 if (arg2) {
8390 switch(how) {
8391 case TARGET_SIG_BLOCK:
8392 how = SIG_BLOCK;
8393 break;
8394 case TARGET_SIG_UNBLOCK:
8395 how = SIG_UNBLOCK;
8396 break;
8397 case TARGET_SIG_SETMASK:
8398 how = SIG_SETMASK;
8399 break;
8400 default:
8401 return -TARGET_EINVAL;
8402 }
8403 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8404 return -TARGET_EFAULT;
8405 target_to_host_sigset(&set, p);
8406 unlock_user(p, arg2, 0);
8407 set_ptr = &set;
8408 } else {
8409 how = 0;
8410 set_ptr = NULL;
8411 }
8412 ret = do_sigprocmask(how, set_ptr, &oldset);
8413 if (!is_error(ret) && arg3) {
8414 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8415 return -TARGET_EFAULT;
8416 host_to_target_sigset(p, &oldset);
8417 unlock_user(p, arg3, sizeof(target_sigset_t));
8418 }
8419 }
8420 return ret;
8421 #ifdef TARGET_NR_sigpending
8422 case TARGET_NR_sigpending:
8423 {
8424 sigset_t set;
8425 ret = get_errno(sigpending(&set));
8426 if (!is_error(ret)) {
8427 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8428 return -TARGET_EFAULT;
8429 host_to_target_old_sigset(p, &set);
8430 unlock_user(p, arg1, sizeof(target_sigset_t));
8431 }
8432 }
8433 return ret;
8434 #endif
8435 case TARGET_NR_rt_sigpending:
8436 {
8437 sigset_t set;
8439 /* Yes, this check is >, not != like most. We follow the kernel's
8440 * logic and it does it like this because it implements
8441 * NR_sigpending through the same code path, and in that case
8442 * the old_sigset_t is smaller in size.
8443 */
8444 if (arg2 > sizeof(target_sigset_t)) {
8445 return -TARGET_EINVAL;
8446 }
8448 ret = get_errno(sigpending(&set));
8449 if (!is_error(ret)) {
8450 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8451 return -TARGET_EFAULT;
8452 host_to_target_sigset(p, &set);
8453 unlock_user(p, arg1, sizeof(target_sigset_t));
8454 }
8455 }
8456 return ret;
8457 #ifdef TARGET_NR_sigsuspend
8458 case TARGET_NR_sigsuspend:
8459 {
8460 TaskState *ts = cpu->opaque;
8461 #if defined(TARGET_ALPHA)
8462 abi_ulong mask = arg1;
8463 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8464 #else
8465 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8466 return -TARGET_EFAULT;
8467 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8468 unlock_user(p, arg1, 0);
8469 #endif
8470 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8471 SIGSET_T_SIZE));
8472 if (ret != -TARGET_ERESTARTSYS) {
8473 ts->in_sigsuspend = 1;
8474 }
8475 }
8476 return ret;
8477 #endif
8478 case TARGET_NR_rt_sigsuspend:
8479 {
8480 TaskState *ts = cpu->opaque;
8482 if (arg2 != sizeof(target_sigset_t)) {
8483 return -TARGET_EINVAL;
8484 }
8485 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8486 return -TARGET_EFAULT;
8487 target_to_host_sigset(&ts->sigsuspend_mask, p);
8488 unlock_user(p, arg1, 0);
8489 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8490 SIGSET_T_SIZE));
8491 if (ret != -TARGET_ERESTARTSYS) {
8492 ts->in_sigsuspend = 1;
8493 }
8494 }
8495 return ret;
8496 case TARGET_NR_rt_sigtimedwait:
8497 {
8498 sigset_t set;
8499 struct timespec uts, *puts;
8500 siginfo_t uinfo;
8502 if (arg4 != sizeof(target_sigset_t)) {
8503 return -TARGET_EINVAL;
8504 }
8506 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8507 return -TARGET_EFAULT;
8508 target_to_host_sigset(&set, p);
8509 unlock_user(p, arg1, 0);
8510 if (arg3) {
8511 puts = &uts;
8512 target_to_host_timespec(puts, arg3);
8513 } else {
8514 puts = NULL;
8515 }
8516 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8517 SIGSET_T_SIZE));
8518 if (!is_error(ret)) {
8519 if (arg2) {
8520 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8521 0);
8522 if (!p) {
8523 return -TARGET_EFAULT;
8524 }
8525 host_to_target_siginfo(p, &uinfo);
8526 unlock_user(p, arg2, sizeof(target_siginfo_t));
8527 }
8528 ret = host_to_target_signal(ret);
8529 }
8530 }
8531 return ret;
8532 case TARGET_NR_rt_sigqueueinfo:
8533 {
8534 siginfo_t uinfo;
8536 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8537 if (!p) {
8538 return -TARGET_EFAULT;
8539 }
8540 target_to_host_siginfo(&uinfo, p);
8541 unlock_user(p, arg3, 0);
8542 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8543 }
8544 return ret;
8545 case TARGET_NR_rt_tgsigqueueinfo:
8546 {
8547 siginfo_t uinfo;
8549 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8550 if (!p) {
8551 return -TARGET_EFAULT;
8552 }
8553 target_to_host_siginfo(&uinfo, p);
8554 unlock_user(p, arg4, 0);
8555 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8556 }
8557 return ret;
8558 #ifdef TARGET_NR_sigreturn
8559 case TARGET_NR_sigreturn:
8560 if (block_signals()) {
8561 return -TARGET_ERESTARTSYS;
8562 }
8563 return do_sigreturn(cpu_env);
8564 #endif
8565 case TARGET_NR_rt_sigreturn:
8566 if (block_signals()) {
8567 return -TARGET_ERESTARTSYS;
8568 }
8569 return do_rt_sigreturn(cpu_env);
8570 case TARGET_NR_sethostname:
8571 if (!(p = lock_user_string(arg1)))
8572 return -TARGET_EFAULT;
8573 ret = get_errno(sethostname(p, arg2));
8574 unlock_user(p, arg1, 0);
8575 return ret;
8576 #ifdef TARGET_NR_setrlimit
8577 case TARGET_NR_setrlimit:
8578 {
8579 int resource = target_to_host_resource(arg1);
8580 struct target_rlimit *target_rlim;
8581 struct rlimit rlim;
8582 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8583 return -TARGET_EFAULT;
8584 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8585 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8586 unlock_user_struct(target_rlim, arg2, 0);
8587 /*
8588 * If we just passed through resource limit settings for memory then
8589 * they would also apply to QEMU's own allocations, and QEMU will
8590 * crash or hang or die if its allocations fail. Ideally we would
8591 * track the guest allocations in QEMU and apply the limits ourselves.
8592 * For now, just tell the guest the call succeeded but don't actually
8593 * limit anything.
8594 */
8595 if (resource != RLIMIT_AS &&
8596 resource != RLIMIT_DATA &&
8597 resource != RLIMIT_STACK) {
8598 return get_errno(setrlimit(resource, &rlim));
8599 } else {
8600 return 0;
8601 }
8602 }
8603 #endif
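/*
 * Why the setrlimit case above swallows RLIMIT_AS/DATA/STACK: the guest
 * and QEMU share one host process, so a real cap would throttle the
 * emulator's own allocations too. A standalone demonstration (the exact
 * outcome depends on the host's current address-space usage):
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl = { .rlim_cur = 64 << 20, .rlim_max = 64 << 20 };

    if (setrlimit(RLIMIT_AS, &rl) != 0) {   /* caps the whole process */
        perror("setrlimit");
        return 1;
    }
    void *big = malloc(128 << 20);          /* now likely to fail */
    printf("malloc(128 MiB) %s\n", big ? "succeeded" : "failed");
    free(big);
    return 0;
}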
8604 #ifdef TARGET_NR_getrlimit
8605 case TARGET_NR_getrlimit:
8606 {
8607 int resource = target_to_host_resource(arg1);
8608 struct target_rlimit *target_rlim;
8609 struct rlimit rlim;
8611 ret = get_errno(getrlimit(resource, &rlim));
8612 if (!is_error(ret)) {
8613 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8614 return -TARGET_EFAULT;
8615 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8616 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8617 unlock_user_struct(target_rlim, arg2, 1);
8618 }
8619 }
8620 return ret;
8621 #endif
8622 case TARGET_NR_getrusage:
8623 {
8624 struct rusage rusage;
8625 ret = get_errno(getrusage(arg1, &rusage));
8626 if (!is_error(ret)) {
8627 ret = host_to_target_rusage(arg2, &rusage);
8628 }
8629 }
8630 return ret;
8631 case TARGET_NR_gettimeofday:
8632 {
8633 struct timeval tv;
8634 ret = get_errno(gettimeofday(&tv, NULL));
8635 if (!is_error(ret)) {
8636 if (copy_to_user_timeval(arg1, &tv))
8637 return -TARGET_EFAULT;
8638 }
8639 }
8640 return ret;
8641 case TARGET_NR_settimeofday:
8642 {
8643 struct timeval tv, *ptv = NULL;
8644 struct timezone tz, *ptz = NULL;
8646 if (arg1) {
8647 if (copy_from_user_timeval(&tv, arg1)) {
8648 return -TARGET_EFAULT;
8649 }
8650 ptv = &tv;
8651 }
8653 if (arg2) {
8654 if (copy_from_user_timezone(&tz, arg2)) {
8655 return -TARGET_EFAULT;
8656 }
8657 ptz = &tz;
8658 }
8660 return get_errno(settimeofday(ptv, ptz));
8661 }
8662 #if defined(TARGET_NR_select)
8663 case TARGET_NR_select:
8664 #if defined(TARGET_WANT_NI_OLD_SELECT)
8665 /* some architectures used to have old_select here
8666 * but now ENOSYS it.
8667 */
8668 ret = -TARGET_ENOSYS;
8669 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8670 ret = do_old_select(arg1);
8671 #else
8672 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8673 #endif
8674 return ret;
8675 #endif
8676 #ifdef TARGET_NR_pselect6
8677 case TARGET_NR_pselect6:
8678 {
8679 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8680 fd_set rfds, wfds, efds;
8681 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8682 struct timespec ts, *ts_ptr;
8684 /*
8685 * The 6th arg is actually two args smashed together,
8686 * so we cannot use the C library.
8687 */
8688 sigset_t set;
8689 struct {
8690 sigset_t *set;
8691 size_t size;
8692 } sig, *sig_ptr;
8694 abi_ulong arg_sigset, arg_sigsize, *arg7;
8695 target_sigset_t *target_sigset;
8697 n = arg1;
8698 rfd_addr = arg2;
8699 wfd_addr = arg3;
8700 efd_addr = arg4;
8701 ts_addr = arg5;
8703 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8704 if (ret) {
8705 return ret;
8706 }
8707 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8708 if (ret) {
8709 return ret;
8710 }
8711 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8712 if (ret) {
8713 return ret;
8714 }
8716 /*
8717 * This takes a timespec, and not a timeval, so we cannot
8718 * use the do_select() helper ...
8719 */
8720 if (ts_addr) {
8721 if (target_to_host_timespec(&ts, ts_addr)) {
8722 return -TARGET_EFAULT;
8723 }
8724 ts_ptr = &ts;
8725 } else {
8726 ts_ptr = NULL;
8727 }
8729 /* Extract the two packed args for the sigset */
8730 if (arg6) {
8731 sig_ptr = &sig;
8732 sig.size = SIGSET_T_SIZE;
8734 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8735 if (!arg7) {
8736 return -TARGET_EFAULT;
8737 }
8738 arg_sigset = tswapal(arg7[0]);
8739 arg_sigsize = tswapal(arg7[1]);
8740 unlock_user(arg7, arg6, 0);
8742 if (arg_sigset) {
8743 sig.set = &set;
8744 if (arg_sigsize != sizeof(*target_sigset)) {
8745 /* Like the kernel, we enforce correct size sigsets */
8746 return -TARGET_EINVAL;
8747 }
8748 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8749 sizeof(*target_sigset), 1);
8750 if (!target_sigset) {
8751 return -TARGET_EFAULT;
8752 }
8753 target_to_host_sigset(&set, target_sigset);
8754 unlock_user(target_sigset, arg_sigset, 0);
8755 } else {
8756 sig.set = NULL;
8757 }
8758 } else {
8759 sig_ptr = NULL;
8760 }
8762 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8763 ts_ptr, sig_ptr));
8765 if (!is_error(ret)) {
8766 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8767 return -TARGET_EFAULT;
8768 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8769 return -TARGET_EFAULT;
8770 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8771 return -TARGET_EFAULT;
8773 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8774 return -TARGET_EFAULT;
8775 }
8776 }
8777 return ret;
8778 #endif
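/*
 * The pselect6 case must unpack its sixth argument by hand because the
 * kernel ABI passes { sigset pointer, sigset size } as one two-word
 * record in guest memory. A sketch of the unpacking under the
 * assumption of a same-endian, same-width guest (real code must also
 * byte-swap each word, as done with tswapal() above):
 */
#include <stddef.h>

static int demo_unpack_pselect_sig(const unsigned long packed[2],
                                   unsigned long *sigset_addr,
                                   size_t *sigset_size)
{
    *sigset_addr = packed[0];
    *sigset_size = (size_t)packed[1];
    /* like the kernel, reject any size other than the real sigset's */
    return *sigset_size == 8 ? 0 : -1;   /* 8 == 64-bit Linux sigset */
}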
8779 #ifdef TARGET_NR_symlink
8780 case TARGET_NR_symlink:
8781 {
8782 void *p2;
8783 p = lock_user_string(arg1);
8784 p2 = lock_user_string(arg2);
8785 if (!p || !p2)
8786 ret = -TARGET_EFAULT;
8787 else
8788 ret = get_errno(symlink(p, p2));
8789 unlock_user(p2, arg2, 0);
8790 unlock_user(p, arg1, 0);
8791 }
8792 return ret;
8793 #endif
8794 #if defined(TARGET_NR_symlinkat)
8795 case TARGET_NR_symlinkat:
8796 {
8797 void *p2;
8798 p = lock_user_string(arg1);
8799 p2 = lock_user_string(arg3);
8800 if (!p || !p2)
8801 ret = -TARGET_EFAULT;
8802 else
8803 ret = get_errno(symlinkat(p, arg2, p2));
8804 unlock_user(p2, arg3, 0);
8805 unlock_user(p, arg1, 0);
8806 }
8807 return ret;
8808 #endif
8809 #ifdef TARGET_NR_readlink
8810 case TARGET_NR_readlink:
8811 {
8812 void *p2;
8813 p = lock_user_string(arg1);
8814 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8815 if (!p || !p2) {
8816 ret = -TARGET_EFAULT;
8817 } else if (!arg3) {
8818 /* Short circuit this for the magic exe check. */
8819 ret = -TARGET_EINVAL;
8820 } else if (is_proc_myself((const char *)p, "exe")) {
8821 char real[PATH_MAX], *temp;
8822 temp = realpath(exec_path, real);
8823 /* Return value is # of bytes that we wrote to the buffer. */
8824 if (temp == NULL) {
8825 ret = get_errno(-1);
8826 } else {
8827 /* Don't worry about sign mismatch as earlier mapping
8828 * logic would have thrown a bad address error. */
8829 ret = MIN(strlen(real), arg3);
8830 /* We cannot NUL terminate the string. */
8831 memcpy(p2, real, ret);
8832 }
8833 } else {
8834 ret = get_errno(readlink(path(p), p2, arg3));
8835 }
8836 unlock_user(p2, arg2, ret);
8837 unlock_user(p, arg1, 0);
8838 }
8839 return ret;
8840 #endif
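/*
 * The readlink case intercepts /proc/self/exe so the guest sees its own
 * binary rather than the QEMU interpreter. A simplified sketch of an
 * is_proc_myself()-style check (illustrative only; the real helper's
 * exact matching rules may differ):
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int demo_is_proc_myself(const char *path, const char *entry)
{
    char pidbuf[32];

    if (strncmp(path, "/proc/", 6) != 0) {
        return 0;
    }
    path += 6;
    if (strncmp(path, "self/", 5) == 0) {
        path += 5;
    } else {
        /* the numeric form only counts when it names our own pid */
        snprintf(pidbuf, sizeof(pidbuf), "%d/", (int)getpid());
        if (strncmp(path, pidbuf, strlen(pidbuf)) != 0) {
            return 0;
        }
        path += strlen(pidbuf);
    }
    return strcmp(path, entry) == 0;
}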
8841 #if defined(TARGET_NR_readlinkat)
8842 case TARGET_NR_readlinkat:
8843 {
8844 void *p2;
8845 p = lock_user_string(arg2);
8846 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8847 if (!p || !p2) {
8848 ret = -TARGET_EFAULT;
8849 } else if (is_proc_myself((const char *)p, "exe")) {
8850 char real[PATH_MAX], *temp;
8851 temp = realpath(exec_path, real);
8852 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8853 snprintf((char *)p2, arg4, "%s", real);
8854 } else {
8855 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8856 }
8857 unlock_user(p2, arg3, ret);
8858 unlock_user(p, arg2, 0);
8859 }
8860 return ret;
8861 #endif
8862 #ifdef TARGET_NR_swapon
8863 case TARGET_NR_swapon:
8864 if (!(p = lock_user_string(arg1)))
8865 return -TARGET_EFAULT;
8866 ret = get_errno(swapon(p, arg2));
8867 unlock_user(p, arg1, 0);
8868 return ret;
8869 #endif
8870 case TARGET_NR_reboot:
8871 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8872 /* arg4 must be ignored in all other cases */
8873 p = lock_user_string(arg4);
8874 if (!p) {
8875 return -TARGET_EFAULT;
8876 }
8877 ret = get_errno(reboot(arg1, arg2, arg3, p));
8878 unlock_user(p, arg4, 0);
8879 } else {
8880 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8881 }
8882 return ret;
8883 #ifdef TARGET_NR_mmap
8884 case TARGET_NR_mmap:
8885 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8886 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8887 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8888 || defined(TARGET_S390X)
8889 {
8890 abi_ulong *v;
8891 abi_ulong v1, v2, v3, v4, v5, v6;
8892 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8893 return -TARGET_EFAULT;
8894 v1 = tswapal(v[0]);
8895 v2 = tswapal(v[1]);
8896 v3 = tswapal(v[2]);
8897 v4 = tswapal(v[3]);
8898 v5 = tswapal(v[4]);
8899 v6 = tswapal(v[5]);
8900 unlock_user(v, arg1, 0);
8901 ret = get_errno(target_mmap(v1, v2, v3,
8902 target_to_host_bitmask(v4, mmap_flags_tbl),
8903 v5, v6));
8904 }
8905 #else
8906 ret = get_errno(target_mmap(arg1, arg2, arg3,
8907 target_to_host_bitmask(arg4, mmap_flags_tbl),
8908 arg5,
8909 arg6));
8910 #endif
8911 return ret;
8912 #endif
8913 #ifdef TARGET_NR_mmap2
8914 case TARGET_NR_mmap2:
8915 #ifndef MMAP_SHIFT
8916 #define MMAP_SHIFT 12
8917 #endif
8918 ret = target_mmap(arg1, arg2, arg3,
8919 target_to_host_bitmask(arg4, mmap_flags_tbl),
8920 arg5, arg6 << MMAP_SHIFT);
8921 return get_errno(ret);
8922 #endif
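/*
 * mmap2() takes its file offset in fixed-size units (4096 bytes with
 * the MMAP_SHIFT of 12 used above) precisely so that a 32-bit guest can
 * map beyond the 4 GiB byte-offset limit. The arithmetic in isolation:
 */
#include <stdint.h>

#define DEMO_MMAP_SHIFT 12                   /* 4096-byte units */

static uint64_t demo_mmap2_byte_offset(uint32_t page_units)
{
    /* a 32-bit unit count reaches offsets up to 2^44 - 4096 bytes */
    return (uint64_t)page_units << DEMO_MMAP_SHIFT;
}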
8923 case TARGET_NR_munmap:
8924 return get_errno(target_munmap(arg1, arg2));
8925 case TARGET_NR_mprotect:
8926 {
8927 TaskState *ts = cpu->opaque;
8928 /* Special hack to detect libc making the stack executable. */
8929 if ((arg3 & PROT_GROWSDOWN)
8930 && arg1 >= ts->info->stack_limit
8931 && arg1 <= ts->info->start_stack) {
8932 arg3 &= ~PROT_GROWSDOWN;
8933 arg2 = arg2 + arg1 - ts->info->stack_limit;
8934 arg1 = ts->info->stack_limit;
8935 }
8936 }
8937 return get_errno(target_mprotect(arg1, arg2, arg3));
8938 #ifdef TARGET_NR_mremap
8939 case TARGET_NR_mremap:
8940 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8941 #endif
8942 /* ??? msync/mlock/munlock are broken for softmmu. */
8943 #ifdef TARGET_NR_msync
8944 case TARGET_NR_msync:
8945 return get_errno(msync(g2h(arg1), arg2, arg3));
8946 #endif
8947 #ifdef TARGET_NR_mlock
8948 case TARGET_NR_mlock:
8949 return get_errno(mlock(g2h(arg1), arg2));
8950 #endif
8951 #ifdef TARGET_NR_munlock
8952 case TARGET_NR_munlock:
8953 return get_errno(munlock(g2h(arg1), arg2));
8954 #endif
8955 #ifdef TARGET_NR_mlockall
8956 case TARGET_NR_mlockall:
8957 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8958 #endif
8959 #ifdef TARGET_NR_munlockall
8960 case TARGET_NR_munlockall:
8961 return get_errno(munlockall());
8962 #endif
8963 #ifdef TARGET_NR_truncate
8964 case TARGET_NR_truncate:
8965 if (!(p = lock_user_string(arg1)))
8966 return -TARGET_EFAULT;
8967 ret = get_errno(truncate(p, arg2));
8968 unlock_user(p, arg1, 0);
8969 return ret;
8970 #endif
8971 #ifdef TARGET_NR_ftruncate
8972 case TARGET_NR_ftruncate:
8973 return get_errno(ftruncate(arg1, arg2));
8974 #endif
8975 case TARGET_NR_fchmod:
8976 return get_errno(fchmod(arg1, arg2));
8977 #if defined(TARGET_NR_fchmodat)
8978 case TARGET_NR_fchmodat:
8979 if (!(p = lock_user_string(arg2)))
8980 return -TARGET_EFAULT;
8981 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8982 unlock_user(p, arg2, 0);
8983 return ret;
8984 #endif
8985 case TARGET_NR_getpriority:
8986 /* Note that negative values are valid for getpriority, so we must
8987 differentiate based on errno settings. */
8988 errno = 0;
8989 ret = getpriority(arg1, arg2);
8990 if (ret == -1 && errno != 0) {
8991 return -host_to_target_errno(errno);
8992 }
8993 #ifdef TARGET_ALPHA
8994 /* Return value is the unbiased priority. Signal no error. */
8995 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8996 #else
8997 /* Return value is a biased priority to avoid negative numbers. */
8998 ret = 20 - ret;
8999 #endif
9000 return ret;
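/*
 * The errno dance above, shown standalone: getpriority() can legally
 * return any value in [-20, 19], so only errno distinguishes an error;
 * and the raw kernel ABI reports 20 - nice so that the guest-visible
 * result is never negative.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    errno = 0;
    int prio = getpriority(PRIO_PROCESS, 0);

    if (prio == -1 && errno != 0) {
        perror("getpriority");
        return 1;
    }
    printf("libc nice value %d -> kernel ABI value %d\n", prio, 20 - prio);
    return 0;
}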
9001 case TARGET_NR_setpriority:
9002 return get_errno(setpriority(arg1, arg2, arg3));
9003 #ifdef TARGET_NR_statfs
9004 case TARGET_NR_statfs:
9005 if (!(p = lock_user_string(arg1))) {
9006 return -TARGET_EFAULT;
9007 }
9008 ret = get_errno(statfs(path(p), &stfs));
9009 unlock_user(p, arg1, 0);
9010 convert_statfs:
9011 if (!is_error(ret)) {
9012 struct target_statfs *target_stfs;
9014 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9015 return -TARGET_EFAULT;
9016 __put_user(stfs.f_type, &target_stfs->f_type);
9017 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9018 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9019 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9020 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9021 __put_user(stfs.f_files, &target_stfs->f_files);
9022 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9023 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9024 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9025 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9026 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9027 #ifdef _STATFS_F_FLAGS
9028 __put_user(stfs.f_flags, &target_stfs->f_flags);
9029 #else
9030 __put_user(0, &target_stfs->f_flags);
9031 #endif
9032 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9033 unlock_user_struct(target_stfs, arg2, 1);
9034 }
9035 return ret;
9036 #endif
9037 #ifdef TARGET_NR_fstatfs
9038 case TARGET_NR_fstatfs:
9039 ret = get_errno(fstatfs(arg1, &stfs));
9040 goto convert_statfs;
9041 #endif
9042 #ifdef TARGET_NR_statfs64
9043 case TARGET_NR_statfs64:
9044 if (!(p = lock_user_string(arg1))) {
9045 return -TARGET_EFAULT;
9046 }
9047 ret = get_errno(statfs(path(p), &stfs));
9048 unlock_user(p, arg1, 0);
9049 convert_statfs64:
9050 if (!is_error(ret)) {
9051 struct target_statfs64 *target_stfs;
9053 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9054 return -TARGET_EFAULT;
9055 __put_user(stfs.f_type, &target_stfs->f_type);
9056 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9057 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9058 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9059 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9060 __put_user(stfs.f_files, &target_stfs->f_files);
9061 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9062 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9063 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9064 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9065 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9066 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9067 unlock_user_struct(target_stfs, arg3, 1);
9068 }
9069 return ret;
9070 case TARGET_NR_fstatfs64:
9071 ret = get_errno(fstatfs(arg1, &stfs));
9072 goto convert_statfs64;
9073 #endif
9074 #ifdef TARGET_NR_socketcall
9075 case TARGET_NR_socketcall:
9076 return do_socketcall(arg1, arg2);
9077 #endif
9078 #ifdef TARGET_NR_accept
9079 case TARGET_NR_accept:
9080 return do_accept4(arg1, arg2, arg3, 0);
9081 #endif
9082 #ifdef TARGET_NR_accept4
9083 case TARGET_NR_accept4:
9084 return do_accept4(arg1, arg2, arg3, arg4);
9085 #endif
9086 #ifdef TARGET_NR_bind
9087 case TARGET_NR_bind:
9088 return do_bind(arg1, arg2, arg3);
9089 #endif
9090 #ifdef TARGET_NR_connect
9091 case TARGET_NR_connect:
9092 return do_connect(arg1, arg2, arg3);
9093 #endif
9094 #ifdef TARGET_NR_getpeername
9095 case TARGET_NR_getpeername:
9096 return do_getpeername(arg1, arg2, arg3);
9097 #endif
9098 #ifdef TARGET_NR_getsockname
9099 case TARGET_NR_getsockname:
9100 return do_getsockname(arg1, arg2, arg3);
9101 #endif
9102 #ifdef TARGET_NR_getsockopt
9103 case TARGET_NR_getsockopt:
9104 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9105 #endif
9106 #ifdef TARGET_NR_listen
9107 case TARGET_NR_listen:
9108 return get_errno(listen(arg1, arg2));
9109 #endif
9110 #ifdef TARGET_NR_recv
9111 case TARGET_NR_recv:
9112 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9113 #endif
9114 #ifdef TARGET_NR_recvfrom
9115 case TARGET_NR_recvfrom:
9116 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9117 #endif
9118 #ifdef TARGET_NR_recvmsg
9119 case TARGET_NR_recvmsg:
9120 return do_sendrecvmsg(arg1, arg2, arg3, 0);
9121 #endif
9122 #ifdef TARGET_NR_send
9123 case TARGET_NR_send:
9124 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9125 #endif
9126 #ifdef TARGET_NR_sendmsg
9127 case TARGET_NR_sendmsg:
9128 return do_sendrecvmsg(arg1, arg2, arg3, 1);
9129 #endif
9130 #ifdef TARGET_NR_sendmmsg
9131 case TARGET_NR_sendmmsg:
9132 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9133 case TARGET_NR_recvmmsg:
9134 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9135 #endif
9136 #ifdef TARGET_NR_sendto
9137 case TARGET_NR_sendto:
9138 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9139 #endif
9140 #ifdef TARGET_NR_shutdown
9141 case TARGET_NR_shutdown:
9142 return get_errno(shutdown(arg1, arg2));
9143 #endif
9144 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9145 case TARGET_NR_getrandom:
9146 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9147 if (!p) {
9148 return -TARGET_EFAULT;
9149 }
9150 ret = get_errno(getrandom(p, arg2, arg3));
9151 unlock_user(p, arg1, ret);
9152 return ret;
9153 #endif
9154 #ifdef TARGET_NR_socket
9155 case TARGET_NR_socket:
9156 return do_socket(arg1, arg2, arg3);
9157 #endif
9158 #ifdef TARGET_NR_socketpair
9159 case TARGET_NR_socketpair:
9160 return do_socketpair(arg1, arg2, arg3, arg4);
9161 #endif
9162 #ifdef TARGET_NR_setsockopt
9163 case TARGET_NR_setsockopt:
9164 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9165 #endif
9166 #if defined(TARGET_NR_syslog)
9167 case TARGET_NR_syslog:
9168 {
9169 int len = arg2;
9171 switch (arg1) {
9172 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9173 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9174 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9175 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9176 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9177 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9178 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9179 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9180 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9181 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9182 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9183 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9184 {
9185 if (len < 0) {
9186 return -TARGET_EINVAL;
9187 }
9188 if (len == 0) {
9189 return 0;
9190 }
9191 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9192 if (!p) {
9193 return -TARGET_EFAULT;
9194 }
9195 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9196 unlock_user(p, arg2, arg3);
9197 }
9198 return ret;
9199 default:
9200 return -TARGET_EINVAL;
9201 }
9202 }
9203 break;
9204 #endif
9205 case TARGET_NR_setitimer:
9206 {
9207 struct itimerval value, ovalue, *pvalue;
9209 if (arg2) {
9210 pvalue = &value;
9211 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9212 || copy_from_user_timeval(&pvalue->it_value,
9213 arg2 + sizeof(struct target_timeval)))
9214 return -TARGET_EFAULT;
9215 } else {
9216 pvalue = NULL;
9217 }
9218 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9219 if (!is_error(ret) && arg3) {
9220 if (copy_to_user_timeval(arg3,
9221 &ovalue.it_interval)
9222 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9223 &ovalue.it_value))
9224 return -TARGET_EFAULT;
9225 }
9226 }
9227 return ret;
9228 case TARGET_NR_getitimer:
9229 {
9230 struct itimerval value;
9232 ret = get_errno(getitimer(arg1, &value));
9233 if (!is_error(ret) && arg2) {
9234 if (copy_to_user_timeval(arg2,
9235 &value.it_interval)
9236 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9237 &value.it_value))
9238 return -TARGET_EFAULT;
9239 }
9240 }
9241 return ret;
9242 #ifdef TARGET_NR_stat
9243 case TARGET_NR_stat:
9244 if (!(p = lock_user_string(arg1))) {
9245 return -TARGET_EFAULT;
9246 }
9247 ret = get_errno(stat(path(p), &st));
9248 unlock_user(p, arg1, 0);
9249 goto do_stat;
9250 #endif
9251 #ifdef TARGET_NR_lstat
9252 case TARGET_NR_lstat:
9253 if (!(p = lock_user_string(arg1))) {
9254 return -TARGET_EFAULT;
9255 }
9256 ret = get_errno(lstat(path(p), &st));
9257 unlock_user(p, arg1, 0);
9258 goto do_stat;
9259 #endif
9260 #ifdef TARGET_NR_fstat
9261 case TARGET_NR_fstat:
9262 {
9263 ret = get_errno(fstat(arg1, &st));
9264 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9265 do_stat:
9266 #endif
9267 if (!is_error(ret)) {
9268 struct target_stat *target_st;
9270 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9271 return -TARGET_EFAULT;
9272 memset(target_st, 0, sizeof(*target_st));
9273 __put_user(st.st_dev, &target_st->st_dev);
9274 __put_user(st.st_ino, &target_st->st_ino);
9275 __put_user(st.st_mode, &target_st->st_mode);
9276 __put_user(st.st_uid, &target_st->st_uid);
9277 __put_user(st.st_gid, &target_st->st_gid);
9278 __put_user(st.st_nlink, &target_st->st_nlink);
9279 __put_user(st.st_rdev, &target_st->st_rdev);
9280 __put_user(st.st_size, &target_st->st_size);
9281 __put_user(st.st_blksize, &target_st->st_blksize);
9282 __put_user(st.st_blocks, &target_st->st_blocks);
9283 __put_user(st.st_atime, &target_st->target_st_atime);
9284 __put_user(st.st_mtime, &target_st->target_st_mtime);
9285 __put_user(st.st_ctime, &target_st->target_st_ctime);
9286 #if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
9287 defined(TARGET_STAT_HAVE_NSEC)
9288 __put_user(st.st_atim.tv_nsec,
9289 &target_st->target_st_atime_nsec);
9290 __put_user(st.st_mtim.tv_nsec,
9291 &target_st->target_st_mtime_nsec);
9292 __put_user(st.st_ctim.tv_nsec,
9293 &target_st->target_st_ctime_nsec);
9294 #endif
9295 unlock_user_struct(target_st, arg2, 1);
9296 }
9297 }
9298 return ret;
9299 #endif
9300 case TARGET_NR_vhangup:
9301 return get_errno(vhangup());
9302 #ifdef TARGET_NR_syscall
9303 case TARGET_NR_syscall:
9304 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9305 arg6, arg7, arg8, 0);
9306 #endif
9307 case TARGET_NR_wait4:
9308 {
9309 int status;
9310 abi_long status_ptr = arg2;
9311 struct rusage rusage, *rusage_ptr;
9312 abi_ulong target_rusage = arg4;
9313 abi_long rusage_err;
9314 if (target_rusage)
9315 rusage_ptr = &rusage;
9316 else
9317 rusage_ptr = NULL;
9318 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9319 if (!is_error(ret)) {
9320 if (status_ptr && ret) {
9321 status = host_to_target_waitstatus(status);
9322 if (put_user_s32(status, status_ptr))
9323 return -TARGET_EFAULT;
9324 }
9325 if (target_rusage) {
9326 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9327 if (rusage_err) {
9328 ret = rusage_err;
9329 }
9330 }
9331 }
9332 }
9333 return ret;
9334 #ifdef TARGET_NR_swapoff
9335 case TARGET_NR_swapoff:
9336 if (!(p = lock_user_string(arg1)))
9337 return -TARGET_EFAULT;
9338 ret = get_errno(swapoff(p));
9339 unlock_user(p, arg1, 0);
9340 return ret;
9341 #endif
9342 case TARGET_NR_sysinfo:
9343 {
9344 struct target_sysinfo *target_value;
9345 struct sysinfo value;
9346 ret = get_errno(sysinfo(&value));
9347 if (!is_error(ret) && arg1)
9348 {
9349 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9350 return -TARGET_EFAULT;
9351 __put_user(value.uptime, &target_value->uptime);
9352 __put_user(value.loads[0], &target_value->loads[0]);
9353 __put_user(value.loads[1], &target_value->loads[1]);
9354 __put_user(value.loads[2], &target_value->loads[2]);
9355 __put_user(value.totalram, &target_value->totalram);
9356 __put_user(value.freeram, &target_value->freeram);
9357 __put_user(value.sharedram, &target_value->sharedram);
9358 __put_user(value.bufferram, &target_value->bufferram);
9359 __put_user(value.totalswap, &target_value->totalswap);
9360 __put_user(value.freeswap, &target_value->freeswap);
9361 __put_user(value.procs, &target_value->procs);
9362 __put_user(value.totalhigh, &target_value->totalhigh);
9363 __put_user(value.freehigh, &target_value->freehigh);
9364 __put_user(value.mem_unit, &target_value->mem_unit);
9365 unlock_user_struct(target_value, arg1, 1);
9366 }
9367 }
9368 return ret;
9369 #ifdef TARGET_NR_ipc
9370 case TARGET_NR_ipc:
9371 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9372 #endif
9373 #ifdef TARGET_NR_semget
9374 case TARGET_NR_semget:
9375 return get_errno(semget(arg1, arg2, arg3));
9376 #endif
9377 #ifdef TARGET_NR_semop
9378 case TARGET_NR_semop:
9379 return do_semop(arg1, arg2, arg3);
9380 #endif
9381 #ifdef TARGET_NR_semctl
9382 case TARGET_NR_semctl:
9383 return do_semctl(arg1, arg2, arg3, arg4);
9384 #endif
9385 #ifdef TARGET_NR_msgctl
9386 case TARGET_NR_msgctl:
9387 return do_msgctl(arg1, arg2, arg3);
9388 #endif
9389 #ifdef TARGET_NR_msgget
9390 case TARGET_NR_msgget:
9391 return get_errno(msgget(arg1, arg2));
9392 #endif
9393 #ifdef TARGET_NR_msgrcv
9394 case TARGET_NR_msgrcv:
9395 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9396 #endif
9397 #ifdef TARGET_NR_msgsnd
9398 case TARGET_NR_msgsnd:
9399 return do_msgsnd(arg1, arg2, arg3, arg4);
9400 #endif
9401 #ifdef TARGET_NR_shmget
9402 case TARGET_NR_shmget:
9403 return get_errno(shmget(arg1, arg2, arg3));
9404 #endif
9405 #ifdef TARGET_NR_shmctl
9406 case TARGET_NR_shmctl:
9407 return do_shmctl(arg1, arg2, arg3);
9408 #endif
9409 #ifdef TARGET_NR_shmat
9410 case TARGET_NR_shmat:
9411 return do_shmat(cpu_env, arg1, arg2, arg3);
9412 #endif
9413 #ifdef TARGET_NR_shmdt
9414 case TARGET_NR_shmdt:
9415 return do_shmdt(arg1);
9416 #endif
9417 case TARGET_NR_fsync:
9418 return get_errno(fsync(arg1));
9419 case TARGET_NR_clone:
9420 /* Linux manages to have three different orderings for its
9421 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9422 * match the kernel's CONFIG_CLONE_* settings.
9423 * Microblaze is further special in that it uses a sixth
9424 * implicit argument to clone for the TLS pointer.
9425 */
9426 #if defined(TARGET_MICROBLAZE)
9427 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9428 #elif defined(TARGET_CLONE_BACKWARDS)
9429 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9430 #elif defined(TARGET_CLONE_BACKWARDS2)
9431 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9432 #else
9433 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9434 #endif
9435 return ret;
9436 #ifdef __NR_exit_group
9437 /* new thread calls */
9438 case TARGET_NR_exit_group:
9439 preexit_cleanup(cpu_env, arg1);
9440 return get_errno(exit_group(arg1));
9441 #endif
9442 case TARGET_NR_setdomainname:
9443 if (!(p = lock_user_string(arg1)))
9444 return -TARGET_EFAULT;
9445 ret = get_errno(setdomainname(p, arg2));
9446 unlock_user(p, arg1, 0);
9447 return ret;
9448 case TARGET_NR_uname:
9449 /* no need to transcode because we use the linux syscall */
9450 {
9451 struct new_utsname * buf;
9453 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9454 return -TARGET_EFAULT;
9455 ret = get_errno(sys_uname(buf));
9456 if (!is_error(ret)) {
9457 /* Overwrite the native machine name with whatever is being
9458 emulated. */
9459 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9460 sizeof(buf->machine));
9461 /* Allow the user to override the reported release. */
9462 if (qemu_uname_release && *qemu_uname_release) {
9463 g_strlcpy(buf->release, qemu_uname_release,
9464 sizeof(buf->release));
9465 }
9466 }
9467 unlock_user_struct(buf, arg1, 1);
9468 }
9469 return ret;
9470 #ifdef TARGET_I386
9471 case TARGET_NR_modify_ldt:
9472 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9473 #if !defined(TARGET_X86_64)
9474 case TARGET_NR_vm86:
9475 return do_vm86(cpu_env, arg1, arg2);
9476 #endif
9477 #endif
9478 case TARGET_NR_adjtimex:
9479 {
9480 struct timex host_buf;
9482 if (target_to_host_timex(&host_buf, arg1) != 0) {
9483 return -TARGET_EFAULT;
9484 }
9485 ret = get_errno(adjtimex(&host_buf));
9486 if (!is_error(ret)) {
9487 if (host_to_target_timex(arg1, &host_buf) != 0) {
9488 return -TARGET_EFAULT;
9489 }
9490 }
9491 }
9492 return ret;
9493 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9494 case TARGET_NR_clock_adjtime:
9495 {
9496 struct timex htx, *phtx = &htx;
9498 if (target_to_host_timex(phtx, arg2) != 0) {
9499 return -TARGET_EFAULT;
9500 }
9501 ret = get_errno(clock_adjtime(arg1, phtx));
9502 if (!is_error(ret) && phtx) {
9503 if (host_to_target_timex(arg2, phtx) != 0) {
9504 return -TARGET_EFAULT;
9505 }
9506 }
9507 }
9508 return ret;
9509 #endif
9510 case TARGET_NR_getpgid:
9511 return get_errno(getpgid(arg1));
9512 case TARGET_NR_fchdir:
9513 return get_errno(fchdir(arg1));
9514 case TARGET_NR_personality:
9515 return get_errno(personality(arg1));
9516 #ifdef TARGET_NR__llseek /* Not on alpha */
9517 case TARGET_NR__llseek:
9518 {
9519 int64_t res;
9520 #if !defined(__NR_llseek)
9521 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9522 if (res == -1) {
9523 ret = get_errno(res);
9524 } else {
9525 ret = 0;
9526 }
9527 #else
9528 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9529 #endif
9530 if ((ret == 0) && put_user_s64(res, arg4)) {
9531 return -TARGET_EFAULT;
9532 }
9533 }
9534 return ret;
9535 #endif
9536 #ifdef TARGET_NR_getdents
9537 case TARGET_NR_getdents:
9538 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9539 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9540 {
9541 struct target_dirent *target_dirp;
9542 struct linux_dirent *dirp;
9543 abi_long count = arg3;
9545 dirp = g_try_malloc(count);
9546 if (!dirp) {
9547 return -TARGET_ENOMEM;
9548 }
9550 ret = get_errno(sys_getdents(arg1, dirp, count));
9551 if (!is_error(ret)) {
9552 struct linux_dirent *de;
9553 struct target_dirent *tde;
9554 int len = ret;
9555 int reclen, treclen;
9556 int count1, tnamelen;
9558 count1 = 0;
9559 de = dirp;
9560 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9561 return -TARGET_EFAULT;
9562 tde = target_dirp;
9563 while (len > 0) {
9564 reclen = de->d_reclen;
9565 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9566 assert(tnamelen >= 0);
9567 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9568 assert(count1 + treclen <= count);
9569 tde->d_reclen = tswap16(treclen);
9570 tde->d_ino = tswapal(de->d_ino);
9571 tde->d_off = tswapal(de->d_off);
9572 memcpy(tde->d_name, de->d_name, tnamelen);
9573 de = (struct linux_dirent *)((char *)de + reclen);
9574 len -= reclen;
9575 tde = (struct target_dirent *)((char *)tde + treclen);
9576 count1 += treclen;
9577 }
9578 ret = count1;
9579 unlock_user(target_dirp, arg2, ret);
9580 }
9581 g_free(dirp);
9582 }
9583 #else
9584 {
9585 struct linux_dirent *dirp;
9586 abi_long count = arg3;
9588 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9589 return -TARGET_EFAULT;
9590 ret = get_errno(sys_getdents(arg1, dirp, count));
9591 if (!is_error(ret)) {
9592 struct linux_dirent *de;
9593 int len = ret;
9594 int reclen;
9595 de = dirp;
9596 while (len > 0) {
9597 reclen = de->d_reclen;
9598 if (reclen > len)
9599 break;
9600 de->d_reclen = tswap16(reclen);
9601 tswapls(&de->d_ino);
9602 tswapls(&de->d_off);
9603 de = (struct linux_dirent *)((char *)de + reclen);
9604 len -= reclen;
9605 }
9606 }
9607 unlock_user(dirp, arg2, ret);
9608 }
9609 #endif
9610 #else
9611 /* Implement getdents in terms of getdents64 */
9612 {
9613 struct linux_dirent64 *dirp;
9614 abi_long count = arg3;
9616 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9617 if (!dirp) {
9618 return -TARGET_EFAULT;
9619 }
9620 ret = get_errno(sys_getdents64(arg1, dirp, count));
9621 if (!is_error(ret)) {
9622 /* Convert the dirent64 structs to target dirent. We do this
9623 * in-place, since we can guarantee that a target_dirent is no
9624 * larger than a dirent64; however this means we have to be
9625 * careful to read everything before writing in the new format.
9626 */
9627 struct linux_dirent64 *de;
9628 struct target_dirent *tde;
9629 int len = ret;
9630 int tlen = 0;
9632 de = dirp;
9633 tde = (struct target_dirent *)dirp;
9634 while (len > 0) {
9635 int namelen, treclen;
9636 int reclen = de->d_reclen;
9637 uint64_t ino = de->d_ino;
9638 int64_t off = de->d_off;
9639 uint8_t type = de->d_type;
9641 namelen = strlen(de->d_name);
9642 treclen = offsetof(struct target_dirent, d_name)
9643 + namelen + 2;
9644 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9646 memmove(tde->d_name, de->d_name, namelen + 1);
9647 tde->d_ino = tswapal(ino);
9648 tde->d_off = tswapal(off);
9649 tde->d_reclen = tswap16(treclen);
9650 /* The target_dirent type is in what was formerly a padding
9651 * byte at the end of the structure:
9652 */
9653 *(((char *)tde) + treclen - 1) = type;
9655 de = (struct linux_dirent64 *)((char *)de + reclen);
9656 tde = (struct target_dirent *)((char *)tde + treclen);
9657 len -= reclen;
9658 tlen += treclen;
9659 }
9660 ret = tlen;
9661 }
9662 unlock_user(dirp, arg2, ret);
9663 }
9664 #endif
9665 return ret;
9666 #endif /* TARGET_NR_getdents */
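/*
 * The in-place dirent64 -> dirent conversion above relies on the target
 * record never growing. A sketch of the record-length computation it
 * uses (header + name + NUL + one trailing type byte, rounded up to the
 * target's long alignment); the struct is a simplified stand-in for
 * struct target_dirent:
 */
#include <stddef.h>
#include <string.h>

struct demo_dirent {
    long d_ino;
    long d_off;
    unsigned short d_reclen;
    char d_name[];              /* name, NUL, then d_type in last byte */
};

static size_t demo_target_reclen(const char *name)
{
    size_t len = offsetof(struct demo_dirent, d_name) + strlen(name) + 2;

    return (len + sizeof(long) - 1) & ~(sizeof(long) - 1);
}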
9667 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9668 case TARGET_NR_getdents64:
9669 {
9670 struct linux_dirent64 *dirp;
9671 abi_long count = arg3;
9672 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9673 return -TARGET_EFAULT;
9674 ret = get_errno(sys_getdents64(arg1, dirp, count));
9675 if (!is_error(ret)) {
9676 struct linux_dirent64 *de;
9677 int len = ret;
9678 int reclen;
9679 de = dirp;
9680 while (len > 0) {
9681 reclen = de->d_reclen;
9682 if (reclen > len)
9683 break;
9684 de->d_reclen = tswap16(reclen);
9685 tswap64s((uint64_t *)&de->d_ino);
9686 tswap64s((uint64_t *)&de->d_off);
9687 de = (struct linux_dirent64 *)((char *)de + reclen);
9688 len -= reclen;
9689 }
9690 }
9691 unlock_user(dirp, arg2, ret);
9692 }
9693 return ret;
9694 #endif /* TARGET_NR_getdents64 */
9695 #if defined(TARGET_NR__newselect)
9696 case TARGET_NR__newselect:
9697 return do_select(arg1, arg2, arg3, arg4, arg5);
9698 #endif
9699 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9700 # ifdef TARGET_NR_poll
9701 case TARGET_NR_poll:
9702 # endif
9703 # ifdef TARGET_NR_ppoll
9704 case TARGET_NR_ppoll:
9705 # endif
9706 {
9707 struct target_pollfd *target_pfd;
9708 unsigned int nfds = arg2;
9709 struct pollfd *pfd;
9710 unsigned int i;
9712 pfd = NULL;
9713 target_pfd = NULL;
9714 if (nfds) {
9715 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9716 return -TARGET_EINVAL;
9717 }
9719 target_pfd = lock_user(VERIFY_WRITE, arg1,
9720 sizeof(struct target_pollfd) * nfds, 1);
9721 if (!target_pfd) {
9722 return -TARGET_EFAULT;
9723 }
9725 pfd = alloca(sizeof(struct pollfd) * nfds);
9726 for (i = 0; i < nfds; i++) {
9727 pfd[i].fd = tswap32(target_pfd[i].fd);
9728 pfd[i].events = tswap16(target_pfd[i].events);
9729 }
9730 }
9732 switch (num) {
9733 # ifdef TARGET_NR_ppoll
9734 case TARGET_NR_ppoll:
9735 {
9736 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9737 target_sigset_t *target_set;
9738 sigset_t _set, *set = &_set;
9740 if (arg3) {
9741 if (target_to_host_timespec(timeout_ts, arg3)) {
9742 unlock_user(target_pfd, arg1, 0);
9743 return -TARGET_EFAULT;
9744 }
9745 } else {
9746 timeout_ts = NULL;
9747 }
9749 if (arg4) {
9750 if (arg5 != sizeof(target_sigset_t)) {
9751 unlock_user(target_pfd, arg1, 0);
9752 return -TARGET_EINVAL;
9753 }
9755 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9756 if (!target_set) {
9757 unlock_user(target_pfd, arg1, 0);
9758 return -TARGET_EFAULT;
9759 }
9760 target_to_host_sigset(set, target_set);
9761 } else {
9762 set = NULL;
9763 }
9765 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9766 set, SIGSET_T_SIZE));
9768 if (!is_error(ret) && arg3) {
9769 host_to_target_timespec(arg3, timeout_ts);
9770 }
9771 if (arg4) {
9772 unlock_user(target_set, arg4, 0);
9773 }
9774 break;
9775 }
9776 # endif
9777 # ifdef TARGET_NR_poll
9778 case TARGET_NR_poll:
9779 {
9780 struct timespec ts, *pts;
9782 if (arg3 >= 0) {
9783 /* Convert ms to secs, ns */
9784 ts.tv_sec = arg3 / 1000;
9785 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9786 pts = &ts;
9787 } else {
9788 /* -ve poll() timeout means "infinite" */
9789 pts = NULL;
9790 }
9791 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9792 break;
9793 }
9794 # endif
9795 default:
9796 g_assert_not_reached();
9797 }
9799 if (!is_error(ret)) {
9800 for(i = 0; i < nfds; i++) {
9801 target_pfd[i].revents = tswap16(pfd[i].revents);
9802 }
9803 }
9804 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9805 }
9806 return ret;
9807 #endif
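/*
 * The poll-to-ppoll bridge above in isolation: poll()'s millisecond
 * timeout becomes a timespec, and a negative value means "wait forever"
 * (a NULL timespec for ppoll):
 */
#include <stddef.h>
#include <time.h>

static struct timespec *demo_poll_timeout(int timeout_ms,
                                          struct timespec *ts)
{
    if (timeout_ms < 0) {
        return NULL;                         /* infinite wait */
    }
    ts->tv_sec = timeout_ms / 1000;
    ts->tv_nsec = (long)(timeout_ms % 1000) * 1000000L;
    return ts;
}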
9808 case TARGET_NR_flock:
9809 /* NOTE: the flock constant seems to be the same for every
9810 Linux platform */
9811 return get_errno(safe_flock(arg1, arg2));
9812 case TARGET_NR_readv:
9813 {
9814 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9815 if (vec != NULL) {
9816 ret = get_errno(safe_readv(arg1, vec, arg3));
9817 unlock_iovec(vec, arg2, arg3, 1);
9818 } else {
9819 ret = -host_to_target_errno(errno);
9820 }
9821 }
9822 return ret;
9823 case TARGET_NR_writev:
9824 {
9825 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9826 if (vec != NULL) {
9827 ret = get_errno(safe_writev(arg1, vec, arg3));
9828 unlock_iovec(vec, arg2, arg3, 0);
9829 } else {
9830 ret = -host_to_target_errno(errno);
9833 return ret;
9834 #if defined(TARGET_NR_preadv)
9835 case TARGET_NR_preadv:
9837 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9838 if (vec != NULL) {
9839 unsigned long low, high;
9841 target_to_host_low_high(arg4, arg5, &low, &high);
9842 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
9843 unlock_iovec(vec, arg2, arg3, 1);
9844 } else {
9845 ret = -host_to_target_errno(errno);
9848 return ret;
9849 #endif
9850 #if defined(TARGET_NR_pwritev)
9851 case TARGET_NR_pwritev:
9853 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9854 if (vec != NULL) {
9855 unsigned long low, high;
9857 target_to_host_low_high(arg4, arg5, &low, &high);
9858 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
9859 unlock_iovec(vec, arg2, arg3, 0);
9860 } else {
9861 ret = -host_to_target_errno(errno);
9864 return ret;
9865 #endif
9866 case TARGET_NR_getsid:
9867 return get_errno(getsid(arg1));
9868 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9869 case TARGET_NR_fdatasync:
9870 return get_errno(fdatasync(arg1));
9871 #endif
9872 #ifdef TARGET_NR__sysctl
9873 case TARGET_NR__sysctl:
9874 /* We don't implement this, but ENOTDIR is always a safe
9875 return value. */
9876 return -TARGET_ENOTDIR;
9877 #endif
9878 case TARGET_NR_sched_getaffinity:
9879 {
9880 unsigned int mask_size;
9881 unsigned long *mask;
9883 /*
9884 * sched_getaffinity needs multiples of ulong, so need to take
9885 * care of mismatches between target ulong and host ulong sizes.
9886 */
9887 if (arg2 & (sizeof(abi_ulong) - 1)) {
9888 return -TARGET_EINVAL;
9889 }
9890 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9892 mask = alloca(mask_size);
9893 memset(mask, 0, mask_size);
9894 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9896 if (!is_error(ret)) {
9897 if (ret > arg2) {
9898 /* More data returned than the caller's buffer will fit.
9899 * This only happens if sizeof(abi_long) < sizeof(long)
9900 * and the caller passed us a buffer holding an odd number
9901 * of abi_longs. If the host kernel is actually using the
9902 * extra 4 bytes then fail EINVAL; otherwise we can just
9903 * ignore them and only copy the interesting part.
9904 */
9905 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9906 if (numcpus > arg2 * 8) {
9907 return -TARGET_EINVAL;
9908 }
9909 ret = arg2;
9910 }
9912 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
9913 return -TARGET_EFAULT;
9914 }
9915 }
9916 }
9917 return ret;
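/*
 * Illustrative arithmetic for the mask_size computation above: the guest
 * length is rounded up to a whole number of host unsigned longs. On a
 * 64-bit host (sizeof(unsigned long) == 8): (12 + 7) & ~7 == 16.
 */
#if 0
static unsigned int round_up_to_ulong(unsigned int len)
{
    return (len + (sizeof(unsigned long) - 1)) & ~(sizeof(unsigned long) - 1);
}
#endif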
9918 case TARGET_NR_sched_setaffinity:
9919 {
9920 unsigned int mask_size;
9921 unsigned long *mask;
9923 /*
9924 * sched_setaffinity needs multiples of ulong, so need to take
9925 * care of mismatches between target ulong and host ulong sizes.
9926 */
9927 if (arg2 & (sizeof(abi_ulong) - 1)) {
9928 return -TARGET_EINVAL;
9929 }
9930 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9931 mask = alloca(mask_size);
9933 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
9934 if (ret) {
9935 return ret;
9936 }
9938 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9939 }
9940 case TARGET_NR_getcpu:
9942 unsigned cpu, node;
9943 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
9944 arg2 ? &node : NULL,
9945 NULL));
9946 if (is_error(ret)) {
9947 return ret;
9949 if (arg1 && put_user_u32(cpu, arg1)) {
9950 return -TARGET_EFAULT;
9952 if (arg2 && put_user_u32(node, arg2)) {
9953 return -TARGET_EFAULT;
9956 return ret;
9957 case TARGET_NR_sched_setparam:
9959 struct sched_param *target_schp;
9960 struct sched_param schp;
9962 if (arg2 == 0) {
9963 return -TARGET_EINVAL;
9965 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9966 return -TARGET_EFAULT;
9967 schp.sched_priority = tswap32(target_schp->sched_priority);
9968 unlock_user_struct(target_schp, arg2, 0);
9969 return get_errno(sched_setparam(arg1, &schp));
9971 case TARGET_NR_sched_getparam:
9973 struct sched_param *target_schp;
9974 struct sched_param schp;
9976 if (arg2 == 0) {
9977 return -TARGET_EINVAL;
9979 ret = get_errno(sched_getparam(arg1, &schp));
9980 if (!is_error(ret)) {
9981 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
9982 return -TARGET_EFAULT;
9983 target_schp->sched_priority = tswap32(schp.sched_priority);
9984 unlock_user_struct(target_schp, arg2, 1);
9987 return ret;
9988 case TARGET_NR_sched_setscheduler:
9990 struct sched_param *target_schp;
9991 struct sched_param schp;
9992 if (arg3 == 0) {
9993 return -TARGET_EINVAL;
9995 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
9996 return -TARGET_EFAULT;
9997 schp.sched_priority = tswap32(target_schp->sched_priority);
9998 unlock_user_struct(target_schp, arg3, 0);
9999 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10001 case TARGET_NR_sched_getscheduler:
10002 return get_errno(sched_getscheduler(arg1));
10003 case TARGET_NR_sched_yield:
10004 return get_errno(sched_yield());
10005 case TARGET_NR_sched_get_priority_max:
10006 return get_errno(sched_get_priority_max(arg1));
10007 case TARGET_NR_sched_get_priority_min:
10008 return get_errno(sched_get_priority_min(arg1));
10009 case TARGET_NR_sched_rr_get_interval:
10011 struct timespec ts;
10012 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10013 if (!is_error(ret)) {
10014 ret = host_to_target_timespec(arg2, &ts);
10017 return ret;
10018 case TARGET_NR_nanosleep:
10020 struct timespec req, rem;
10021 target_to_host_timespec(&req, arg1);
10022 ret = get_errno(safe_nanosleep(&req, &rem));
10023 if (is_error(ret) && arg2) {
10024 host_to_target_timespec(arg2, &rem);
10027 return ret;
10028 case TARGET_NR_prctl:
10029 switch (arg1) {
10030 case PR_GET_PDEATHSIG:
10032 int deathsig;
10033 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10034 if (!is_error(ret) && arg2
10035 && put_user_ual(deathsig, arg2)) {
10036 return -TARGET_EFAULT;
10038 return ret;
10040 #ifdef PR_GET_NAME
10041 case PR_GET_NAME:
10043 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10044 if (!name) {
10045 return -TARGET_EFAULT;
10047 ret = get_errno(prctl(arg1, (unsigned long)name,
10048 arg3, arg4, arg5));
10049 unlock_user(name, arg2, 16);
10050 return ret;
10052 case PR_SET_NAME:
10054 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10055 if (!name) {
10056 return -TARGET_EFAULT;
10058 ret = get_errno(prctl(arg1, (unsigned long)name,
10059 arg3, arg4, arg5));
10060 unlock_user(name, arg2, 0);
10061 return ret;
10063 #endif
10064 #ifdef TARGET_MIPS
10065 case TARGET_PR_GET_FP_MODE:
10067 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10068 ret = 0;
10069 if (env->CP0_Status & (1 << CP0St_FR)) {
10070 ret |= TARGET_PR_FP_MODE_FR;
10072 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10073 ret |= TARGET_PR_FP_MODE_FRE;
10075 return ret;
10077 case TARGET_PR_SET_FP_MODE:
10079 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10080 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10081 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10082 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10083 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10085 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10086 TARGET_PR_FP_MODE_FRE;
10088 /* If nothing to change, return right away, successfully. */
10089 if (old_fr == new_fr && old_fre == new_fre) {
10090 return 0;
10092 /* Check the value is valid */
10093 if (arg2 & ~known_bits) {
10094 return -TARGET_EOPNOTSUPP;
10096 /* Setting FRE without FR is not supported. */
10097 if (new_fre && !new_fr) {
10098 return -TARGET_EOPNOTSUPP;
10100 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10101 /* FR1 is not supported */
10102 return -TARGET_EOPNOTSUPP;
10104 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10105 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10106 /* cannot set FR=0 */
10107 return -TARGET_EOPNOTSUPP;
10109 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10110 /* Cannot set FRE=1 */
10111 return -TARGET_EOPNOTSUPP;
10114 int i;
10115 fpr_t *fpr = env->active_fpu.fpr;
10116 for (i = 0; i < 32 ; i += 2) {
10117 if (!old_fr && new_fr) {
10118 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10119 } else if (old_fr && !new_fr) {
10120 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10124 if (new_fr) {
10125 env->CP0_Status |= (1 << CP0St_FR);
10126 env->hflags |= MIPS_HFLAG_F64;
10127 } else {
10128 env->CP0_Status &= ~(1 << CP0St_FR);
10129 env->hflags &= ~MIPS_HFLAG_F64;
10131 if (new_fre) {
10132 env->CP0_Config5 |= (1 << CP0C5_FRE);
10133 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10134 env->hflags |= MIPS_HFLAG_FRE;
10136 } else {
10137 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10138 env->hflags &= ~MIPS_HFLAG_FRE;
10141 return 0;
10143 #endif /* MIPS */
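/*
 * Background for the register loop above (illustrative): with FR=0 the
 * MIPS FPU pairs an even and an odd 32-bit register to back one 64-bit
 * value, while FR=1 gives every register a full 64 bits, so a mode switch
 * moves the high word between fpr[i + 1] and the upper half of fpr[i].
 * Simplified view only; the real fpr_t selects halves with FP_ENDIAN_IDX.
 */
#if 0
#include <stdint.h>

union fpreg_example {
    uint64_t d;       /* FR=1 view: one 64-bit register */
    uint32_t w[2];    /* FR=0 view: two 32-bit halves */
};

static void pair_to_fr1(union fpreg_example *even, uint32_t odd_word)
{
    /* Under FR=0 the odd register of the pair held the high 32 bits. */
    even->d = ((uint64_t)odd_word << 32) | even->w[0];
}
#endif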
10144 #ifdef TARGET_AARCH64
10145 case TARGET_PR_SVE_SET_VL:
10146 /*
10147 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10148 * PR_SVE_VL_INHERIT. Note the kernel definition
10149 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10150 * even though the current architectural maximum is VQ=16.
10151 */
10152 ret = -TARGET_EINVAL;
10153 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10154 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10155 CPUARMState *env = cpu_env;
10156 ARMCPU *cpu = env_archcpu(env);
10157 uint32_t vq, old_vq;
10159 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10160 vq = MAX(arg2 / 16, 1);
10161 vq = MIN(vq, cpu->sve_max_vq);
10163 if (vq < old_vq) {
10164 aarch64_sve_narrow_vq(env, vq);
10166 env->vfp.zcr_el[1] = vq - 1;
10167 arm_rebuild_hflags(env);
10168 ret = vq * 16;
10170 return ret;
10171 case TARGET_PR_SVE_GET_VL:
10172 ret = -TARGET_EINVAL;
10173 {
10174 ARMCPU *cpu = env_archcpu(cpu_env);
10175 if (cpu_isar_feature(aa64_sve, cpu)) {
10176 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10177 }
10178 }
10179 return ret;
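/*
 * Worked example of the SVE sizing above (illustrative): the prctl API
 * measures vector length (VL) in bytes, the architecture in 128-bit
 * quadwords (VQ), so VL = VQ * 16. A request of arg2 = 48 yields
 * vq = MAX(48 / 16, 1) = 3, which is then capped at the CPU's sve_max_vq,
 * and the syscall reports back vq * 16.
 */
#if 0
static unsigned int sve_clamp_vq(unsigned long vl_bytes, unsigned int max_vq)
{
    unsigned int vq = vl_bytes / 16 ? vl_bytes / 16 : 1;  /* MAX(vl/16, 1) */
    return vq < max_vq ? vq : max_vq;                     /* MIN(vq, max) */
}
#endif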
10180 case TARGET_PR_PAC_RESET_KEYS:
10182 CPUARMState *env = cpu_env;
10183 ARMCPU *cpu = env_archcpu(env);
10185 if (arg3 || arg4 || arg5) {
10186 return -TARGET_EINVAL;
10188 if (cpu_isar_feature(aa64_pauth, cpu)) {
10189 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10190 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10191 TARGET_PR_PAC_APGAKEY);
10192 int ret = 0;
10193 Error *err = NULL;
10195 if (arg2 == 0) {
10196 arg2 = all;
10197 } else if (arg2 & ~all) {
10198 return -TARGET_EINVAL;
10200 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10201 ret |= qemu_guest_getrandom(&env->keys.apia,
10202 sizeof(ARMPACKey), &err);
10204 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10205 ret |= qemu_guest_getrandom(&env->keys.apib,
10206 sizeof(ARMPACKey), &err);
10208 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10209 ret |= qemu_guest_getrandom(&env->keys.apda,
10210 sizeof(ARMPACKey), &err);
10212 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10213 ret |= qemu_guest_getrandom(&env->keys.apdb,
10214 sizeof(ARMPACKey), &err);
10216 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10217 ret |= qemu_guest_getrandom(&env->keys.apga,
10218 sizeof(ARMPACKey), &err);
10220 if (ret != 0) {
10221 /*
10222 * Some unknown failure in the crypto. The best
10223 * we can do is log it and fail the syscall.
10224 * The real syscall cannot fail this way.
10225 */
10226 qemu_log_mask(LOG_UNIMP,
10227 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10228 error_get_pretty(err));
10229 error_free(err);
10230 return -TARGET_EIO;
10232 return 0;
10235 return -TARGET_EINVAL;
10236 #endif /* AARCH64 */
10237 case PR_GET_SECCOMP:
10238 case PR_SET_SECCOMP:
10239 /* Disable seccomp to prevent the target from disabling syscalls
10240 * that we need. */
10241 return -TARGET_EINVAL;
10242 default:
10243 /* Most prctl options have no pointer arguments */
10244 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10245 }
10246 break;
10247 #ifdef TARGET_NR_arch_prctl
10248 case TARGET_NR_arch_prctl:
10249 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10250 return do_arch_prctl(cpu_env, arg1, arg2);
10251 #else
10252 #error unreachable
10253 #endif
10254 #endif
10255 #ifdef TARGET_NR_pread64
10256 case TARGET_NR_pread64:
10257 if (regpairs_aligned(cpu_env, num)) {
10258 arg4 = arg5;
10259 arg5 = arg6;
10261 if (arg2 == 0 && arg3 == 0) {
10262 /* Special-case NULL buffer and zero length, which should succeed */
10263 p = 0;
10264 } else {
10265 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10266 if (!p) {
10267 return -TARGET_EFAULT;
10270 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10271 unlock_user(p, arg2, ret);
10272 return ret;
10273 case TARGET_NR_pwrite64:
10274 if (regpairs_aligned(cpu_env, num)) {
10275 arg4 = arg5;
10276 arg5 = arg6;
10278 if (arg2 == 0 && arg3 == 0) {
10279 /* Special-case NULL buffer and zero length, which should succeed */
10280 p = 0;
10281 } else {
10282 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10283 if (!p) {
10284 return -TARGET_EFAULT;
10287 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10288 unlock_user(p, arg2, 0);
10289 return ret;
10290 #endif
10291 case TARGET_NR_getcwd:
10292 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10293 return -TARGET_EFAULT;
10294 ret = get_errno(sys_getcwd1(p, arg2));
10295 unlock_user(p, arg1, ret);
10296 return ret;
10297 case TARGET_NR_capget:
10298 case TARGET_NR_capset:
10300 struct target_user_cap_header *target_header;
10301 struct target_user_cap_data *target_data = NULL;
10302 struct __user_cap_header_struct header;
10303 struct __user_cap_data_struct data[2];
10304 struct __user_cap_data_struct *dataptr = NULL;
10305 int i, target_datalen;
10306 int data_items = 1;
10308 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10309 return -TARGET_EFAULT;
10311 header.version = tswap32(target_header->version);
10312 header.pid = tswap32(target_header->pid);
10314 if (header.version != _LINUX_CAPABILITY_VERSION) {
10315 /* Versions 2 and up take a pointer to two user_data structs */
10316 data_items = 2;
10317 }
10319 target_datalen = sizeof(*target_data) * data_items;
10321 if (arg2) {
10322 if (num == TARGET_NR_capget) {
10323 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10324 } else {
10325 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10327 if (!target_data) {
10328 unlock_user_struct(target_header, arg1, 0);
10329 return -TARGET_EFAULT;
10332 if (num == TARGET_NR_capset) {
10333 for (i = 0; i < data_items; i++) {
10334 data[i].effective = tswap32(target_data[i].effective);
10335 data[i].permitted = tswap32(target_data[i].permitted);
10336 data[i].inheritable = tswap32(target_data[i].inheritable);
10340 dataptr = data;
10343 if (num == TARGET_NR_capget) {
10344 ret = get_errno(capget(&header, dataptr));
10345 } else {
10346 ret = get_errno(capset(&header, dataptr));
10349 /* The kernel always updates version for both capget and capset */
10350 target_header->version = tswap32(header.version);
10351 unlock_user_struct(target_header, arg1, 1);
10353 if (arg2) {
10354 if (num == TARGET_NR_capget) {
10355 for (i = 0; i < data_items; i++) {
10356 target_data[i].effective = tswap32(data[i].effective);
10357 target_data[i].permitted = tswap32(data[i].permitted);
10358 target_data[i].inheritable = tswap32(data[i].inheritable);
10360 unlock_user(target_data, arg2, target_datalen);
10361 } else {
10362 unlock_user(target_data, arg2, 0);
10365 return ret;
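/*
 * Illustrative user-space sketch of the kernel ABI emulated above: with
 * _LINUX_CAPABILITY_VERSION_3 (and v2 before it), capget() takes two
 * __user_cap_data_struct items covering 64 capability bits, and the kernel
 * writes its preferred version back into the header. This sketches a
 * native call, not the QEMU wrapper.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/capability.h>

static int query_caps(pid_t pid, struct __user_cap_data_struct data[2])
{
    struct __user_cap_header_struct hdr = {
        .version = _LINUX_CAPABILITY_VERSION_3,   /* two data items */
        .pid = pid,
    };
    return syscall(SYS_capget, &hdr, data);
}
#endif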
10367 case TARGET_NR_sigaltstack:
10368 return do_sigaltstack(arg1, arg2,
10369 get_sp_from_cpustate((CPUArchState *)cpu_env));
10371 #ifdef CONFIG_SENDFILE
10372 #ifdef TARGET_NR_sendfile
10373 case TARGET_NR_sendfile:
10375 off_t *offp = NULL;
10376 off_t off;
10377 if (arg3) {
10378 ret = get_user_sal(off, arg3);
10379 if (is_error(ret)) {
10380 return ret;
10382 offp = &off;
10384 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10385 if (!is_error(ret) && arg3) {
10386 abi_long ret2 = put_user_sal(off, arg3);
10387 if (is_error(ret2)) {
10388 ret = ret2;
10391 return ret;
10393 #endif
10394 #ifdef TARGET_NR_sendfile64
10395 case TARGET_NR_sendfile64:
10397 off_t *offp = NULL;
10398 off_t off;
10399 if (arg3) {
10400 ret = get_user_s64(off, arg3);
10401 if (is_error(ret)) {
10402 return ret;
10404 offp = &off;
10406 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10407 if (!is_error(ret) && arg3) {
10408 abi_long ret2 = put_user_s64(off, arg3);
10409 if (is_error(ret2)) {
10410 ret = ret2;
10413 return ret;
10415 #endif
10416 #endif
10417 #ifdef TARGET_NR_vfork
10418 case TARGET_NR_vfork:
10419 return get_errno(do_fork(cpu_env,
10420 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10421 0, 0, 0, 0));
10422 #endif
10423 #ifdef TARGET_NR_ugetrlimit
10424 case TARGET_NR_ugetrlimit:
10426 struct rlimit rlim;
10427 int resource = target_to_host_resource(arg1);
10428 ret = get_errno(getrlimit(resource, &rlim));
10429 if (!is_error(ret)) {
10430 struct target_rlimit *target_rlim;
10431 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10432 return -TARGET_EFAULT;
10433 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10434 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10435 unlock_user_struct(target_rlim, arg2, 1);
10437 return ret;
10439 #endif
10440 #ifdef TARGET_NR_truncate64
10441 case TARGET_NR_truncate64:
10442 if (!(p = lock_user_string(arg1)))
10443 return -TARGET_EFAULT;
10444 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10445 unlock_user(p, arg1, 0);
10446 return ret;
10447 #endif
10448 #ifdef TARGET_NR_ftruncate64
10449 case TARGET_NR_ftruncate64:
10450 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10451 #endif
10452 #ifdef TARGET_NR_stat64
10453 case TARGET_NR_stat64:
10454 if (!(p = lock_user_string(arg1))) {
10455 return -TARGET_EFAULT;
10457 ret = get_errno(stat(path(p), &st));
10458 unlock_user(p, arg1, 0);
10459 if (!is_error(ret))
10460 ret = host_to_target_stat64(cpu_env, arg2, &st);
10461 return ret;
10462 #endif
10463 #ifdef TARGET_NR_lstat64
10464 case TARGET_NR_lstat64:
10465 if (!(p = lock_user_string(arg1))) {
10466 return -TARGET_EFAULT;
10468 ret = get_errno(lstat(path(p), &st));
10469 unlock_user(p, arg1, 0);
10470 if (!is_error(ret))
10471 ret = host_to_target_stat64(cpu_env, arg2, &st);
10472 return ret;
10473 #endif
10474 #ifdef TARGET_NR_fstat64
10475 case TARGET_NR_fstat64:
10476 ret = get_errno(fstat(arg1, &st));
10477 if (!is_error(ret))
10478 ret = host_to_target_stat64(cpu_env, arg2, &st);
10479 return ret;
10480 #endif
10481 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10482 #ifdef TARGET_NR_fstatat64
10483 case TARGET_NR_fstatat64:
10484 #endif
10485 #ifdef TARGET_NR_newfstatat
10486 case TARGET_NR_newfstatat:
10487 #endif
10488 if (!(p = lock_user_string(arg2))) {
10489 return -TARGET_EFAULT;
10491 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10492 unlock_user(p, arg2, 0);
10493 if (!is_error(ret))
10494 ret = host_to_target_stat64(cpu_env, arg3, &st);
10495 return ret;
10496 #endif
10497 #if defined(TARGET_NR_statx)
10498 case TARGET_NR_statx:
10500 struct target_statx *target_stx;
10501 int dirfd = arg1;
10502 int flags = arg3;
10504 p = lock_user_string(arg2);
10505 if (p == NULL) {
10506 return -TARGET_EFAULT;
10507 }
10508 #if defined(__NR_statx)
10509 {
10510 /*
10511 * It is assumed that struct statx is architecture independent.
10512 */
10513 struct target_statx host_stx;
10514 int mask = arg4;
10516 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10517 if (!is_error(ret)) {
10518 if (host_to_target_statx(&host_stx, arg5) != 0) {
10519 unlock_user(p, arg2, 0);
10520 return -TARGET_EFAULT;
10521 }
10522 }
10524 if (ret != -TARGET_ENOSYS) {
10525 unlock_user(p, arg2, 0);
10526 return ret;
10527 }
10528 }
10529 #endif
10530 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10531 unlock_user(p, arg2, 0);
10533 if (!is_error(ret)) {
10534 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10535 return -TARGET_EFAULT;
10537 memset(target_stx, 0, sizeof(*target_stx));
10538 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10539 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10540 __put_user(st.st_ino, &target_stx->stx_ino);
10541 __put_user(st.st_mode, &target_stx->stx_mode);
10542 __put_user(st.st_uid, &target_stx->stx_uid);
10543 __put_user(st.st_gid, &target_stx->stx_gid);
10544 __put_user(st.st_nlink, &target_stx->stx_nlink);
10545 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10546 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10547 __put_user(st.st_size, &target_stx->stx_size);
10548 __put_user(st.st_blksize, &target_stx->stx_blksize);
10549 __put_user(st.st_blocks, &target_stx->stx_blocks);
10550 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
10551 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
10552 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
10553 unlock_user_struct(target_stx, arg5, 1);
10556 return ret;
10557 #endif
10558 #ifdef TARGET_NR_lchown
10559 case TARGET_NR_lchown:
10560 if (!(p = lock_user_string(arg1)))
10561 return -TARGET_EFAULT;
10562 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10563 unlock_user(p, arg1, 0);
10564 return ret;
10565 #endif
10566 #ifdef TARGET_NR_getuid
10567 case TARGET_NR_getuid:
10568 return get_errno(high2lowuid(getuid()));
10569 #endif
10570 #ifdef TARGET_NR_getgid
10571 case TARGET_NR_getgid:
10572 return get_errno(high2lowgid(getgid()));
10573 #endif
10574 #ifdef TARGET_NR_geteuid
10575 case TARGET_NR_geteuid:
10576 return get_errno(high2lowuid(geteuid()));
10577 #endif
10578 #ifdef TARGET_NR_getegid
10579 case TARGET_NR_getegid:
10580 return get_errno(high2lowgid(getegid()));
10581 #endif
10582 case TARGET_NR_setreuid:
10583 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10584 case TARGET_NR_setregid:
10585 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10586 case TARGET_NR_getgroups:
10588 int gidsetsize = arg1;
10589 target_id *target_grouplist;
10590 gid_t *grouplist;
10591 int i;
10593 grouplist = alloca(gidsetsize * sizeof(gid_t));
10594 ret = get_errno(getgroups(gidsetsize, grouplist));
10595 if (gidsetsize == 0)
10596 return ret;
10597 if (!is_error(ret)) {
10598 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10599 if (!target_grouplist)
10600 return -TARGET_EFAULT;
10601 for (i = 0; i < ret; i++)
10602 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10603 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10606 return ret;
10607 case TARGET_NR_setgroups:
10609 int gidsetsize = arg1;
10610 target_id *target_grouplist;
10611 gid_t *grouplist = NULL;
10612 int i;
10613 if (gidsetsize) {
10614 grouplist = alloca(gidsetsize * sizeof(gid_t));
10615 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10616 if (!target_grouplist) {
10617 return -TARGET_EFAULT;
10619 for (i = 0; i < gidsetsize; i++) {
10620 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10622 unlock_user(target_grouplist, arg2, 0);
10624 return get_errno(setgroups(gidsetsize, grouplist));
10626 case TARGET_NR_fchown:
10627 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10628 #if defined(TARGET_NR_fchownat)
10629 case TARGET_NR_fchownat:
10630 if (!(p = lock_user_string(arg2)))
10631 return -TARGET_EFAULT;
10632 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10633 low2highgid(arg4), arg5));
10634 unlock_user(p, arg2, 0);
10635 return ret;
10636 #endif
10637 #ifdef TARGET_NR_setresuid
10638 case TARGET_NR_setresuid:
10639 return get_errno(sys_setresuid(low2highuid(arg1),
10640 low2highuid(arg2),
10641 low2highuid(arg3)));
10642 #endif
10643 #ifdef TARGET_NR_getresuid
10644 case TARGET_NR_getresuid:
10646 uid_t ruid, euid, suid;
10647 ret = get_errno(getresuid(&ruid, &euid, &suid));
10648 if (!is_error(ret)) {
10649 if (put_user_id(high2lowuid(ruid), arg1)
10650 || put_user_id(high2lowuid(euid), arg2)
10651 || put_user_id(high2lowuid(suid), arg3))
10652 return -TARGET_EFAULT;
10655 return ret;
10656 #endif
10657 #ifdef TARGET_NR_getresgid
10658 case TARGET_NR_setresgid:
10659 return get_errno(sys_setresgid(low2highgid(arg1),
10660 low2highgid(arg2),
10661 low2highgid(arg3)));
10662 #endif
10663 #ifdef TARGET_NR_getresgid
10664 case TARGET_NR_getresgid:
10666 gid_t rgid, egid, sgid;
10667 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10668 if (!is_error(ret)) {
10669 if (put_user_id(high2lowgid(rgid), arg1)
10670 || put_user_id(high2lowgid(egid), arg2)
10671 || put_user_id(high2lowgid(sgid), arg3))
10672 return -TARGET_EFAULT;
10675 return ret;
10676 #endif
10677 #ifdef TARGET_NR_chown
10678 case TARGET_NR_chown:
10679 if (!(p = lock_user_string(arg1)))
10680 return -TARGET_EFAULT;
10681 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10682 unlock_user(p, arg1, 0);
10683 return ret;
10684 #endif
10685 case TARGET_NR_setuid:
10686 return get_errno(sys_setuid(low2highuid(arg1)));
10687 case TARGET_NR_setgid:
10688 return get_errno(sys_setgid(low2highgid(arg1)));
10689 case TARGET_NR_setfsuid:
10690 return get_errno(setfsuid(arg1));
10691 case TARGET_NR_setfsgid:
10692 return get_errno(setfsgid(arg1));
10694 #ifdef TARGET_NR_lchown32
10695 case TARGET_NR_lchown32:
10696 if (!(p = lock_user_string(arg1)))
10697 return -TARGET_EFAULT;
10698 ret = get_errno(lchown(p, arg2, arg3));
10699 unlock_user(p, arg1, 0);
10700 return ret;
10701 #endif
10702 #ifdef TARGET_NR_getuid32
10703 case TARGET_NR_getuid32:
10704 return get_errno(getuid());
10705 #endif
10707 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10708 /* Alpha specific */
10709 case TARGET_NR_getxuid:
10711 uid_t euid;
10712 euid = geteuid();
10713 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
10715 return get_errno(getuid());
10716 #endif
10717 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10718 /* Alpha specific */
10719 case TARGET_NR_getxgid:
10721 uid_t egid;
10722 egid = getegid();
10723 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
10725 return get_errno(getgid());
10726 #endif
10727 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10728 /* Alpha specific */
10729 case TARGET_NR_osf_getsysinfo:
10730 ret = -TARGET_EOPNOTSUPP;
10731 switch (arg1) {
10732 case TARGET_GSI_IEEE_FP_CONTROL:
10734 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
10735 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
10737 swcr &= ~SWCR_STATUS_MASK;
10738 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
10740 if (put_user_u64 (swcr, arg2))
10741 return -TARGET_EFAULT;
10742 ret = 0;
10743 }
10744 break;
10746 /* case GSI_IEEE_STATE_AT_SIGNAL:
10747 -- Not implemented in linux kernel.
10748 case GSI_UACPROC:
10749 -- Retrieves current unaligned access state; not much used.
10750 case GSI_PROC_TYPE:
10751 -- Retrieves implver information; surely not used.
10752 case GSI_GET_HWRPB:
10753 -- Grabs a copy of the HWRPB; surely not used.
10754 */
10755 }
10756 return ret;
10757 #endif
10758 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10759 /* Alpha specific */
10760 case TARGET_NR_osf_setsysinfo:
10761 ret = -TARGET_EOPNOTSUPP;
10762 switch (arg1) {
10763 case TARGET_SSI_IEEE_FP_CONTROL:
10765 uint64_t swcr, fpcr;
10767 if (get_user_u64 (swcr, arg2)) {
10768 return -TARGET_EFAULT;
10769 }
10771 /*
10772 * The kernel calls swcr_update_status to update the
10773 * status bits from the fpcr at every point that it
10774 * could be queried. Therefore, we store the status
10775 * bits only in FPCR.
10776 */
10777 ((CPUAlphaState *)cpu_env)->swcr
10778 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
10780 fpcr = cpu_alpha_load_fpcr(cpu_env);
10781 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
10782 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
10783 cpu_alpha_store_fpcr(cpu_env, fpcr);
10784 ret = 0;
10785 }
10786 break;
10788 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10790 uint64_t exc, fpcr, fex;
10792 if (get_user_u64(exc, arg2)) {
10793 return -TARGET_EFAULT;
10795 exc &= SWCR_STATUS_MASK;
10796 fpcr = cpu_alpha_load_fpcr(cpu_env);
10798 /* Old exceptions are not signaled. */
10799 fex = alpha_ieee_fpcr_to_swcr(fpcr);
10800 fex = exc & ~fex;
10801 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
10802 fex &= ((CPUArchState *)cpu_env)->swcr;
10804 /* Update the hardware fpcr. */
10805 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
10806 cpu_alpha_store_fpcr(cpu_env, fpcr);
10808 if (fex) {
10809 int si_code = TARGET_FPE_FLTUNK;
10810 target_siginfo_t info;
10812 if (fex & SWCR_TRAP_ENABLE_DNO) {
10813 si_code = TARGET_FPE_FLTUND;
10815 if (fex & SWCR_TRAP_ENABLE_INE) {
10816 si_code = TARGET_FPE_FLTRES;
10818 if (fex & SWCR_TRAP_ENABLE_UNF) {
10819 si_code = TARGET_FPE_FLTUND;
10821 if (fex & SWCR_TRAP_ENABLE_OVF) {
10822 si_code = TARGET_FPE_FLTOVF;
10824 if (fex & SWCR_TRAP_ENABLE_DZE) {
10825 si_code = TARGET_FPE_FLTDIV;
10827 if (fex & SWCR_TRAP_ENABLE_INV) {
10828 si_code = TARGET_FPE_FLTINV;
10831 info.si_signo = SIGFPE;
10832 info.si_errno = 0;
10833 info.si_code = si_code;
10834 info._sifields._sigfault._addr
10835 = ((CPUArchState *)cpu_env)->pc;
10836 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10837 QEMU_SI_FAULT, &info);
10839 ret = 0;
10840 }
10841 break;
10843 /* case SSI_NVPAIRS:
10844 -- Used with SSIN_UACPROC to enable unaligned accesses.
10845 case SSI_IEEE_STATE_AT_SIGNAL:
10846 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10847 -- Not implemented in linux kernel
10848 */
10849 }
10850 return ret;
10851 #endif
10852 #ifdef TARGET_NR_osf_sigprocmask
10853 /* Alpha specific. */
10854 case TARGET_NR_osf_sigprocmask:
10856 abi_ulong mask;
10857 int how;
10858 sigset_t set, oldset;
10860 switch(arg1) {
10861 case TARGET_SIG_BLOCK:
10862 how = SIG_BLOCK;
10863 break;
10864 case TARGET_SIG_UNBLOCK:
10865 how = SIG_UNBLOCK;
10866 break;
10867 case TARGET_SIG_SETMASK:
10868 how = SIG_SETMASK;
10869 break;
10870 default:
10871 return -TARGET_EINVAL;
10873 mask = arg2;
10874 target_to_host_old_sigset(&set, &mask);
10875 ret = do_sigprocmask(how, &set, &oldset);
10876 if (!ret) {
10877 host_to_target_old_sigset(&mask, &oldset);
10878 ret = mask;
10881 return ret;
10882 #endif
10884 #ifdef TARGET_NR_getgid32
10885 case TARGET_NR_getgid32:
10886 return get_errno(getgid());
10887 #endif
10888 #ifdef TARGET_NR_geteuid32
10889 case TARGET_NR_geteuid32:
10890 return get_errno(geteuid());
10891 #endif
10892 #ifdef TARGET_NR_getegid32
10893 case TARGET_NR_getegid32:
10894 return get_errno(getegid());
10895 #endif
10896 #ifdef TARGET_NR_setreuid32
10897 case TARGET_NR_setreuid32:
10898 return get_errno(setreuid(arg1, arg2));
10899 #endif
10900 #ifdef TARGET_NR_setregid32
10901 case TARGET_NR_setregid32:
10902 return get_errno(setregid(arg1, arg2));
10903 #endif
10904 #ifdef TARGET_NR_getgroups32
10905 case TARGET_NR_getgroups32:
10907 int gidsetsize = arg1;
10908 uint32_t *target_grouplist;
10909 gid_t *grouplist;
10910 int i;
10912 grouplist = alloca(gidsetsize * sizeof(gid_t));
10913 ret = get_errno(getgroups(gidsetsize, grouplist));
10914 if (gidsetsize == 0)
10915 return ret;
10916 if (!is_error(ret)) {
10917 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10918 if (!target_grouplist) {
10919 return -TARGET_EFAULT;
10921 for (i = 0; i < ret; i++)
10922 target_grouplist[i] = tswap32(grouplist[i]);
10923 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10926 return ret;
10927 #endif
10928 #ifdef TARGET_NR_setgroups32
10929 case TARGET_NR_setgroups32:
10931 int gidsetsize = arg1;
10932 uint32_t *target_grouplist;
10933 gid_t *grouplist;
10934 int i;
10936 grouplist = alloca(gidsetsize * sizeof(gid_t));
10937 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10938 if (!target_grouplist) {
10939 return -TARGET_EFAULT;
10941 for (i = 0; i < gidsetsize; i++)
10942 grouplist[i] = tswap32(target_grouplist[i]);
10943 unlock_user(target_grouplist, arg2, 0);
10944 return get_errno(setgroups(gidsetsize, grouplist));
10946 #endif
10947 #ifdef TARGET_NR_fchown32
10948 case TARGET_NR_fchown32:
10949 return get_errno(fchown(arg1, arg2, arg3));
10950 #endif
10951 #ifdef TARGET_NR_setresuid32
10952 case TARGET_NR_setresuid32:
10953 return get_errno(sys_setresuid(arg1, arg2, arg3));
10954 #endif
10955 #ifdef TARGET_NR_getresuid32
10956 case TARGET_NR_getresuid32:
10958 uid_t ruid, euid, suid;
10959 ret = get_errno(getresuid(&ruid, &euid, &suid));
10960 if (!is_error(ret)) {
10961 if (put_user_u32(ruid, arg1)
10962 || put_user_u32(euid, arg2)
10963 || put_user_u32(suid, arg3))
10964 return -TARGET_EFAULT;
10967 return ret;
10968 #endif
10969 #ifdef TARGET_NR_setresgid32
10970 case TARGET_NR_setresgid32:
10971 return get_errno(sys_setresgid(arg1, arg2, arg3));
10972 #endif
10973 #ifdef TARGET_NR_getresgid32
10974 case TARGET_NR_getresgid32:
10976 gid_t rgid, egid, sgid;
10977 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10978 if (!is_error(ret)) {
10979 if (put_user_u32(rgid, arg1)
10980 || put_user_u32(egid, arg2)
10981 || put_user_u32(sgid, arg3))
10982 return -TARGET_EFAULT;
10985 return ret;
10986 #endif
10987 #ifdef TARGET_NR_chown32
10988 case TARGET_NR_chown32:
10989 if (!(p = lock_user_string(arg1)))
10990 return -TARGET_EFAULT;
10991 ret = get_errno(chown(p, arg2, arg3));
10992 unlock_user(p, arg1, 0);
10993 return ret;
10994 #endif
10995 #ifdef TARGET_NR_setuid32
10996 case TARGET_NR_setuid32:
10997 return get_errno(sys_setuid(arg1));
10998 #endif
10999 #ifdef TARGET_NR_setgid32
11000 case TARGET_NR_setgid32:
11001 return get_errno(sys_setgid(arg1));
11002 #endif
11003 #ifdef TARGET_NR_setfsuid32
11004 case TARGET_NR_setfsuid32:
11005 return get_errno(setfsuid(arg1));
11006 #endif
11007 #ifdef TARGET_NR_setfsgid32
11008 case TARGET_NR_setfsgid32:
11009 return get_errno(setfsgid(arg1));
11010 #endif
11011 #ifdef TARGET_NR_mincore
11012 case TARGET_NR_mincore:
11014 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11015 if (!a) {
11016 return -TARGET_ENOMEM;
11018 p = lock_user_string(arg3);
11019 if (!p) {
11020 ret = -TARGET_EFAULT;
11021 } else {
11022 ret = get_errno(mincore(a, arg2, p));
11023 unlock_user(p, arg3, ret);
11025 unlock_user(a, arg1, 0);
11027 return ret;
11028 #endif
11029 #ifdef TARGET_NR_arm_fadvise64_64
11030 case TARGET_NR_arm_fadvise64_64:
11031 /* arm_fadvise64_64 looks like fadvise64_64 but
11032 * with different argument order: fd, advice, offset, len
11033 * rather than the usual fd, offset, len, advice.
11034 * Note that offset and len are both 64-bit so appear as
11035 * pairs of 32-bit registers.
11036 */
11037 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11038 target_offset64(arg5, arg6), arg2);
11039 return -host_to_target_errno(ret);
11040 #endif
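/*
 * The 64-bit offset and length above arrive as pairs of 32-bit guest
 * registers and are recombined by target_offset64(). A plausible sketch of
 * that combination (illustrative; the real helper must respect the guest
 * ABI's word order):
 */
#if 0
#include <stdint.h>

static uint64_t combine_offset64(uint32_t high_word, uint32_t low_word)
{
    /* Assumes the first register of the pair holds the high half. */
    return ((uint64_t)high_word << 32) | low_word;
}
#endif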
11042 #if TARGET_ABI_BITS == 32
11044 #ifdef TARGET_NR_fadvise64_64
11045 case TARGET_NR_fadvise64_64:
11046 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11047 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11048 ret = arg2;
11049 arg2 = arg3;
11050 arg3 = arg4;
11051 arg4 = arg5;
11052 arg5 = arg6;
11053 arg6 = ret;
11054 #else
11055 /* 6 args: fd, offset (high, low), len (high, low), advice */
11056 if (regpairs_aligned(cpu_env, num)) {
11057 /* offset is in (3,4), len in (5,6) and advice in 7 */
11058 arg2 = arg3;
11059 arg3 = arg4;
11060 arg4 = arg5;
11061 arg5 = arg6;
11062 arg6 = arg7;
11064 #endif
11065 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11066 target_offset64(arg4, arg5), arg6);
11067 return -host_to_target_errno(ret);
11068 #endif
11070 #ifdef TARGET_NR_fadvise64
11071 case TARGET_NR_fadvise64:
11072 /* 5 args: fd, offset (high, low), len, advice */
11073 if (regpairs_aligned(cpu_env, num)) {
11074 /* offset is in (3,4), len in 5 and advice in 6 */
11075 arg2 = arg3;
11076 arg3 = arg4;
11077 arg4 = arg5;
11078 arg5 = arg6;
11080 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11081 return -host_to_target_errno(ret);
11082 #endif
11084 #else /* not a 32-bit ABI */
11085 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11086 #ifdef TARGET_NR_fadvise64_64
11087 case TARGET_NR_fadvise64_64:
11088 #endif
11089 #ifdef TARGET_NR_fadvise64
11090 case TARGET_NR_fadvise64:
11091 #endif
11092 #ifdef TARGET_S390X
11093 switch (arg4) {
11094 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11095 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11096 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11097 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11098 default: break;
11100 #endif
11101 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11102 #endif
11103 #endif /* end of 64-bit ABI fadvise handling */
11105 #ifdef TARGET_NR_madvise
11106 case TARGET_NR_madvise:
11107 /* A straight passthrough may not be safe because qemu sometimes
11108 turns private file-backed mappings into anonymous mappings.
11109 This will break MADV_DONTNEED.
11110 This is a hint, so ignoring and returning success is ok. */
11111 return 0;
11112 #endif
11113 #if TARGET_ABI_BITS == 32
11114 case TARGET_NR_fcntl64:
11116 int cmd;
11117 struct flock64 fl;
11118 from_flock64_fn *copyfrom = copy_from_user_flock64;
11119 to_flock64_fn *copyto = copy_to_user_flock64;
11121 #ifdef TARGET_ARM
11122 if (!((CPUARMState *)cpu_env)->eabi) {
11123 copyfrom = copy_from_user_oabi_flock64;
11124 copyto = copy_to_user_oabi_flock64;
11126 #endif
11128 cmd = target_to_host_fcntl_cmd(arg2);
11129 if (cmd == -TARGET_EINVAL) {
11130 return cmd;
11133 switch(arg2) {
11134 case TARGET_F_GETLK64:
11135 ret = copyfrom(&fl, arg3);
11136 if (ret) {
11137 break;
11139 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11140 if (ret == 0) {
11141 ret = copyto(arg3, &fl);
11143 break;
11145 case TARGET_F_SETLK64:
11146 case TARGET_F_SETLKW64:
11147 ret = copyfrom(&fl, arg3);
11148 if (ret) {
11149 break;
11151 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11152 break;
11153 default:
11154 ret = do_fcntl(arg1, arg2, arg3);
11155 break;
11157 return ret;
11159 #endif
11160 #ifdef TARGET_NR_cacheflush
11161 case TARGET_NR_cacheflush:
11162 /* self-modifying code is handled automatically, so nothing needed */
11163 return 0;
11164 #endif
11165 #ifdef TARGET_NR_getpagesize
11166 case TARGET_NR_getpagesize:
11167 return TARGET_PAGE_SIZE;
11168 #endif
11169 case TARGET_NR_gettid:
11170 return get_errno(sys_gettid());
11171 #ifdef TARGET_NR_readahead
11172 case TARGET_NR_readahead:
11173 #if TARGET_ABI_BITS == 32
11174 if (regpairs_aligned(cpu_env, num)) {
11175 arg2 = arg3;
11176 arg3 = arg4;
11177 arg4 = arg5;
11179 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11180 #else
11181 ret = get_errno(readahead(arg1, arg2, arg3));
11182 #endif
11183 return ret;
11184 #endif
11185 #ifdef CONFIG_ATTR
11186 #ifdef TARGET_NR_setxattr
11187 case TARGET_NR_listxattr:
11188 case TARGET_NR_llistxattr:
11190 void *p, *b = 0;
11191 if (arg2) {
11192 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11193 if (!b) {
11194 return -TARGET_EFAULT;
11197 p = lock_user_string(arg1);
11198 if (p) {
11199 if (num == TARGET_NR_listxattr) {
11200 ret = get_errno(listxattr(p, b, arg3));
11201 } else {
11202 ret = get_errno(llistxattr(p, b, arg3));
11204 } else {
11205 ret = -TARGET_EFAULT;
11207 unlock_user(p, arg1, 0);
11208 unlock_user(b, arg2, arg3);
11209 return ret;
11211 case TARGET_NR_flistxattr:
11213 void *b = 0;
11214 if (arg2) {
11215 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11216 if (!b) {
11217 return -TARGET_EFAULT;
11220 ret = get_errno(flistxattr(arg1, b, arg3));
11221 unlock_user(b, arg2, arg3);
11222 return ret;
11224 case TARGET_NR_setxattr:
11225 case TARGET_NR_lsetxattr:
11227 void *p, *n, *v = 0;
11228 if (arg3) {
11229 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11230 if (!v) {
11231 return -TARGET_EFAULT;
11234 p = lock_user_string(arg1);
11235 n = lock_user_string(arg2);
11236 if (p && n) {
11237 if (num == TARGET_NR_setxattr) {
11238 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11239 } else {
11240 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11242 } else {
11243 ret = -TARGET_EFAULT;
11245 unlock_user(p, arg1, 0);
11246 unlock_user(n, arg2, 0);
11247 unlock_user(v, arg3, 0);
11249 return ret;
11250 case TARGET_NR_fsetxattr:
11252 void *n, *v = 0;
11253 if (arg3) {
11254 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11255 if (!v) {
11256 return -TARGET_EFAULT;
11259 n = lock_user_string(arg2);
11260 if (n) {
11261 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11262 } else {
11263 ret = -TARGET_EFAULT;
11265 unlock_user(n, arg2, 0);
11266 unlock_user(v, arg3, 0);
11268 return ret;
11269 case TARGET_NR_getxattr:
11270 case TARGET_NR_lgetxattr:
11272 void *p, *n, *v = 0;
11273 if (arg3) {
11274 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11275 if (!v) {
11276 return -TARGET_EFAULT;
11279 p = lock_user_string(arg1);
11280 n = lock_user_string(arg2);
11281 if (p && n) {
11282 if (num == TARGET_NR_getxattr) {
11283 ret = get_errno(getxattr(p, n, v, arg4));
11284 } else {
11285 ret = get_errno(lgetxattr(p, n, v, arg4));
11287 } else {
11288 ret = -TARGET_EFAULT;
11290 unlock_user(p, arg1, 0);
11291 unlock_user(n, arg2, 0);
11292 unlock_user(v, arg3, arg4);
11294 return ret;
11295 case TARGET_NR_fgetxattr:
11297 void *n, *v = 0;
11298 if (arg3) {
11299 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11300 if (!v) {
11301 return -TARGET_EFAULT;
11304 n = lock_user_string(arg2);
11305 if (n) {
11306 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11307 } else {
11308 ret = -TARGET_EFAULT;
11310 unlock_user(n, arg2, 0);
11311 unlock_user(v, arg3, arg4);
11313 return ret;
11314 case TARGET_NR_removexattr:
11315 case TARGET_NR_lremovexattr:
11317 void *p, *n;
11318 p = lock_user_string(arg1);
11319 n = lock_user_string(arg2);
11320 if (p && n) {
11321 if (num == TARGET_NR_removexattr) {
11322 ret = get_errno(removexattr(p, n));
11323 } else {
11324 ret = get_errno(lremovexattr(p, n));
11326 } else {
11327 ret = -TARGET_EFAULT;
11329 unlock_user(p, arg1, 0);
11330 unlock_user(n, arg2, 0);
11332 return ret;
11333 case TARGET_NR_fremovexattr:
11335 void *n;
11336 n = lock_user_string(arg2);
11337 if (n) {
11338 ret = get_errno(fremovexattr(arg1, n));
11339 } else {
11340 ret = -TARGET_EFAULT;
11342 unlock_user(n, arg2, 0);
11344 return ret;
11345 #endif
11346 #endif /* CONFIG_ATTR */
11347 #ifdef TARGET_NR_set_thread_area
11348 case TARGET_NR_set_thread_area:
11349 #if defined(TARGET_MIPS)
11350 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11351 return 0;
11352 #elif defined(TARGET_CRIS)
11353 if (arg1 & 0xff)
11354 ret = -TARGET_EINVAL;
11355 else {
11356 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11357 ret = 0;
11359 return ret;
11360 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11361 return do_set_thread_area(cpu_env, arg1);
11362 #elif defined(TARGET_M68K)
11364 TaskState *ts = cpu->opaque;
11365 ts->tp_value = arg1;
11366 return 0;
11368 #else
11369 return -TARGET_ENOSYS;
11370 #endif
11371 #endif
11372 #ifdef TARGET_NR_get_thread_area
11373 case TARGET_NR_get_thread_area:
11374 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11375 return do_get_thread_area(cpu_env, arg1);
11376 #elif defined(TARGET_M68K)
11378 TaskState *ts = cpu->opaque;
11379 return ts->tp_value;
11381 #else
11382 return -TARGET_ENOSYS;
11383 #endif
11384 #endif
11385 #ifdef TARGET_NR_getdomainname
11386 case TARGET_NR_getdomainname:
11387 return -TARGET_ENOSYS;
11388 #endif
11390 #ifdef TARGET_NR_clock_settime
11391 case TARGET_NR_clock_settime:
11393 struct timespec ts;
11395 ret = target_to_host_timespec(&ts, arg2);
11396 if (!is_error(ret)) {
11397 ret = get_errno(clock_settime(arg1, &ts));
11399 return ret;
11401 #endif
11402 #ifdef TARGET_NR_clock_gettime
11403 case TARGET_NR_clock_gettime:
11405 struct timespec ts;
11406 ret = get_errno(clock_gettime(arg1, &ts));
11407 if (!is_error(ret)) {
11408 ret = host_to_target_timespec(arg2, &ts);
11410 return ret;
11412 #endif
11413 #ifdef TARGET_NR_clock_getres
11414 case TARGET_NR_clock_getres:
11416 struct timespec ts;
11417 ret = get_errno(clock_getres(arg1, &ts));
11418 if (!is_error(ret)) {
11419 host_to_target_timespec(arg2, &ts);
11421 return ret;
11423 #endif
11424 #ifdef TARGET_NR_clock_nanosleep
11425 case TARGET_NR_clock_nanosleep:
11427 struct timespec ts;
11428 target_to_host_timespec(&ts, arg3);
11429 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11430 &ts, arg4 ? &ts : NULL));
11431 if (arg4)
11432 host_to_target_timespec(arg4, &ts);
11434 #if defined(TARGET_PPC)
11435 /* clock_nanosleep is odd in that it returns positive errno values.
11436 * On PPC, CR0 bit 3 should be set in such a situation. */
11437 if (ret && ret != -TARGET_ERESTARTSYS) {
11438 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11440 #endif
11441 return ret;
11443 #endif
11445 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11446 case TARGET_NR_set_tid_address:
11447 return get_errno(set_tid_address((int *)g2h(arg1)));
11448 #endif
11450 case TARGET_NR_tkill:
11451 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11453 case TARGET_NR_tgkill:
11454 return get_errno(safe_tgkill((int)arg1, (int)arg2,
11455 target_to_host_signal(arg3)));
11457 #ifdef TARGET_NR_set_robust_list
11458 case TARGET_NR_set_robust_list:
11459 case TARGET_NR_get_robust_list:
11460 /* The ABI for supporting robust futexes has userspace pass
11461 * the kernel a pointer to a linked list which is updated by
11462 * userspace after the syscall; the list is walked by the kernel
11463 * when the thread exits. Since the linked list in QEMU guest
11464 * memory isn't a valid linked list for the host and we have
11465 * no way to reliably intercept the thread-death event, we can't
11466 * support these. Silently return ENOSYS so that guest userspace
11467 * falls back to a non-robust futex implementation (which should
11468 * be OK except in the corner case of the guest crashing while
11469 * holding a mutex that is shared with another process via
11470 * shared memory).
11471 */
11472 return -TARGET_ENOSYS;
11473 #endif
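/*
 * For reference, the ABI described above as a native program would use it:
 * register a robust_list_head and the kernel walks the list when the
 * thread dies. QEMU cannot mirror this for guest-format lists, hence the
 * ENOSYS. Illustrative user-space sketch only.
 */
#if 0
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static struct robust_list_head head = {
    .list = { &head.list },    /* empty circular list */
    .futex_offset = 0,
    .list_op_pending = NULL,
};

static long register_robust_list(void)
{
    return syscall(SYS_set_robust_list, &head, sizeof(head));
}
#endif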
11475 #if defined(TARGET_NR_utimensat)
11476 case TARGET_NR_utimensat:
11478 struct timespec *tsp, ts[2];
11479 if (!arg3) {
11480 tsp = NULL;
11481 } else {
11482 target_to_host_timespec(ts, arg3);
11483 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11484 tsp = ts;
11486 if (!arg2)
11487 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11488 else {
11489 if (!(p = lock_user_string(arg2))) {
11490 return -TARGET_EFAULT;
11492 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11493 unlock_user(p, arg2, 0);
11496 return ret;
11497 #endif
11498 case TARGET_NR_futex:
11499 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11500 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11501 case TARGET_NR_inotify_init:
11502 ret = get_errno(sys_inotify_init());
11503 if (ret >= 0) {
11504 fd_trans_register(ret, &target_inotify_trans);
11506 return ret;
11507 #endif
11508 #ifdef CONFIG_INOTIFY1
11509 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11510 case TARGET_NR_inotify_init1:
11511 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
11512 fcntl_flags_tbl)));
11513 if (ret >= 0) {
11514 fd_trans_register(ret, &target_inotify_trans);
11516 return ret;
11517 #endif
11518 #endif
11519 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11520 case TARGET_NR_inotify_add_watch:
11521 p = lock_user_string(arg2);
11522 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11523 unlock_user(p, arg2, 0);
11524 return ret;
11525 #endif
11526 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11527 case TARGET_NR_inotify_rm_watch:
11528 return get_errno(sys_inotify_rm_watch(arg1, arg2));
11529 #endif
11531 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11532 case TARGET_NR_mq_open:
11534 struct mq_attr posix_mq_attr;
11535 struct mq_attr *pposix_mq_attr;
11536 int host_flags;
11538 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11539 pposix_mq_attr = NULL;
11540 if (arg4) {
11541 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11542 return -TARGET_EFAULT;
11544 pposix_mq_attr = &posix_mq_attr;
11546 p = lock_user_string(arg1 - 1);
11547 if (!p) {
11548 return -TARGET_EFAULT;
11550 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
11551 unlock_user(p, arg1, 0);
11553 return ret;
11555 case TARGET_NR_mq_unlink:
11556 p = lock_user_string(arg1 - 1);
11557 if (!p) {
11558 return -TARGET_EFAULT;
11560 ret = get_errno(mq_unlink(p));
11561 unlock_user(p, arg1, 0);
11562 return ret;
11564 case TARGET_NR_mq_timedsend:
11565 {
11566 struct timespec ts;
11568 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11569 if (arg5 != 0) {
11570 target_to_host_timespec(&ts, arg5);
11571 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11572 host_to_target_timespec(arg5, &ts);
11573 } else {
11574 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11575 }
11576 unlock_user(p, arg2, arg3);
11578 return ret;
11579 }
11580 case TARGET_NR_mq_timedreceive:
11581 {
11582 struct timespec ts;
11583 unsigned int prio;
11585 p = lock_user(VERIFY_WRITE, arg2, arg3, 0); /* receive fills the buffer */
11586 if (arg5 != 0) {
11587 target_to_host_timespec(&ts, arg5);
11588 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11589 &prio, &ts));
11590 host_to_target_timespec(arg5, &ts);
11591 } else {
11592 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11593 &prio, NULL));
11594 }
11595 unlock_user(p, arg2, arg3);
11596 if (arg4 != 0)
11597 put_user_u32(prio, arg4);
11599 return ret;
11600 }
11601 /* Not implemented for now... */
11602 /* case TARGET_NR_mq_notify: */
11603 /* break; */
11605 case TARGET_NR_mq_getsetattr:
11607 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11608 ret = 0;
11609 if (arg2 != 0) {
11610 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11611 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
11612 &posix_mq_attr_out));
11613 } else if (arg3 != 0) {
11614 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
11616 if (ret == 0 && arg3 != 0) {
11617 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11620 return ret;
11621 #endif
11623 #ifdef CONFIG_SPLICE
11624 #ifdef TARGET_NR_tee
11625 case TARGET_NR_tee:
11627 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11629 return ret;
11630 #endif
11631 #ifdef TARGET_NR_splice
11632 case TARGET_NR_splice:
11634 loff_t loff_in, loff_out;
11635 loff_t *ploff_in = NULL, *ploff_out = NULL;
11636 if (arg2) {
11637 if (get_user_u64(loff_in, arg2)) {
11638 return -TARGET_EFAULT;
11640 ploff_in = &loff_in;
11642 if (arg4) {
11643 if (get_user_u64(loff_out, arg4)) {
11644 return -TARGET_EFAULT;
11646 ploff_out = &loff_out;
11648 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11649 if (arg2) {
11650 if (put_user_u64(loff_in, arg2)) {
11651 return -TARGET_EFAULT;
11654 if (arg4) {
11655 if (put_user_u64(loff_out, arg4)) {
11656 return -TARGET_EFAULT;
11660 return ret;
11661 #endif
11662 #ifdef TARGET_NR_vmsplice
11663 case TARGET_NR_vmsplice:
11665 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11666 if (vec != NULL) {
11667 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11668 unlock_iovec(vec, arg2, arg3, 0);
11669 } else {
11670 ret = -host_to_target_errno(errno);
11673 return ret;
11674 #endif
11675 #endif /* CONFIG_SPLICE */
11676 #ifdef CONFIG_EVENTFD
11677 #if defined(TARGET_NR_eventfd)
11678 case TARGET_NR_eventfd:
11679 ret = get_errno(eventfd(arg1, 0));
11680 if (ret >= 0) {
11681 fd_trans_register(ret, &target_eventfd_trans);
11683 return ret;
11684 #endif
11685 #if defined(TARGET_NR_eventfd2)
11686 case TARGET_NR_eventfd2:
11688 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11689 if (arg2 & TARGET_O_NONBLOCK) {
11690 host_flags |= O_NONBLOCK;
11692 if (arg2 & TARGET_O_CLOEXEC) {
11693 host_flags |= O_CLOEXEC;
11695 ret = get_errno(eventfd(arg1, host_flags));
11696 if (ret >= 0) {
11697 fd_trans_register(ret, &target_eventfd_trans);
11699 return ret;
11701 #endif
11702 #endif /* CONFIG_EVENTFD */
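/*
 * Note on the eventfd2 flag handling above: Linux defines EFD_NONBLOCK and
 * EFD_CLOEXEC with the same values as O_NONBLOCK and O_CLOEXEC, so passing
 * the translated O_* bits straight to eventfd() works. Native equivalent
 * (illustrative):
 */
#if 0
#include <sys/eventfd.h>

static int make_event_fd(void)
{
    return eventfd(0 /* initial counter */, EFD_NONBLOCK | EFD_CLOEXEC);
}
#endif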
11703 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11704 case TARGET_NR_fallocate:
11705 #if TARGET_ABI_BITS == 32
11706 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11707 target_offset64(arg5, arg6)));
11708 #else
11709 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11710 #endif
11711 return ret;
11712 #endif
11713 #if defined(CONFIG_SYNC_FILE_RANGE)
11714 #if defined(TARGET_NR_sync_file_range)
11715 case TARGET_NR_sync_file_range:
11716 #if TARGET_ABI_BITS == 32
11717 #if defined(TARGET_MIPS)
11718 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11719 target_offset64(arg5, arg6), arg7));
11720 #else
11721 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11722 target_offset64(arg4, arg5), arg6));
11723 #endif /* !TARGET_MIPS */
11724 #else
11725 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11726 #endif
11727 return ret;
11728 #endif
11729 #if defined(TARGET_NR_sync_file_range2)
11730 case TARGET_NR_sync_file_range2:
11731 /* This is like sync_file_range but the arguments are reordered */
11732 #if TARGET_ABI_BITS == 32
11733 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11734 target_offset64(arg5, arg6), arg2));
11735 #else
11736 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11737 #endif
11738 return ret;
11739 #endif
11740 #endif
11741 #if defined(TARGET_NR_signalfd4)
11742 case TARGET_NR_signalfd4:
11743 return do_signalfd4(arg1, arg2, arg4);
11744 #endif
11745 #if defined(TARGET_NR_signalfd)
11746 case TARGET_NR_signalfd:
11747 return do_signalfd4(arg1, arg2, 0);
11748 #endif
11749 #if defined(CONFIG_EPOLL)
11750 #if defined(TARGET_NR_epoll_create)
11751 case TARGET_NR_epoll_create:
11752 return get_errno(epoll_create(arg1));
11753 #endif
11754 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11755 case TARGET_NR_epoll_create1:
11756 return get_errno(epoll_create1(arg1));
11757 #endif
11758 #if defined(TARGET_NR_epoll_ctl)
11759 case TARGET_NR_epoll_ctl:
11761 struct epoll_event ep;
11762 struct epoll_event *epp = 0;
11763 if (arg4) {
11764 struct target_epoll_event *target_ep;
11765 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11766 return -TARGET_EFAULT;
11768 ep.events = tswap32(target_ep->events);
11769 /* The epoll_data_t union is just opaque data to the kernel,
11770 * so we transfer all 64 bits across and need not worry what
11771 * actual data type it is.
11772 */
11773 ep.data.u64 = tswap64(target_ep->data.u64);
11774 unlock_user_struct(target_ep, arg4, 0);
11775 epp = &ep;
11777 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11779 #endif
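/*
 * Because epoll_data_t is opaque to the kernel, the single 64-bit swap
 * above is safe no matter which union member the guest stored. Native
 * illustration of the round trip:
 */
#if 0
#include <sys/epoll.h>

static void stash_fd(struct epoll_event *ev, int fd)
{
    ev->data.u64 = 0;   /* clear the unused half of the union */
    ev->data.fd = fd;   /* the kernel hands the union back verbatim */
}
#endif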
11781 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11782 #if defined(TARGET_NR_epoll_wait)
11783 case TARGET_NR_epoll_wait:
11784 #endif
11785 #if defined(TARGET_NR_epoll_pwait)
11786 case TARGET_NR_epoll_pwait:
11787 #endif
11789 struct target_epoll_event *target_ep;
11790 struct epoll_event *ep;
11791 int epfd = arg1;
11792 int maxevents = arg3;
11793 int timeout = arg4;
11795 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11796 return -TARGET_EINVAL;
11799 target_ep = lock_user(VERIFY_WRITE, arg2,
11800 maxevents * sizeof(struct target_epoll_event), 1);
11801 if (!target_ep) {
11802 return -TARGET_EFAULT;
11805 ep = g_try_new(struct epoll_event, maxevents);
11806 if (!ep) {
11807 unlock_user(target_ep, arg2, 0);
11808 return -TARGET_ENOMEM;
11811 switch (num) {
11812 #if defined(TARGET_NR_epoll_pwait)
11813 case TARGET_NR_epoll_pwait:
11815 target_sigset_t *target_set;
11816 sigset_t _set, *set = &_set;
11818 if (arg5) {
11819 if (arg6 != sizeof(target_sigset_t)) {
11820 ret = -TARGET_EINVAL;
11821 break;
11824 target_set = lock_user(VERIFY_READ, arg5,
11825 sizeof(target_sigset_t), 1);
11826 if (!target_set) {
11827 ret = -TARGET_EFAULT;
11828 break;
11830 target_to_host_sigset(set, target_set);
11831 unlock_user(target_set, arg5, 0);
11832 } else {
11833 set = NULL;
11836 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11837 set, SIGSET_T_SIZE));
11838 break;
11840 #endif
11841 #if defined(TARGET_NR_epoll_wait)
11842 case TARGET_NR_epoll_wait:
11843 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11844 NULL, 0));
11845 break;
11846 #endif
11847 default:
11848 ret = -TARGET_ENOSYS;
11850 if (!is_error(ret)) {
11851 int i;
11852 for (i = 0; i < ret; i++) {
11853 target_ep[i].events = tswap32(ep[i].events);
11854 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11856 unlock_user(target_ep, arg2,
11857 ret * sizeof(struct target_epoll_event));
11858 } else {
11859 unlock_user(target_ep, arg2, 0);
11861 g_free(ep);
11862 return ret;
11864 #endif
11865 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c: the read, compare and
         * write below are not atomic against other guest threads. */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }

        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
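    /*
     * POSIX timers: a host timer_t cannot be handed to the guest directly,
     * so each created timer is stored in the g_posix_timers table and the
     * guest instead receives its table index tagged with TIMER_MAGIC.
     * get_timer_id() (defined earlier in this file) validates the tag and
     * recovers the index for the other timer_* syscalls below.
     */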
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
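    /*
     * timerfd: TFD_CLOEXEC and TFD_NONBLOCK share their values with
     * O_CLOEXEC and O_NONBLOCK, which is why the creation flags can be
     * translated with the generic fcntl_flags_tbl bitmask table below.
     */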
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
    {
        struct itimerspec its_curr;

        ret = get_errno(timerfd_gettime(arg1, &its_curr));

        if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
    {
        struct itimerspec its_new, its_old, *p_new;

        if (arg3) {
            if (target_to_host_itimerspec(&its_new, arg3)) {
                return -TARGET_EFAULT;
            }
            p_new = &its_new;
        } else {
            p_new = NULL;
        }

        ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

        if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
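    /*
     * ioprio_get/ioprio_set have no glibc wrappers, hence the extra
     * __NR_* guards: the calls go through direct syscall stubs (the
     * _syscall macro definitions earlier in this file).
     */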
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
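    /*
     * Any syscall not handled above is reported via qemu_log_mask() with
     * the LOG_UNIMP category (enabled with "-d unimp") rather than being
     * silently swallowed, and fails with ENOSYS as the kernel would.
     */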
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
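
/*
 * do_syscall() is the entry point called from the per-architecture CPU
 * main loops. It wraps do_syscall1() (the big switch above) with the
 * syscall trace points and, when -strace is enabled, prints each syscall
 * and its return value around the actual dispatch.
 */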
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(do_strace)) {
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                          arg5, arg6, arg7, arg8);
        print_syscall_ret(num, ret);
    } else {
        ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                          arg5, arg6, arg7, arg8);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}