linux-user: Make cpu_env accessible in strace.c
[qemu/ar7.git] / linux-user / syscall.c
blob42107f37e3973687bf6da7076c971b7c416290ce
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef CONFIG_EVENTFD
65 #include <sys/eventfd.h>
66 #endif
67 #ifdef CONFIG_EPOLL
68 #include <sys/epoll.h>
69 #endif
70 #ifdef CONFIG_ATTR
71 #include "qemu/xattr.h"
72 #endif
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
75 #endif
76 #ifdef CONFIG_KCOV
77 #include <sys/kcov.h>
78 #endif
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #include <linux/fd.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
98 #endif
99 #include <linux/fb.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
103 #endif
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
115 #ifdef HAVE_DRM_H
116 #include <libdrm/drm.h>
117 #include <libdrm/i915_drm.h>
118 #endif
119 #include "linux_loop.h"
120 #include "uname.h"
122 #include "qemu.h"
123 #include "qemu/guest-random.h"
124 #include "qemu/selfmap.h"
125 #include "user/syscall-trace.h"
126 #include "qapi/error.h"
127 #include "fd-trans.h"
128 #include "tcg/tcg.h"
130 #ifndef CLONE_IO
131 #define CLONE_IO 0x80000000 /* Clone io context */
132 #endif
134 /* We can't directly call the host clone syscall, because this will
135 * badly confuse libc (breaking mutexes, for example). So we must
136 * divide clone flags into:
137 * * flag combinations that look like pthread_create()
138 * * flag combinations that look like fork()
139 * * flags we can implement within QEMU itself
140 * * flags we can't support and will return an error for
142 /* For thread creation, all these flags must be present; for
143 * fork, none must be present.
145 #define CLONE_THREAD_FLAGS \
146 (CLONE_VM | CLONE_FS | CLONE_FILES | \
147 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
149 /* These flags are ignored:
150 * CLONE_DETACHED is now ignored by the kernel;
151 * CLONE_IO is just an optimisation hint to the I/O scheduler
153 #define CLONE_IGNORED_FLAGS \
154 (CLONE_DETACHED | CLONE_IO)
156 /* Flags for fork which we can implement within QEMU itself */
157 #define CLONE_OPTIONAL_FORK_FLAGS \
158 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
159 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
161 /* Flags for thread creation which we can implement within QEMU itself */
162 #define CLONE_OPTIONAL_THREAD_FLAGS \
163 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
164 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
166 #define CLONE_INVALID_FORK_FLAGS \
167 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
169 #define CLONE_INVALID_THREAD_FLAGS \
170 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
171 CLONE_IGNORED_FLAGS))
173 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
174 * have almost all been allocated. We cannot support any of
175 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
176 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
177 * The checks against the invalid thread masks above will catch these.
178 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
181 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
182 * once. This exercises the codepaths for restart.
184 //#define DEBUG_ERESTARTSYS
186 //#include <linux/msdos_fs.h>
187 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
188 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
190 #undef _syscall0
191 #undef _syscall1
192 #undef _syscall2
193 #undef _syscall3
194 #undef _syscall4
195 #undef _syscall5
196 #undef _syscall6
198 #define _syscall0(type,name) \
199 static type name (void) \
201 return syscall(__NR_##name); \
204 #define _syscall1(type,name,type1,arg1) \
205 static type name (type1 arg1) \
207 return syscall(__NR_##name, arg1); \
210 #define _syscall2(type,name,type1,arg1,type2,arg2) \
211 static type name (type1 arg1,type2 arg2) \
213 return syscall(__NR_##name, arg1, arg2); \
216 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
217 static type name (type1 arg1,type2 arg2,type3 arg3) \
219 return syscall(__NR_##name, arg1, arg2, arg3); \
222 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
223 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
225 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
228 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
229 type5,arg5) \
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
236 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
237 type5,arg5,type6,arg6) \
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
239 type6 arg6) \
241 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
245 #define __NR_sys_uname __NR_uname
246 #define __NR_sys_getcwd1 __NR_getcwd
247 #define __NR_sys_getdents __NR_getdents
248 #define __NR_sys_getdents64 __NR_getdents64
249 #define __NR_sys_getpriority __NR_getpriority
250 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
251 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
252 #define __NR_sys_syslog __NR_syslog
253 #if defined(__NR_futex)
254 # define __NR_sys_futex __NR_futex
255 #endif
256 #if defined(__NR_futex_time64)
257 # define __NR_sys_futex_time64 __NR_futex_time64
258 #endif
259 #define __NR_sys_inotify_init __NR_inotify_init
260 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
261 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
262 #define __NR_sys_statx __NR_statx
264 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
265 #define __NR__llseek __NR_lseek
266 #endif
268 /* Newer kernel ports have llseek() instead of _llseek() */
269 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
270 #define TARGET_NR__llseek TARGET_NR_llseek
271 #endif
273 #define __NR_sys_gettid __NR_gettid
274 _syscall0(int, sys_gettid)
276 /* For the 64-bit guest on 32-bit host case we must emulate
277 * getdents using getdents64, because otherwise the host
278 * might hand us back more dirent records than we can fit
279 * into the guest buffer after structure format conversion.
280 * Otherwise we emulate getdents with getdents if the host has it.
282 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
283 #define EMULATE_GETDENTS_WITH_GETDENTS
284 #endif
286 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
287 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
288 #endif
289 #if (defined(TARGET_NR_getdents) && \
290 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
291 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
292 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
293 #endif
294 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
295 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
296 loff_t *, res, uint, wh);
297 #endif
298 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
299 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
300 siginfo_t *, uinfo)
301 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
302 #ifdef __NR_exit_group
303 _syscall1(int,exit_group,int,error_code)
304 #endif
305 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
306 _syscall1(int,set_tid_address,int *,tidptr)
307 #endif
308 #if defined(__NR_futex)
309 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
310 const struct timespec *,timeout,int *,uaddr2,int,val3)
311 #endif
312 #if defined(__NR_futex_time64)
313 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
314 const struct timespec *,timeout,int *,uaddr2,int,val3)
315 #endif
316 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
317 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
318 unsigned long *, user_mask_ptr);
319 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
320 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
321 unsigned long *, user_mask_ptr);
322 #define __NR_sys_getcpu __NR_getcpu
323 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
324 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
325 void *, arg);
326 _syscall2(int, capget, struct __user_cap_header_struct *, header,
327 struct __user_cap_data_struct *, data);
328 _syscall2(int, capset, struct __user_cap_header_struct *, header,
329 struct __user_cap_data_struct *, data);
330 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
331 _syscall2(int, ioprio_get, int, which, int, who)
332 #endif
333 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
334 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
335 #endif
336 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
337 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
338 #endif
340 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
341 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
342 unsigned long, idx1, unsigned long, idx2)
343 #endif
346 * It is assumed that struct statx is architecture independent.
348 #if defined(TARGET_NR_statx) && defined(__NR_statx)
349 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
350 unsigned int, mask, struct target_statx *, statxbuf)
351 #endif
352 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
353 _syscall2(int, membarrier, int, cmd, int, flags)
354 #endif
356 static bitmask_transtbl fcntl_flags_tbl[] = {
357 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
358 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
359 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
360 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
361 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
362 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
363 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
364 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
365 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
366 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
367 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
368 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
369 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
370 #if defined(O_DIRECT)
371 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
372 #endif
373 #if defined(O_NOATIME)
374 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
375 #endif
376 #if defined(O_CLOEXEC)
377 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
378 #endif
379 #if defined(O_PATH)
380 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
381 #endif
382 #if defined(O_TMPFILE)
383 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
384 #endif
385 /* Don't terminate the list prematurely on 64-bit host+guest. */
386 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
387 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
388 #endif
389 { 0, 0, 0, 0 }
392 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
394 #ifdef TARGET_NR_utimensat
395 #if defined(__NR_utimensat)
396 #define __NR_sys_utimensat __NR_utimensat
397 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
398 const struct timespec *,tsp,int,flags)
399 #else
/*
 * Fallback when the host lacks the utimensat() syscall: always fail
 * with ENOSYS so the caller reports the syscall as unimplemented.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
406 #endif
407 #endif /* TARGET_NR_utimensat */
409 #ifdef TARGET_NR_renameat2
410 #if defined(__NR_renameat2)
411 #define __NR_sys_renameat2 __NR_renameat2
412 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
413 const char *, new, unsigned int, flags)
414 #else
/*
 * Fallback when the host lacks the renameat2() syscall: only the
 * plain-rename case (flags == 0) can be emulated via renameat();
 * any flag combination fails with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
424 #endif
425 #endif /* TARGET_NR_renameat2 */
427 #ifdef CONFIG_INOTIFY
428 #include <sys/inotify.h>
430 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall table can reference the libc helper. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
435 #endif
436 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper so the syscall table can reference the libc helper. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
441 #endif
442 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper so the syscall table can reference the libc helper. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
447 #endif
448 #ifdef CONFIG_INOTIFY1
449 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper so the syscall table can reference the libc helper. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
454 #endif
455 #endif
456 #else
457 /* Userspace can usually survive runtime without inotify */
458 #undef TARGET_NR_inotify_init
459 #undef TARGET_NR_inotify_init1
460 #undef TARGET_NR_inotify_add_watch
461 #undef TARGET_NR_inotify_rm_watch
462 #endif /* CONFIG_INOTIFY */
464 #if defined(TARGET_NR_prlimit64)
465 #ifndef __NR_prlimit64
466 # define __NR_prlimit64 -1
467 #endif
468 #define __NR_sys_prlimit64 __NR_prlimit64
469 /* The glibc rlimit structure may not be that used by the underlying syscall */
470 struct host_rlimit64 {
471 uint64_t rlim_cur;
472 uint64_t rlim_max;
474 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
475 const struct host_rlimit64 *, new_limit,
476 struct host_rlimit64 *, old_limit)
477 #endif
480 #if defined(TARGET_NR_timer_create)
481 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
482 static timer_t g_posix_timers[32] = { 0, } ;
484 static inline int next_free_host_timer(void)
486 int k ;
487 /* FIXME: Does finding the next free slot require a lock? */
488 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
489 if (g_posix_timers[k] == 0) {
490 g_posix_timers[k] = (timer_t) 1;
491 return k;
494 return -1;
496 #endif
498 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
499 #ifdef TARGET_ARM
500 static inline int regpairs_aligned(void *cpu_env, int num)
502 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
504 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
505 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
506 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
510 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
511 #elif defined(TARGET_SH4)
512 /* SH4 doesn't align register pairs, except for p{read,write}64 */
513 static inline int regpairs_aligned(void *cpu_env, int num)
515 switch (num) {
516 case TARGET_NR_pread64:
517 case TARGET_NR_pwrite64:
518 return 1;
520 default:
521 return 0;
524 #elif defined(TARGET_XTENSA)
525 static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
526 #else
527 static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
528 #endif
530 #define ERRNO_TABLE_SIZE 1200
532 /* target_to_host_errno_table[] is initialized from
533 * host_to_target_errno_table[] in syscall_init(). */
534 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
538 * This list is the union of errno values overridden in asm-<arch>/errno.h
539 * minus the errnos that are not actually generic to all archs.
541 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
542 [EAGAIN] = TARGET_EAGAIN,
543 [EIDRM] = TARGET_EIDRM,
544 [ECHRNG] = TARGET_ECHRNG,
545 [EL2NSYNC] = TARGET_EL2NSYNC,
546 [EL3HLT] = TARGET_EL3HLT,
547 [EL3RST] = TARGET_EL3RST,
548 [ELNRNG] = TARGET_ELNRNG,
549 [EUNATCH] = TARGET_EUNATCH,
550 [ENOCSI] = TARGET_ENOCSI,
551 [EL2HLT] = TARGET_EL2HLT,
552 [EDEADLK] = TARGET_EDEADLK,
553 [ENOLCK] = TARGET_ENOLCK,
554 [EBADE] = TARGET_EBADE,
555 [EBADR] = TARGET_EBADR,
556 [EXFULL] = TARGET_EXFULL,
557 [ENOANO] = TARGET_ENOANO,
558 [EBADRQC] = TARGET_EBADRQC,
559 [EBADSLT] = TARGET_EBADSLT,
560 [EBFONT] = TARGET_EBFONT,
561 [ENOSTR] = TARGET_ENOSTR,
562 [ENODATA] = TARGET_ENODATA,
563 [ETIME] = TARGET_ETIME,
564 [ENOSR] = TARGET_ENOSR,
565 [ENONET] = TARGET_ENONET,
566 [ENOPKG] = TARGET_ENOPKG,
567 [EREMOTE] = TARGET_EREMOTE,
568 [ENOLINK] = TARGET_ENOLINK,
569 [EADV] = TARGET_EADV,
570 [ESRMNT] = TARGET_ESRMNT,
571 [ECOMM] = TARGET_ECOMM,
572 [EPROTO] = TARGET_EPROTO,
573 [EDOTDOT] = TARGET_EDOTDOT,
574 [EMULTIHOP] = TARGET_EMULTIHOP,
575 [EBADMSG] = TARGET_EBADMSG,
576 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
577 [EOVERFLOW] = TARGET_EOVERFLOW,
578 [ENOTUNIQ] = TARGET_ENOTUNIQ,
579 [EBADFD] = TARGET_EBADFD,
580 [EREMCHG] = TARGET_EREMCHG,
581 [ELIBACC] = TARGET_ELIBACC,
582 [ELIBBAD] = TARGET_ELIBBAD,
583 [ELIBSCN] = TARGET_ELIBSCN,
584 [ELIBMAX] = TARGET_ELIBMAX,
585 [ELIBEXEC] = TARGET_ELIBEXEC,
586 [EILSEQ] = TARGET_EILSEQ,
587 [ENOSYS] = TARGET_ENOSYS,
588 [ELOOP] = TARGET_ELOOP,
589 [ERESTART] = TARGET_ERESTART,
590 [ESTRPIPE] = TARGET_ESTRPIPE,
591 [ENOTEMPTY] = TARGET_ENOTEMPTY,
592 [EUSERS] = TARGET_EUSERS,
593 [ENOTSOCK] = TARGET_ENOTSOCK,
594 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
595 [EMSGSIZE] = TARGET_EMSGSIZE,
596 [EPROTOTYPE] = TARGET_EPROTOTYPE,
597 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
598 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
599 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
600 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
601 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
602 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
603 [EADDRINUSE] = TARGET_EADDRINUSE,
604 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
605 [ENETDOWN] = TARGET_ENETDOWN,
606 [ENETUNREACH] = TARGET_ENETUNREACH,
607 [ENETRESET] = TARGET_ENETRESET,
608 [ECONNABORTED] = TARGET_ECONNABORTED,
609 [ECONNRESET] = TARGET_ECONNRESET,
610 [ENOBUFS] = TARGET_ENOBUFS,
611 [EISCONN] = TARGET_EISCONN,
612 [ENOTCONN] = TARGET_ENOTCONN,
613 [EUCLEAN] = TARGET_EUCLEAN,
614 [ENOTNAM] = TARGET_ENOTNAM,
615 [ENAVAIL] = TARGET_ENAVAIL,
616 [EISNAM] = TARGET_EISNAM,
617 [EREMOTEIO] = TARGET_EREMOTEIO,
618 [EDQUOT] = TARGET_EDQUOT,
619 [ESHUTDOWN] = TARGET_ESHUTDOWN,
620 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
621 [ETIMEDOUT] = TARGET_ETIMEDOUT,
622 [ECONNREFUSED] = TARGET_ECONNREFUSED,
623 [EHOSTDOWN] = TARGET_EHOSTDOWN,
624 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
625 [EALREADY] = TARGET_EALREADY,
626 [EINPROGRESS] = TARGET_EINPROGRESS,
627 [ESTALE] = TARGET_ESTALE,
628 [ECANCELED] = TARGET_ECANCELED,
629 [ENOMEDIUM] = TARGET_ENOMEDIUM,
630 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
631 #ifdef ENOKEY
632 [ENOKEY] = TARGET_ENOKEY,
633 #endif
634 #ifdef EKEYEXPIRED
635 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
636 #endif
637 #ifdef EKEYREVOKED
638 [EKEYREVOKED] = TARGET_EKEYREVOKED,
639 #endif
640 #ifdef EKEYREJECTED
641 [EKEYREJECTED] = TARGET_EKEYREJECTED,
642 #endif
643 #ifdef EOWNERDEAD
644 [EOWNERDEAD] = TARGET_EOWNERDEAD,
645 #endif
646 #ifdef ENOTRECOVERABLE
647 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
648 #endif
649 #ifdef ENOMSG
650 [ENOMSG] = TARGET_ENOMSG,
651 #endif
652 #ifdef ERKFILL
653 [ERFKILL] = TARGET_ERFKILL,
654 #endif
655 #ifdef EHWPOISON
656 [EHWPOISON] = TARGET_EHWPOISON,
657 #endif
660 static inline int host_to_target_errno(int err)
662 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
663 host_to_target_errno_table[err]) {
664 return host_to_target_errno_table[err];
666 return err;
669 static inline int target_to_host_errno(int err)
671 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
672 target_to_host_errno_table[err]) {
673 return target_to_host_errno_table[err];
675 return err;
678 static inline abi_long get_errno(abi_long ret)
680 if (ret == -1)
681 return -host_to_target_errno(errno);
682 else
683 return ret;
686 const char *target_strerror(int err)
688 if (err == TARGET_ERESTARTSYS) {
689 return "To be restarted";
691 if (err == TARGET_QEMU_ESIGRETURN) {
692 return "Successful exit from sigreturn";
695 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
696 return NULL;
698 return strerror(target_to_host_errno(err));
701 #define safe_syscall0(type, name) \
702 static type safe_##name(void) \
704 return safe_syscall(__NR_##name); \
707 #define safe_syscall1(type, name, type1, arg1) \
708 static type safe_##name(type1 arg1) \
710 return safe_syscall(__NR_##name, arg1); \
713 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
714 static type safe_##name(type1 arg1, type2 arg2) \
716 return safe_syscall(__NR_##name, arg1, arg2); \
719 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
720 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
722 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
725 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
726 type4, arg4) \
727 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
729 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
732 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
733 type4, arg4, type5, arg5) \
734 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
735 type5 arg5) \
737 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
740 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
741 type4, arg4, type5, arg5, type6, arg6) \
742 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
743 type5 arg5, type6 arg6) \
745 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
748 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
749 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
750 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
751 int, flags, mode_t, mode)
752 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
753 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
754 struct rusage *, rusage)
755 #endif
756 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
757 int, options, struct rusage *, rusage)
758 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
759 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
760 defined(TARGET_NR_pselect6)
761 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
762 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
763 #endif
764 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
765 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
766 struct timespec *, tsp, const sigset_t *, sigmask,
767 size_t, sigsetsize)
768 #endif
769 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
770 int, maxevents, int, timeout, const sigset_t *, sigmask,
771 size_t, sigsetsize)
772 #if defined(__NR_futex)
773 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
774 const struct timespec *,timeout,int *,uaddr2,int,val3)
775 #endif
776 #if defined(__NR_futex_time64)
777 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
778 const struct timespec *,timeout,int *,uaddr2,int,val3)
779 #endif
780 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
781 safe_syscall2(int, kill, pid_t, pid, int, sig)
782 safe_syscall2(int, tkill, int, tid, int, sig)
783 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
784 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
785 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
786 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
787 unsigned long, pos_l, unsigned long, pos_h)
788 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
789 unsigned long, pos_l, unsigned long, pos_h)
790 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
791 socklen_t, addrlen)
792 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
793 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
794 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
795 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
796 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
797 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
798 safe_syscall2(int, flock, int, fd, int, operation)
799 #ifdef TARGET_NR_rt_sigtimedwait
800 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
801 const struct timespec *, uts, size_t, sigsetsize)
802 #endif
803 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
804 int, flags)
805 #if defined(TARGET_NR_nanosleep)
806 safe_syscall2(int, nanosleep, const struct timespec *, req,
807 struct timespec *, rem)
808 #endif
809 #ifdef TARGET_NR_clock_nanosleep
810 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
811 const struct timespec *, req, struct timespec *, rem)
812 #endif
813 #ifdef __NR_ipc
814 #ifdef __s390x__
815 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
816 void *, ptr)
817 #else
818 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
819 void *, ptr, long, fifth)
820 #endif
821 #endif
822 #ifdef __NR_msgsnd
823 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
824 int, flags)
825 #endif
826 #ifdef __NR_msgrcv
827 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
828 long, msgtype, int, flags)
829 #endif
830 #ifdef __NR_semtimedop
831 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
832 unsigned, nsops, const struct timespec *, timeout)
833 #endif
834 #ifdef TARGET_NR_mq_timedsend
835 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
836 size_t, len, unsigned, prio, const struct timespec *, timeout)
837 #endif
838 #ifdef TARGET_NR_mq_timedreceive
839 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
840 size_t, len, unsigned *, prio, const struct timespec *, timeout)
841 #endif
842 /* We do ioctl like this rather than via safe_syscall3 to preserve the
843 * "third argument might be integer or pointer or not present" behaviour of
844 * the libc function.
846 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
847 /* Similarly for fcntl. Note that callers must always:
848 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
849 * use the flock64 struct rather than unsuffixed flock
850 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
852 #ifdef __NR_fcntl64
853 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
854 #else
855 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
856 #endif
858 static inline int host_to_target_sock_type(int host_type)
860 int target_type;
862 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
863 case SOCK_DGRAM:
864 target_type = TARGET_SOCK_DGRAM;
865 break;
866 case SOCK_STREAM:
867 target_type = TARGET_SOCK_STREAM;
868 break;
869 default:
870 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
871 break;
874 #if defined(SOCK_CLOEXEC)
875 if (host_type & SOCK_CLOEXEC) {
876 target_type |= TARGET_SOCK_CLOEXEC;
878 #endif
880 #if defined(SOCK_NONBLOCK)
881 if (host_type & SOCK_NONBLOCK) {
882 target_type |= TARGET_SOCK_NONBLOCK;
884 #endif
886 return target_type;
889 static abi_ulong target_brk;
890 static abi_ulong target_original_brk;
891 static abi_ulong brk_page;
893 void target_set_brk(abi_ulong new_brk)
895 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
896 brk_page = HOST_PAGE_ALIGN(target_brk);
899 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
900 #define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos.
 *
 * Emulates brk(2) for the guest: moves the program break to new_brk,
 * extending the heap via target_mmap() whenever the request crosses
 * brk_page (the highest host page already reserved for the heap).
 * On failure the current break is returned unchanged, except on Alpha
 * (see the TARGET_ALPHA note at the bottom).
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the traditional "query current break" form. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* The break may never move below its initial value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
980 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
981 defined(TARGET_NR_pselect6)
982 static inline abi_long copy_from_user_fdset(fd_set *fds,
983 abi_ulong target_fds_addr,
984 int n)
986 int i, nw, j, k;
987 abi_ulong b, *target_fds;
989 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
990 if (!(target_fds = lock_user(VERIFY_READ,
991 target_fds_addr,
992 sizeof(abi_ulong) * nw,
993 1)))
994 return -TARGET_EFAULT;
996 FD_ZERO(fds);
997 k = 0;
998 for (i = 0; i < nw; i++) {
999 /* grab the abi_ulong */
1000 __get_user(b, &target_fds[i]);
1001 for (j = 0; j < TARGET_ABI_BITS; j++) {
1002 /* check the bit inside the abi_ulong */
1003 if ((b >> j) & 1)
1004 FD_SET(k, fds);
1005 k++;
1009 unlock_user(target_fds, target_fds_addr, 0);
1011 return 0;
1014 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1015 abi_ulong target_fds_addr,
1016 int n)
1018 if (target_fds_addr) {
1019 if (copy_from_user_fdset(fds, target_fds_addr, n))
1020 return -TARGET_EFAULT;
1021 *fds_ptr = fds;
1022 } else {
1023 *fds_ptr = NULL;
1025 return 0;
1028 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1029 const fd_set *fds,
1030 int n)
1032 int i, nw, j, k;
1033 abi_long v;
1034 abi_ulong *target_fds;
1036 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1037 if (!(target_fds = lock_user(VERIFY_WRITE,
1038 target_fds_addr,
1039 sizeof(abi_ulong) * nw,
1040 0)))
1041 return -TARGET_EFAULT;
1043 k = 0;
1044 for (i = 0; i < nw; i++) {
1045 v = 0;
1046 for (j = 0; j < TARGET_ABI_BITS; j++) {
1047 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1048 k++;
1050 __put_user(v, &target_fds[i]);
1053 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1055 return 0;
1057 #endif
/* Host clock tick rate used to rescale clock_t values for the guest;
 * Alpha hosts historically tick at 1024 Hz, everything else at 100 Hz. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
/* Rescale a host clock_t tick count to the guest's TARGET_HZ rate. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* widen to 64 bits so the multiplication cannot overflow */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to the guest structure at target_addr,
 * byte-swapping each field for the guest ABI.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
/* Convert a guest rlimit value to the host rlim_t.  The guest infinity
 * constant — or any value that does not survive the round trip through
 * rlim_t — maps to RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped = tswapal(target_rlim);
    rlim_t host_rlim = swapped;

    if (swapped == TARGET_RLIM_INFINITY || swapped != (rlim_t)host_rlim) {
        return RLIM_INFINITY;
    }
    return host_rlim;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/* Convert a host rlimit value to the guest representation.  Host
 * infinity, or any value too large for the guest's abi_long, becomes
 * the guest's TARGET_RLIM_INFINITY. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong guest_rlim;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        guest_rlim = TARGET_RLIM_INFINITY;
    } else {
        guest_rlim = rlim;
    }
    return tswapal(guest_rlim);
}
#endif
/* Map a guest RLIMIT_* constant onto the host's value.  Unknown codes
 * are passed through unchanged and left for the host syscall to reject. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1176 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1177 abi_ulong target_tv_addr)
1179 struct target_timeval *target_tv;
1181 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1182 return -TARGET_EFAULT;
1185 __get_user(tv->tv_sec, &target_tv->tv_sec);
1186 __get_user(tv->tv_usec, &target_tv->tv_usec);
1188 unlock_user_struct(target_tv, target_tv_addr, 0);
1190 return 0;
1193 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1194 const struct timeval *tv)
1196 struct target_timeval *target_tv;
1198 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1199 return -TARGET_EFAULT;
1202 __put_user(tv->tv_sec, &target_tv->tv_sec);
1203 __put_user(tv->tv_usec, &target_tv->tv_usec);
1205 unlock_user_struct(target_tv, target_tv_addr, 1);
1207 return 0;
1210 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1211 const struct timeval *tv)
1213 struct target__kernel_sock_timeval *target_tv;
1215 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1216 return -TARGET_EFAULT;
1219 __put_user(tv->tv_sec, &target_tv->tv_sec);
1220 __put_user(tv->tv_usec, &target_tv->tv_usec);
1222 unlock_user_struct(target_tv, target_tv_addr, 1);
1224 return 0;
/* Fix: the guard below listed defined(TARGET_NR_pselect6) twice;
 * the duplicate test is removed. */
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Read a (32-bit layout) struct timespec from guest memory into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* Read a 64-bit (__kernel_timespec layout) timespec from guest memory
 * into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1269 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1270 struct timespec *host_ts)
1272 struct target_timespec *target_ts;
1274 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1275 return -TARGET_EFAULT;
1277 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1278 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1279 unlock_user_struct(target_ts, target_addr, 1);
1280 return 0;
1283 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1284 struct timespec *host_ts)
1286 struct target__kernel_timespec *target_ts;
1288 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1289 return -TARGET_EFAULT;
1291 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1292 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1293 unlock_user_struct(target_ts, target_addr, 1);
1294 return 0;
#if defined(TARGET_NR_gettimeofday)
/* Copy a host struct timezone out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 * NOTE(review): the lock_user_struct() copy flag is 1 here even though
 * this is a pure write (siblings use 0 with VERIFY_WRITE) — looks
 * unnecessary but harmless; confirm intent before changing. */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
/* Read a struct timezone from guest memory into *tz.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *ttz;

    if (!lock_user_struct(VERIFY_READ, ttz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &ttz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &ttz->tz_dsttime);

    unlock_user_struct(ttz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a struct mq_attr from guest memory into *attr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
1357 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1358 const struct mq_attr *attr)
1360 struct target_mq_attr *target_mq_attr;
1362 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1363 target_mq_attr_addr, 0))
1364 return -TARGET_EFAULT;
1366 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1367 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1368 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1369 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1371 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1373 return 0;
1375 #endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Emulates select(2) for the guest: unpacks up to three guest fd_sets
 * and an optional timeout, runs the host pselect6, then writes the
 * (possibly modified) sets and remaining timeout back to the guest.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* The guest timeval (if any) is converted to a timespec because
     * the actual work is done via pselect6. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Mirror the (kernel-updated) remaining time back to the guest,
         * matching Linux select() semantics.  NOTE(review): assumes the
         * host call updates ts in place — confirm against safe_pselect6. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1435 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1436 static abi_long do_old_select(abi_ulong arg1)
1438 struct target_sel_arg_struct *sel;
1439 abi_ulong inp, outp, exp, tvp;
1440 long nsel;
1442 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1443 return -TARGET_EFAULT;
1446 nsel = tswapal(sel->n);
1447 inp = tswapal(sel->inp);
1448 outp = tswapal(sel->outp);
1449 exp = tswapal(sel->exp);
1450 tvp = tswapal(sel->tvp);
1452 unlock_user_struct(sel, arg1, 0);
1454 return do_select(nsel, inp, outp, exp, tvp);
1456 #endif
1457 #endif
/* Host pipe2() wrapper: returns the raw host result (0, or -1 with
 * errno set), or -ENOSYS when the host has no pipe2 support. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.  flags are host pipe2 flags;
 * is_pipe2 distinguishes the two guest entry points, because several
 * targets return the second fd in a CPU register for plain pipe(). */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* second fd goes in a register, first is the syscall result */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Everyone else gets both fds written out to guest memory. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1502 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1503 abi_ulong target_addr,
1504 socklen_t len)
1506 struct target_ip_mreqn *target_smreqn;
1508 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1509 if (!target_smreqn)
1510 return -TARGET_EFAULT;
1511 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1512 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1513 if (len == sizeof(struct target_ip_mreqn))
1514 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1515 unlock_user(target_smreqn, target_addr, 0);
1517 return 0;
/* Convert a guest sockaddr (len bytes at target_addr) into a host
 * struct sockaddr for the given socket fd, byte-swapping the family
 * and family-specific fields.  An fd-specific translator (if one is
 * registered) takes precedence over the generic conversion.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* if the last byte is non-NUL and the next one is NUL,
             * extend len by one to include the terminator */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr (len bytes) out to guest memory, byte-swapping
 * the family and any family-specific multi-byte fields.  A zero len is
 * a no-op.  Returns 0 on success, -TARGET_EFAULT on a bad address. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* only swap sa_family if the copy actually included it */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): this branch views the guest buffer through the
         * *host* sockaddr_ll layout (unlike the other branches, which
         * use target_* types) — presumably the layouts match; confirm. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the guest ancillary-data (cmsg) chain attached to target_msgh
 * into host form in msgh's control buffer.  SCM_RIGHTS fds and
 * SCM_CREDENTIALS are converted field by field; any other payload type
 * is logged and copied verbatim.  On return msgh->msg_controllen is the
 * total host space consumed.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length = guest cmsg_len minus the guest header */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* file descriptors: byte-swap each int */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* unknown payload: pass through unconverted */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host ancillary-data (cmsg) chain in msgh back into guest
 * form in target_msgh's control buffer, converting the payload types we
 * understand (SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS, IP_TTL,
 * IP_RECVERR, IPV6_HOPLIMIT, IPV6_RECVERR) and copying anything else
 * verbatim.  Truncation against the guest buffer is reported through
 * MSG_CTRUNC, matching kernel put_cmsg() behaviour.  On return
 * target_msgh->msg_controllen holds the guest space consumed.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* unknown payload: copy raw bytes, zero-fill any excess */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1935 /* do_setsockopt() Must return target values and target errnos. */
1936 static abi_long do_setsockopt(int sockfd, int level, int optname,
1937 abi_ulong optval_addr, socklen_t optlen)
1939 abi_long ret;
1940 int val;
1941 struct ip_mreqn *ip_mreq;
1942 struct ip_mreq_source *ip_mreq_source;
1944 switch(level) {
1945 case SOL_TCP:
1946 /* TCP options all take an 'int' value. */
1947 if (optlen < sizeof(uint32_t))
1948 return -TARGET_EINVAL;
1950 if (get_user_u32(val, optval_addr))
1951 return -TARGET_EFAULT;
1952 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1953 break;
1954 case SOL_IP:
1955 switch(optname) {
1956 case IP_TOS:
1957 case IP_TTL:
1958 case IP_HDRINCL:
1959 case IP_ROUTER_ALERT:
1960 case IP_RECVOPTS:
1961 case IP_RETOPTS:
1962 case IP_PKTINFO:
1963 case IP_MTU_DISCOVER:
1964 case IP_RECVERR:
1965 case IP_RECVTTL:
1966 case IP_RECVTOS:
1967 #ifdef IP_FREEBIND
1968 case IP_FREEBIND:
1969 #endif
1970 case IP_MULTICAST_TTL:
1971 case IP_MULTICAST_LOOP:
1972 val = 0;
1973 if (optlen >= sizeof(uint32_t)) {
1974 if (get_user_u32(val, optval_addr))
1975 return -TARGET_EFAULT;
1976 } else if (optlen >= 1) {
1977 if (get_user_u8(val, optval_addr))
1978 return -TARGET_EFAULT;
1980 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1981 break;
1982 case IP_ADD_MEMBERSHIP:
1983 case IP_DROP_MEMBERSHIP:
1984 if (optlen < sizeof (struct target_ip_mreq) ||
1985 optlen > sizeof (struct target_ip_mreqn))
1986 return -TARGET_EINVAL;
1988 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1989 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1990 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1991 break;
1993 case IP_BLOCK_SOURCE:
1994 case IP_UNBLOCK_SOURCE:
1995 case IP_ADD_SOURCE_MEMBERSHIP:
1996 case IP_DROP_SOURCE_MEMBERSHIP:
1997 if (optlen != sizeof (struct target_ip_mreq_source))
1998 return -TARGET_EINVAL;
2000 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2001 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2002 unlock_user (ip_mreq_source, optval_addr, 0);
2003 break;
2005 default:
2006 goto unimplemented;
2008 break;
2009 case SOL_IPV6:
2010 switch (optname) {
2011 case IPV6_MTU_DISCOVER:
2012 case IPV6_MTU:
2013 case IPV6_V6ONLY:
2014 case IPV6_RECVPKTINFO:
2015 case IPV6_UNICAST_HOPS:
2016 case IPV6_MULTICAST_HOPS:
2017 case IPV6_MULTICAST_LOOP:
2018 case IPV6_RECVERR:
2019 case IPV6_RECVHOPLIMIT:
2020 case IPV6_2292HOPLIMIT:
2021 case IPV6_CHECKSUM:
2022 case IPV6_ADDRFORM:
2023 case IPV6_2292PKTINFO:
2024 case IPV6_RECVTCLASS:
2025 case IPV6_RECVRTHDR:
2026 case IPV6_2292RTHDR:
2027 case IPV6_RECVHOPOPTS:
2028 case IPV6_2292HOPOPTS:
2029 case IPV6_RECVDSTOPTS:
2030 case IPV6_2292DSTOPTS:
2031 case IPV6_TCLASS:
2032 #ifdef IPV6_RECVPATHMTU
2033 case IPV6_RECVPATHMTU:
2034 #endif
2035 #ifdef IPV6_TRANSPARENT
2036 case IPV6_TRANSPARENT:
2037 #endif
2038 #ifdef IPV6_FREEBIND
2039 case IPV6_FREEBIND:
2040 #endif
2041 #ifdef IPV6_RECVORIGDSTADDR
2042 case IPV6_RECVORIGDSTADDR:
2043 #endif
2044 val = 0;
2045 if (optlen < sizeof(uint32_t)) {
2046 return -TARGET_EINVAL;
2048 if (get_user_u32(val, optval_addr)) {
2049 return -TARGET_EFAULT;
2051 ret = get_errno(setsockopt(sockfd, level, optname,
2052 &val, sizeof(val)));
2053 break;
2054 case IPV6_PKTINFO:
2056 struct in6_pktinfo pki;
2058 if (optlen < sizeof(pki)) {
2059 return -TARGET_EINVAL;
2062 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2063 return -TARGET_EFAULT;
2066 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2068 ret = get_errno(setsockopt(sockfd, level, optname,
2069 &pki, sizeof(pki)));
2070 break;
2072 case IPV6_ADD_MEMBERSHIP:
2073 case IPV6_DROP_MEMBERSHIP:
2075 struct ipv6_mreq ipv6mreq;
2077 if (optlen < sizeof(ipv6mreq)) {
2078 return -TARGET_EINVAL;
2081 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2082 return -TARGET_EFAULT;
2085 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2087 ret = get_errno(setsockopt(sockfd, level, optname,
2088 &ipv6mreq, sizeof(ipv6mreq)));
2089 break;
2091 default:
2092 goto unimplemented;
2094 break;
2095 case SOL_ICMPV6:
2096 switch (optname) {
2097 case ICMPV6_FILTER:
2099 struct icmp6_filter icmp6f;
2101 if (optlen > sizeof(icmp6f)) {
2102 optlen = sizeof(icmp6f);
2105 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2106 return -TARGET_EFAULT;
2109 for (val = 0; val < 8; val++) {
2110 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2113 ret = get_errno(setsockopt(sockfd, level, optname,
2114 &icmp6f, optlen));
2115 break;
2117 default:
2118 goto unimplemented;
2120 break;
2121 case SOL_RAW:
2122 switch (optname) {
2123 case ICMP_FILTER:
2124 case IPV6_CHECKSUM:
2125 /* those take an u32 value */
2126 if (optlen < sizeof(uint32_t)) {
2127 return -TARGET_EINVAL;
2130 if (get_user_u32(val, optval_addr)) {
2131 return -TARGET_EFAULT;
2133 ret = get_errno(setsockopt(sockfd, level, optname,
2134 &val, sizeof(val)));
2135 break;
2137 default:
2138 goto unimplemented;
2140 break;
2141 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2142 case SOL_ALG:
2143 switch (optname) {
2144 case ALG_SET_KEY:
2146 char *alg_key = g_malloc(optlen);
2148 if (!alg_key) {
2149 return -TARGET_ENOMEM;
2151 if (copy_from_user(alg_key, optval_addr, optlen)) {
2152 g_free(alg_key);
2153 return -TARGET_EFAULT;
2155 ret = get_errno(setsockopt(sockfd, level, optname,
2156 alg_key, optlen));
2157 g_free(alg_key);
2158 break;
2160 case ALG_SET_AEAD_AUTHSIZE:
2162 ret = get_errno(setsockopt(sockfd, level, optname,
2163 NULL, optlen));
2164 break;
2166 default:
2167 goto unimplemented;
2169 break;
2170 #endif
2171 case TARGET_SOL_SOCKET:
2172 switch (optname) {
2173 case TARGET_SO_RCVTIMEO:
2175 struct timeval tv;
2177 optname = SO_RCVTIMEO;
2179 set_timeout:
2180 if (optlen != sizeof(struct target_timeval)) {
2181 return -TARGET_EINVAL;
2184 if (copy_from_user_timeval(&tv, optval_addr)) {
2185 return -TARGET_EFAULT;
2188 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2189 &tv, sizeof(tv)));
2190 return ret;
2192 case TARGET_SO_SNDTIMEO:
2193 optname = SO_SNDTIMEO;
2194 goto set_timeout;
2195 case TARGET_SO_ATTACH_FILTER:
2197 struct target_sock_fprog *tfprog;
2198 struct target_sock_filter *tfilter;
2199 struct sock_fprog fprog;
2200 struct sock_filter *filter;
2201 int i;
2203 if (optlen != sizeof(*tfprog)) {
2204 return -TARGET_EINVAL;
2206 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2207 return -TARGET_EFAULT;
2209 if (!lock_user_struct(VERIFY_READ, tfilter,
2210 tswapal(tfprog->filter), 0)) {
2211 unlock_user_struct(tfprog, optval_addr, 1);
2212 return -TARGET_EFAULT;
2215 fprog.len = tswap16(tfprog->len);
2216 filter = g_try_new(struct sock_filter, fprog.len);
2217 if (filter == NULL) {
2218 unlock_user_struct(tfilter, tfprog->filter, 1);
2219 unlock_user_struct(tfprog, optval_addr, 1);
2220 return -TARGET_ENOMEM;
2222 for (i = 0; i < fprog.len; i++) {
2223 filter[i].code = tswap16(tfilter[i].code);
2224 filter[i].jt = tfilter[i].jt;
2225 filter[i].jf = tfilter[i].jf;
2226 filter[i].k = tswap32(tfilter[i].k);
2228 fprog.filter = filter;
2230 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2231 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2232 g_free(filter);
2234 unlock_user_struct(tfilter, tfprog->filter, 1);
2235 unlock_user_struct(tfprog, optval_addr, 1);
2236 return ret;
2238 case TARGET_SO_BINDTODEVICE:
2240 char *dev_ifname, *addr_ifname;
2242 if (optlen > IFNAMSIZ - 1) {
2243 optlen = IFNAMSIZ - 1;
2245 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2246 if (!dev_ifname) {
2247 return -TARGET_EFAULT;
2249 optname = SO_BINDTODEVICE;
2250 addr_ifname = alloca(IFNAMSIZ);
2251 memcpy(addr_ifname, dev_ifname, optlen);
2252 addr_ifname[optlen] = 0;
2253 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2254 addr_ifname, optlen));
2255 unlock_user (dev_ifname, optval_addr, 0);
2256 return ret;
2258 case TARGET_SO_LINGER:
2260 struct linger lg;
2261 struct target_linger *tlg;
2263 if (optlen != sizeof(struct target_linger)) {
2264 return -TARGET_EINVAL;
2266 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2267 return -TARGET_EFAULT;
2269 __get_user(lg.l_onoff, &tlg->l_onoff);
2270 __get_user(lg.l_linger, &tlg->l_linger);
2271 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2272 &lg, sizeof(lg)));
2273 unlock_user_struct(tlg, optval_addr, 0);
2274 return ret;
2276 /* Options with 'int' argument. */
2277 case TARGET_SO_DEBUG:
2278 optname = SO_DEBUG;
2279 break;
2280 case TARGET_SO_REUSEADDR:
2281 optname = SO_REUSEADDR;
2282 break;
2283 #ifdef SO_REUSEPORT
2284 case TARGET_SO_REUSEPORT:
2285 optname = SO_REUSEPORT;
2286 break;
2287 #endif
2288 case TARGET_SO_TYPE:
2289 optname = SO_TYPE;
2290 break;
2291 case TARGET_SO_ERROR:
2292 optname = SO_ERROR;
2293 break;
2294 case TARGET_SO_DONTROUTE:
2295 optname = SO_DONTROUTE;
2296 break;
2297 case TARGET_SO_BROADCAST:
2298 optname = SO_BROADCAST;
2299 break;
2300 case TARGET_SO_SNDBUF:
2301 optname = SO_SNDBUF;
2302 break;
2303 case TARGET_SO_SNDBUFFORCE:
2304 optname = SO_SNDBUFFORCE;
2305 break;
2306 case TARGET_SO_RCVBUF:
2307 optname = SO_RCVBUF;
2308 break;
2309 case TARGET_SO_RCVBUFFORCE:
2310 optname = SO_RCVBUFFORCE;
2311 break;
2312 case TARGET_SO_KEEPALIVE:
2313 optname = SO_KEEPALIVE;
2314 break;
2315 case TARGET_SO_OOBINLINE:
2316 optname = SO_OOBINLINE;
2317 break;
2318 case TARGET_SO_NO_CHECK:
2319 optname = SO_NO_CHECK;
2320 break;
2321 case TARGET_SO_PRIORITY:
2322 optname = SO_PRIORITY;
2323 break;
2324 #ifdef SO_BSDCOMPAT
2325 case TARGET_SO_BSDCOMPAT:
2326 optname = SO_BSDCOMPAT;
2327 break;
2328 #endif
2329 case TARGET_SO_PASSCRED:
2330 optname = SO_PASSCRED;
2331 break;
2332 case TARGET_SO_PASSSEC:
2333 optname = SO_PASSSEC;
2334 break;
2335 case TARGET_SO_TIMESTAMP:
2336 optname = SO_TIMESTAMP;
2337 break;
2338 case TARGET_SO_RCVLOWAT:
2339 optname = SO_RCVLOWAT;
2340 break;
2341 default:
2342 goto unimplemented;
2344 if (optlen < sizeof(uint32_t))
2345 return -TARGET_EINVAL;
2347 if (get_user_u32(val, optval_addr))
2348 return -TARGET_EFAULT;
2349 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2350 break;
2351 #ifdef SOL_NETLINK
2352 case SOL_NETLINK:
2353 switch (optname) {
2354 case NETLINK_PKTINFO:
2355 case NETLINK_ADD_MEMBERSHIP:
2356 case NETLINK_DROP_MEMBERSHIP:
2357 case NETLINK_BROADCAST_ERROR:
2358 case NETLINK_NO_ENOBUFS:
2359 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2360 case NETLINK_LISTEN_ALL_NSID:
2361 case NETLINK_CAP_ACK:
2362 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2363 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2364 case NETLINK_EXT_ACK:
2365 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2366 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2367 case NETLINK_GET_STRICT_CHK:
2368 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2369 break;
2370 default:
2371 goto unimplemented;
2373 val = 0;
2374 if (optlen < sizeof(uint32_t)) {
2375 return -TARGET_EINVAL;
2377 if (get_user_u32(val, optval_addr)) {
2378 return -TARGET_EFAULT;
2380 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2381 sizeof(val)));
2382 break;
2383 #endif /* SOL_NETLINK */
2384 default:
2385 unimplemented:
2386 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2387 level, optname);
2388 ret = -TARGET_ENOPROTOOPT;
2390 return ret;
2393 /* do_getsockopt() Must return target values and target errnos. */
2394 static abi_long do_getsockopt(int sockfd, int level, int optname,
2395 abi_ulong optval_addr, abi_ulong optlen)
2397 abi_long ret;
2398 int len, val;
2399 socklen_t lv;
2401 switch(level) {
2402 case TARGET_SOL_SOCKET:
2403 level = SOL_SOCKET;
2404 switch (optname) {
2405 /* These don't just return a single integer */
2406 case TARGET_SO_PEERNAME:
2407 goto unimplemented;
2408 case TARGET_SO_RCVTIMEO: {
2409 struct timeval tv;
2410 socklen_t tvlen;
2412 optname = SO_RCVTIMEO;
2414 get_timeout:
2415 if (get_user_u32(len, optlen)) {
2416 return -TARGET_EFAULT;
2418 if (len < 0) {
2419 return -TARGET_EINVAL;
2422 tvlen = sizeof(tv);
2423 ret = get_errno(getsockopt(sockfd, level, optname,
2424 &tv, &tvlen));
2425 if (ret < 0) {
2426 return ret;
2428 if (len > sizeof(struct target_timeval)) {
2429 len = sizeof(struct target_timeval);
2431 if (copy_to_user_timeval(optval_addr, &tv)) {
2432 return -TARGET_EFAULT;
2434 if (put_user_u32(len, optlen)) {
2435 return -TARGET_EFAULT;
2437 break;
2439 case TARGET_SO_SNDTIMEO:
2440 optname = SO_SNDTIMEO;
2441 goto get_timeout;
2442 case TARGET_SO_PEERCRED: {
2443 struct ucred cr;
2444 socklen_t crlen;
2445 struct target_ucred *tcr;
2447 if (get_user_u32(len, optlen)) {
2448 return -TARGET_EFAULT;
2450 if (len < 0) {
2451 return -TARGET_EINVAL;
2454 crlen = sizeof(cr);
2455 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2456 &cr, &crlen));
2457 if (ret < 0) {
2458 return ret;
2460 if (len > crlen) {
2461 len = crlen;
2463 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2464 return -TARGET_EFAULT;
2466 __put_user(cr.pid, &tcr->pid);
2467 __put_user(cr.uid, &tcr->uid);
2468 __put_user(cr.gid, &tcr->gid);
2469 unlock_user_struct(tcr, optval_addr, 1);
2470 if (put_user_u32(len, optlen)) {
2471 return -TARGET_EFAULT;
2473 break;
2475 case TARGET_SO_PEERSEC: {
2476 char *name;
2478 if (get_user_u32(len, optlen)) {
2479 return -TARGET_EFAULT;
2481 if (len < 0) {
2482 return -TARGET_EINVAL;
2484 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2485 if (!name) {
2486 return -TARGET_EFAULT;
2488 lv = len;
2489 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2490 name, &lv));
2491 if (put_user_u32(lv, optlen)) {
2492 ret = -TARGET_EFAULT;
2494 unlock_user(name, optval_addr, lv);
2495 break;
2497 case TARGET_SO_LINGER:
2499 struct linger lg;
2500 socklen_t lglen;
2501 struct target_linger *tlg;
2503 if (get_user_u32(len, optlen)) {
2504 return -TARGET_EFAULT;
2506 if (len < 0) {
2507 return -TARGET_EINVAL;
2510 lglen = sizeof(lg);
2511 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2512 &lg, &lglen));
2513 if (ret < 0) {
2514 return ret;
2516 if (len > lglen) {
2517 len = lglen;
2519 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2520 return -TARGET_EFAULT;
2522 __put_user(lg.l_onoff, &tlg->l_onoff);
2523 __put_user(lg.l_linger, &tlg->l_linger);
2524 unlock_user_struct(tlg, optval_addr, 1);
2525 if (put_user_u32(len, optlen)) {
2526 return -TARGET_EFAULT;
2528 break;
2530 /* Options with 'int' argument. */
2531 case TARGET_SO_DEBUG:
2532 optname = SO_DEBUG;
2533 goto int_case;
2534 case TARGET_SO_REUSEADDR:
2535 optname = SO_REUSEADDR;
2536 goto int_case;
2537 #ifdef SO_REUSEPORT
2538 case TARGET_SO_REUSEPORT:
2539 optname = SO_REUSEPORT;
2540 goto int_case;
2541 #endif
2542 case TARGET_SO_TYPE:
2543 optname = SO_TYPE;
2544 goto int_case;
2545 case TARGET_SO_ERROR:
2546 optname = SO_ERROR;
2547 goto int_case;
2548 case TARGET_SO_DONTROUTE:
2549 optname = SO_DONTROUTE;
2550 goto int_case;
2551 case TARGET_SO_BROADCAST:
2552 optname = SO_BROADCAST;
2553 goto int_case;
2554 case TARGET_SO_SNDBUF:
2555 optname = SO_SNDBUF;
2556 goto int_case;
2557 case TARGET_SO_RCVBUF:
2558 optname = SO_RCVBUF;
2559 goto int_case;
2560 case TARGET_SO_KEEPALIVE:
2561 optname = SO_KEEPALIVE;
2562 goto int_case;
2563 case TARGET_SO_OOBINLINE:
2564 optname = SO_OOBINLINE;
2565 goto int_case;
2566 case TARGET_SO_NO_CHECK:
2567 optname = SO_NO_CHECK;
2568 goto int_case;
2569 case TARGET_SO_PRIORITY:
2570 optname = SO_PRIORITY;
2571 goto int_case;
2572 #ifdef SO_BSDCOMPAT
2573 case TARGET_SO_BSDCOMPAT:
2574 optname = SO_BSDCOMPAT;
2575 goto int_case;
2576 #endif
2577 case TARGET_SO_PASSCRED:
2578 optname = SO_PASSCRED;
2579 goto int_case;
2580 case TARGET_SO_TIMESTAMP:
2581 optname = SO_TIMESTAMP;
2582 goto int_case;
2583 case TARGET_SO_RCVLOWAT:
2584 optname = SO_RCVLOWAT;
2585 goto int_case;
2586 case TARGET_SO_ACCEPTCONN:
2587 optname = SO_ACCEPTCONN;
2588 goto int_case;
2589 default:
2590 goto int_case;
2592 break;
2593 case SOL_TCP:
2594 /* TCP options all take an 'int' value. */
2595 int_case:
2596 if (get_user_u32(len, optlen))
2597 return -TARGET_EFAULT;
2598 if (len < 0)
2599 return -TARGET_EINVAL;
2600 lv = sizeof(lv);
2601 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2602 if (ret < 0)
2603 return ret;
2604 if (optname == SO_TYPE) {
2605 val = host_to_target_sock_type(val);
2607 if (len > lv)
2608 len = lv;
2609 if (len == 4) {
2610 if (put_user_u32(val, optval_addr))
2611 return -TARGET_EFAULT;
2612 } else {
2613 if (put_user_u8(val, optval_addr))
2614 return -TARGET_EFAULT;
2616 if (put_user_u32(len, optlen))
2617 return -TARGET_EFAULT;
2618 break;
2619 case SOL_IP:
2620 switch(optname) {
2621 case IP_TOS:
2622 case IP_TTL:
2623 case IP_HDRINCL:
2624 case IP_ROUTER_ALERT:
2625 case IP_RECVOPTS:
2626 case IP_RETOPTS:
2627 case IP_PKTINFO:
2628 case IP_MTU_DISCOVER:
2629 case IP_RECVERR:
2630 case IP_RECVTOS:
2631 #ifdef IP_FREEBIND
2632 case IP_FREEBIND:
2633 #endif
2634 case IP_MULTICAST_TTL:
2635 case IP_MULTICAST_LOOP:
2636 if (get_user_u32(len, optlen))
2637 return -TARGET_EFAULT;
2638 if (len < 0)
2639 return -TARGET_EINVAL;
2640 lv = sizeof(lv);
2641 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2642 if (ret < 0)
2643 return ret;
2644 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2645 len = 1;
2646 if (put_user_u32(len, optlen)
2647 || put_user_u8(val, optval_addr))
2648 return -TARGET_EFAULT;
2649 } else {
2650 if (len > sizeof(int))
2651 len = sizeof(int);
2652 if (put_user_u32(len, optlen)
2653 || put_user_u32(val, optval_addr))
2654 return -TARGET_EFAULT;
2656 break;
2657 default:
2658 ret = -TARGET_ENOPROTOOPT;
2659 break;
2661 break;
2662 case SOL_IPV6:
2663 switch (optname) {
2664 case IPV6_MTU_DISCOVER:
2665 case IPV6_MTU:
2666 case IPV6_V6ONLY:
2667 case IPV6_RECVPKTINFO:
2668 case IPV6_UNICAST_HOPS:
2669 case IPV6_MULTICAST_HOPS:
2670 case IPV6_MULTICAST_LOOP:
2671 case IPV6_RECVERR:
2672 case IPV6_RECVHOPLIMIT:
2673 case IPV6_2292HOPLIMIT:
2674 case IPV6_CHECKSUM:
2675 case IPV6_ADDRFORM:
2676 case IPV6_2292PKTINFO:
2677 case IPV6_RECVTCLASS:
2678 case IPV6_RECVRTHDR:
2679 case IPV6_2292RTHDR:
2680 case IPV6_RECVHOPOPTS:
2681 case IPV6_2292HOPOPTS:
2682 case IPV6_RECVDSTOPTS:
2683 case IPV6_2292DSTOPTS:
2684 case IPV6_TCLASS:
2685 #ifdef IPV6_RECVPATHMTU
2686 case IPV6_RECVPATHMTU:
2687 #endif
2688 #ifdef IPV6_TRANSPARENT
2689 case IPV6_TRANSPARENT:
2690 #endif
2691 #ifdef IPV6_FREEBIND
2692 case IPV6_FREEBIND:
2693 #endif
2694 #ifdef IPV6_RECVORIGDSTADDR
2695 case IPV6_RECVORIGDSTADDR:
2696 #endif
2697 if (get_user_u32(len, optlen))
2698 return -TARGET_EFAULT;
2699 if (len < 0)
2700 return -TARGET_EINVAL;
2701 lv = sizeof(lv);
2702 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2703 if (ret < 0)
2704 return ret;
2705 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2706 len = 1;
2707 if (put_user_u32(len, optlen)
2708 || put_user_u8(val, optval_addr))
2709 return -TARGET_EFAULT;
2710 } else {
2711 if (len > sizeof(int))
2712 len = sizeof(int);
2713 if (put_user_u32(len, optlen)
2714 || put_user_u32(val, optval_addr))
2715 return -TARGET_EFAULT;
2717 break;
2718 default:
2719 ret = -TARGET_ENOPROTOOPT;
2720 break;
2722 break;
2723 #ifdef SOL_NETLINK
2724 case SOL_NETLINK:
2725 switch (optname) {
2726 case NETLINK_PKTINFO:
2727 case NETLINK_BROADCAST_ERROR:
2728 case NETLINK_NO_ENOBUFS:
2729 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2730 case NETLINK_LISTEN_ALL_NSID:
2731 case NETLINK_CAP_ACK:
2732 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2733 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2734 case NETLINK_EXT_ACK:
2735 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2736 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2737 case NETLINK_GET_STRICT_CHK:
2738 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2739 if (get_user_u32(len, optlen)) {
2740 return -TARGET_EFAULT;
2742 if (len != sizeof(val)) {
2743 return -TARGET_EINVAL;
2745 lv = len;
2746 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2747 if (ret < 0) {
2748 return ret;
2750 if (put_user_u32(lv, optlen)
2751 || put_user_u32(val, optval_addr)) {
2752 return -TARGET_EFAULT;
2754 break;
2755 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2756 case NETLINK_LIST_MEMBERSHIPS:
2758 uint32_t *results;
2759 int i;
2760 if (get_user_u32(len, optlen)) {
2761 return -TARGET_EFAULT;
2763 if (len < 0) {
2764 return -TARGET_EINVAL;
2766 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2767 if (!results) {
2768 return -TARGET_EFAULT;
2770 lv = len;
2771 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2772 if (ret < 0) {
2773 unlock_user(results, optval_addr, 0);
2774 return ret;
2776 /* swap host endianess to target endianess. */
2777 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2778 results[i] = tswap32(results[i]);
2780 if (put_user_u32(lv, optlen)) {
2781 return -TARGET_EFAULT;
2783 unlock_user(results, optval_addr, 0);
2784 break;
2786 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2787 default:
2788 goto unimplemented;
2790 break;
2791 #endif /* SOL_NETLINK */
2792 default:
2793 unimplemented:
2794 qemu_log_mask(LOG_UNIMP,
2795 "getsockopt level=%d optname=%d not yet supported\n",
2796 level, optname);
2797 ret = -TARGET_EOPNOTSUPP;
2798 break;
2800 return ret;
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* Assemble the 64-bit offset.  thigh is shifted by TARGET_LONG_BITS / 2
     * twice instead of TARGET_LONG_BITS once: a single shift would be
     * undefined behavior in C when TARGET_LONG_BITS == 64 (shift count equal
     * to the operand width), while the two half-width shifts then simply
     * shift thigh out entirely, which is the intended result since the whole
     * offset already fits in tlow on a 64-bit target.
     */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    /* Split back into host halves using the same double-shift trick for
     * HOST_LONG_BITS; on a 64-bit host *hhigh collapses to zero.
     */
    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
/* Build a host iovec array from the guest iovec array at target_addr,
 * locking each referenced guest buffer into host memory.
 *
 * On success returns a g_malloc'd iovec of 'count' entries; the caller must
 * release it with unlock_iovec().  On failure returns NULL with errno set
 * (errno == 0 distinguishes the legitimate count == 0 case).
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    /* All later entries are forced to zero length below. */
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total so the host syscall never sees more
             * than max_len bytes overall. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release only the buffers that were successfully locked
     * (entries with a positive guest length up to, not including, i). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
2908 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2909 abi_ulong count, int copy)
2911 struct target_iovec *target_vec;
2912 int i;
2914 target_vec = lock_user(VERIFY_READ, target_addr,
2915 count * sizeof(struct target_iovec), 1);
2916 if (target_vec) {
2917 for (i = 0; i < count; i++) {
2918 abi_ulong base = tswapal(target_vec[i].iov_base);
2919 abi_long len = tswapal(target_vec[i].iov_len);
2920 if (len < 0) {
2921 break;
2923 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2925 unlock_user(target_vec, target_addr, 0);
2928 g_free(vec);
2931 static inline int target_to_host_sock_type(int *type)
2933 int host_type = 0;
2934 int target_type = *type;
2936 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2937 case TARGET_SOCK_DGRAM:
2938 host_type = SOCK_DGRAM;
2939 break;
2940 case TARGET_SOCK_STREAM:
2941 host_type = SOCK_STREAM;
2942 break;
2943 default:
2944 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2945 break;
2947 if (target_type & TARGET_SOCK_CLOEXEC) {
2948 #if defined(SOCK_CLOEXEC)
2949 host_type |= SOCK_CLOEXEC;
2950 #else
2951 return -TARGET_EINVAL;
2952 #endif
2954 if (target_type & TARGET_SOCK_NONBLOCK) {
2955 #if defined(SOCK_NONBLOCK)
2956 host_type |= SOCK_NONBLOCK;
2957 #elif !defined(O_NONBLOCK)
2958 return -TARGET_EINVAL;
2959 #endif
2961 *type = host_type;
2962 return 0;
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, a guest request for TARGET_SOCK_NONBLOCK
 * is emulated via fcntl(O_NONBLOCK).  Returns the fd on success; on failure
 * the fd is closed and -TARGET_EINVAL is returned.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Bug fix: F_GETFL can fail and return -1; OR-ing -1 into F_SETFL
         * would set every flag bit.  Treat that as an error too. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
2980 /* do_socket() Must return target values and target errnos. */
2981 static abi_long do_socket(int domain, int type, int protocol)
2983 int target_type = type;
2984 int ret;
2986 ret = target_to_host_sock_type(&type);
2987 if (ret) {
2988 return ret;
2991 if (domain == PF_NETLINK && !(
2992 #ifdef CONFIG_RTNETLINK
2993 protocol == NETLINK_ROUTE ||
2994 #endif
2995 protocol == NETLINK_KOBJECT_UEVENT ||
2996 protocol == NETLINK_AUDIT)) {
2997 return -TARGET_EPROTONOSUPPORT;
3000 if (domain == AF_PACKET ||
3001 (domain == AF_INET && type == SOCK_PACKET)) {
3002 protocol = tswap16(protocol);
3005 ret = get_errno(socket(domain, type, protocol));
3006 if (ret >= 0) {
3007 ret = sock_flags_fixup(ret, target_type);
3008 if (type == SOCK_PACKET) {
3009 /* Manage an obsolete case :
3010 * if socket type is SOCK_PACKET, bind by name
3012 fd_trans_register(ret, &target_packet_trans);
3013 } else if (domain == PF_NETLINK) {
3014 switch (protocol) {
3015 #ifdef CONFIG_RTNETLINK
3016 case NETLINK_ROUTE:
3017 fd_trans_register(ret, &target_netlink_route_trans);
3018 break;
3019 #endif
3020 case NETLINK_KOBJECT_UEVENT:
3021 /* nothing to do: messages are strings */
3022 break;
3023 case NETLINK_AUDIT:
3024 fd_trans_register(ret, &target_netlink_audit_trans);
3025 break;
3026 default:
3027 g_assert_not_reached();
3031 return ret;
3034 /* do_bind() Must return target values and target errnos. */
3035 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3036 socklen_t addrlen)
3038 void *addr;
3039 abi_long ret;
3041 if ((int)addrlen < 0) {
3042 return -TARGET_EINVAL;
3045 addr = alloca(addrlen+1);
3047 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3048 if (ret)
3049 return ret;
3051 return get_errno(bind(sockfd, addr, addrlen));
3054 /* do_connect() Must return target values and target errnos. */
3055 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3056 socklen_t addrlen)
3058 void *addr;
3059 abi_long ret;
3061 if ((int)addrlen < 0) {
3062 return -TARGET_EINVAL;
3065 addr = alloca(addrlen+1);
3067 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3068 if (ret)
3069 return ret;
3071 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: translates the already-locked guest
 * msghdr 'msgp' into a host struct msghdr, performs the host syscall
 * (safe_sendmsg when 'send' is nonzero, safe_recvmsg otherwise), and on
 * receive copies name/flags/control data back into the guest header.
 * Returns the byte count on success or a -TARGET_* errno.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Factor of 2: host cmsg structures can be larger than their target
     * counterparts, so reserve extra room for the translated control data.
     * NOTE(review): presumably 2x is always sufficient — not proven here. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate the payload in a private copy so the guest's
             * locked buffer is left untouched. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 is the deliberate bad-name marker set above;
                 * don't try to copy it back. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                /* On full success report the received byte count. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3181 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3182 int flags, int send)
3184 abi_long ret;
3185 struct target_msghdr *msgp;
3187 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3188 msgp,
3189 target_msg,
3190 send ? 1 : 0)) {
3191 return -TARGET_EFAULT;
3193 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3194 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3195 return ret;
3198 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3199 * so it might not have this *mmsg-specific flag either.
3201 #ifndef MSG_WAITFORONE
3202 #define MSG_WAITFORONE 0x10000
3203 #endif
3205 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3206 unsigned int vlen, unsigned int flags,
3207 int send)
3209 struct target_mmsghdr *mmsgp;
3210 abi_long ret = 0;
3211 int i;
3213 if (vlen > UIO_MAXIOV) {
3214 vlen = UIO_MAXIOV;
3217 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3218 if (!mmsgp) {
3219 return -TARGET_EFAULT;
3222 for (i = 0; i < vlen; i++) {
3223 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3224 if (is_error(ret)) {
3225 break;
3227 mmsgp[i].msg_len = tswap32(ret);
3228 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3229 if (flags & MSG_WAITFORONE) {
3230 flags |= MSG_DONTWAIT;
3234 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3236 /* Return number of datagrams sent if we sent any at all;
3237 * otherwise return the error.
3239 if (i) {
3240 return i;
3242 return ret;
3245 /* do_accept4() Must return target values and target errnos. */
3246 static abi_long do_accept4(int fd, abi_ulong target_addr,
3247 abi_ulong target_addrlen_addr, int flags)
3249 socklen_t addrlen, ret_addrlen;
3250 void *addr;
3251 abi_long ret;
3252 int host_flags;
3254 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3256 if (target_addr == 0) {
3257 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3260 /* linux returns EINVAL if addrlen pointer is invalid */
3261 if (get_user_u32(addrlen, target_addrlen_addr))
3262 return -TARGET_EINVAL;
3264 if ((int)addrlen < 0) {
3265 return -TARGET_EINVAL;
3268 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3269 return -TARGET_EINVAL;
3271 addr = alloca(addrlen);
3273 ret_addrlen = addrlen;
3274 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3275 if (!is_error(ret)) {
3276 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3277 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3278 ret = -TARGET_EFAULT;
3281 return ret;
3284 /* do_getpeername() Must return target values and target errnos. */
3285 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3286 abi_ulong target_addrlen_addr)
3288 socklen_t addrlen, ret_addrlen;
3289 void *addr;
3290 abi_long ret;
3292 if (get_user_u32(addrlen, target_addrlen_addr))
3293 return -TARGET_EFAULT;
3295 if ((int)addrlen < 0) {
3296 return -TARGET_EINVAL;
3299 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3300 return -TARGET_EFAULT;
3302 addr = alloca(addrlen);
3304 ret_addrlen = addrlen;
3305 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3306 if (!is_error(ret)) {
3307 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3308 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3309 ret = -TARGET_EFAULT;
3312 return ret;
3315 /* do_getsockname() Must return target values and target errnos. */
3316 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3317 abi_ulong target_addrlen_addr)
3319 socklen_t addrlen, ret_addrlen;
3320 void *addr;
3321 abi_long ret;
3323 if (get_user_u32(addrlen, target_addrlen_addr))
3324 return -TARGET_EFAULT;
3326 if ((int)addrlen < 0) {
3327 return -TARGET_EINVAL;
3330 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3331 return -TARGET_EFAULT;
3333 addr = alloca(addrlen);
3335 ret_addrlen = addrlen;
3336 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3337 if (!is_error(ret)) {
3338 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3339 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3340 ret = -TARGET_EFAULT;
3343 return ret;
3346 /* do_socketpair() Must return target values and target errnos. */
3347 static abi_long do_socketpair(int domain, int type, int protocol,
3348 abi_ulong target_tab_addr)
3350 int tab[2];
3351 abi_long ret;
3353 target_to_host_sock_type(&type);
3355 ret = get_errno(socketpair(domain, type, protocol, tab));
3356 if (!is_error(ret)) {
3357 if (put_user_s32(tab[0], target_tab_addr)
3358 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3359 ret = -TARGET_EFAULT;
3361 return ret;
3364 /* do_sendto() Must return target values and target errnos. */
3365 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3366 abi_ulong target_addr, socklen_t addrlen)
3368 void *addr;
3369 void *host_msg;
3370 void *copy_msg = NULL;
3371 abi_long ret;
3373 if ((int)addrlen < 0) {
3374 return -TARGET_EINVAL;
3377 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3378 if (!host_msg)
3379 return -TARGET_EFAULT;
3380 if (fd_trans_target_to_host_data(fd)) {
3381 copy_msg = host_msg;
3382 host_msg = g_malloc(len);
3383 memcpy(host_msg, copy_msg, len);
3384 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3385 if (ret < 0) {
3386 goto fail;
3389 if (target_addr) {
3390 addr = alloca(addrlen+1);
3391 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3392 if (ret) {
3393 goto fail;
3395 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3396 } else {
3397 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3399 fail:
3400 if (copy_msg) {
3401 g_free(host_msg);
3402 host_msg = copy_msg;
3404 unlock_user(host_msg, msg, 0);
3405 return ret;
3408 /* do_recvfrom() Must return target values and target errnos. */
3409 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3410 abi_ulong target_addr,
3411 abi_ulong target_addrlen)
3413 socklen_t addrlen, ret_addrlen;
3414 void *addr;
3415 void *host_msg;
3416 abi_long ret;
3418 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3419 if (!host_msg)
3420 return -TARGET_EFAULT;
3421 if (target_addr) {
3422 if (get_user_u32(addrlen, target_addrlen)) {
3423 ret = -TARGET_EFAULT;
3424 goto fail;
3426 if ((int)addrlen < 0) {
3427 ret = -TARGET_EINVAL;
3428 goto fail;
3430 addr = alloca(addrlen);
3431 ret_addrlen = addrlen;
3432 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3433 addr, &ret_addrlen));
3434 } else {
3435 addr = NULL; /* To keep compiler quiet. */
3436 addrlen = 0; /* To keep compiler quiet. */
3437 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3439 if (!is_error(ret)) {
3440 if (fd_trans_host_to_target_data(fd)) {
3441 abi_long trans;
3442 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3443 if (is_error(trans)) {
3444 ret = trans;
3445 goto fail;
3448 if (target_addr) {
3449 host_to_target_sockaddr(target_addr, addr,
3450 MIN(addrlen, ret_addrlen));
3451 if (put_user_u32(ret_addrlen, target_addrlen)) {
3452 ret = -TARGET_EFAULT;
3453 goto fail;
3456 unlock_user(host_msg, msg, len);
3457 } else {
3458 fail:
3459 unlock_user(host_msg, msg, 0);
3461 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    /* Argument count of each multiplexed socket operation. */
    static const unsigned nargs[] = {
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3557 #define N_SHM_REGIONS 32
3559 static struct shm_region {
3560 abi_ulong start;
3561 abi_ulong size;
3562 bool in_use;
3563 } shm_regions[N_SHM_REGIONS];
3565 #ifndef TARGET_SEMID64_DS
3566 /* asm-generic version of this struct */
3567 struct target_semid64_ds
3569 struct target_ipc_perm sem_perm;
3570 abi_ulong sem_otime;
3571 #if TARGET_ABI_BITS == 32
3572 abi_ulong __unused1;
3573 #endif
3574 abi_ulong sem_ctime;
3575 #if TARGET_ABI_BITS == 32
3576 abi_ulong __unused2;
3577 #endif
3578 abi_ulong sem_nsems;
3579 abi_ulong __unused3;
3580 abi_ulong __unused4;
3582 #endif
3584 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3585 abi_ulong target_addr)
3587 struct target_ipc_perm *target_ip;
3588 struct target_semid64_ds *target_sd;
3590 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3591 return -TARGET_EFAULT;
3592 target_ip = &(target_sd->sem_perm);
3593 host_ip->__key = tswap32(target_ip->__key);
3594 host_ip->uid = tswap32(target_ip->uid);
3595 host_ip->gid = tswap32(target_ip->gid);
3596 host_ip->cuid = tswap32(target_ip->cuid);
3597 host_ip->cgid = tswap32(target_ip->cgid);
3598 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3599 host_ip->mode = tswap32(target_ip->mode);
3600 #else
3601 host_ip->mode = tswap16(target_ip->mode);
3602 #endif
3603 #if defined(TARGET_PPC)
3604 host_ip->__seq = tswap32(target_ip->__seq);
3605 #else
3606 host_ip->__seq = tswap16(target_ip->__seq);
3607 #endif
3608 unlock_user_struct(target_sd, target_addr, 0);
3609 return 0;
3612 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3613 struct ipc_perm *host_ip)
3615 struct target_ipc_perm *target_ip;
3616 struct target_semid64_ds *target_sd;
3618 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3619 return -TARGET_EFAULT;
3620 target_ip = &(target_sd->sem_perm);
3621 target_ip->__key = tswap32(host_ip->__key);
3622 target_ip->uid = tswap32(host_ip->uid);
3623 target_ip->gid = tswap32(host_ip->gid);
3624 target_ip->cuid = tswap32(host_ip->cuid);
3625 target_ip->cgid = tswap32(host_ip->cgid);
3626 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3627 target_ip->mode = tswap32(host_ip->mode);
3628 #else
3629 target_ip->mode = tswap16(host_ip->mode);
3630 #endif
3631 #if defined(TARGET_PPC)
3632 target_ip->__seq = tswap32(host_ip->__seq);
3633 #else
3634 target_ip->__seq = tswap16(host_ip->__seq);
3635 #endif
3636 unlock_user_struct(target_sd, target_addr, 1);
3637 return 0;
3640 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3641 abi_ulong target_addr)
3643 struct target_semid64_ds *target_sd;
3645 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3646 return -TARGET_EFAULT;
3647 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3648 return -TARGET_EFAULT;
3649 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3650 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3651 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3652 unlock_user_struct(target_sd, target_addr, 0);
3653 return 0;
3656 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3657 struct semid_ds *host_sd)
3659 struct target_semid64_ds *target_sd;
3661 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3662 return -TARGET_EFAULT;
3663 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3664 return -TARGET_EFAULT;
3665 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3666 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3667 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3668 unlock_user_struct(target_sd, target_addr, 1);
3669 return 0;
/* Guest layout of struct seminfo, as returned by semctl(IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3685 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3686 struct seminfo *host_seminfo)
3688 struct target_seminfo *target_seminfo;
3689 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3690 return -TARGET_EFAULT;
3691 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3692 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3693 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3694 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3695 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3696 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3697 __put_user(host_seminfo->semume, &target_seminfo->semume);
3698 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3699 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3700 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3701 unlock_user_struct(target_seminfo, target_addr, 1);
3702 return 0;
3705 union semun {
3706 int val;
3707 struct semid_ds *buf;
3708 unsigned short *array;
3709 struct seminfo *__buf;
3712 union target_semun {
3713 int val;
3714 abi_ulong buf;
3715 abi_ulong array;
3716 abi_ulong __buf;
3719 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3720 abi_ulong target_addr)
3722 int nsems;
3723 unsigned short *array;
3724 union semun semun;
3725 struct semid_ds semid_ds;
3726 int i, ret;
3728 semun.buf = &semid_ds;
3730 ret = semctl(semid, 0, IPC_STAT, semun);
3731 if (ret == -1)
3732 return get_errno(ret);
3734 nsems = semid_ds.sem_nsems;
3736 *host_array = g_try_new(unsigned short, nsems);
3737 if (!*host_array) {
3738 return -TARGET_ENOMEM;
3740 array = lock_user(VERIFY_READ, target_addr,
3741 nsems*sizeof(unsigned short), 1);
3742 if (!array) {
3743 g_free(*host_array);
3744 return -TARGET_EFAULT;
3747 for(i=0; i<nsems; i++) {
3748 __get_user((*host_array)[i], &array[i]);
3750 unlock_user(array, target_addr, 0);
3752 return 0;
3755 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3756 unsigned short **host_array)
3758 int nsems;
3759 unsigned short *array;
3760 union semun semun;
3761 struct semid_ds semid_ds;
3762 int i, ret;
3764 semun.buf = &semid_ds;
3766 ret = semctl(semid, 0, IPC_STAT, semun);
3767 if (ret == -1)
3768 return get_errno(ret);
3770 nsems = semid_ds.sem_nsems;
3772 array = lock_user(VERIFY_WRITE, target_addr,
3773 nsems*sizeof(unsigned short), 0);
3774 if (!array)
3775 return -TARGET_EFAULT;
3777 for(i=0; i<nsems; i++) {
3778 __put_user((*host_array)[i], &array[i]);
3780 g_free(*host_array);
3781 unlock_user(array, target_addr, 1);
3783 return 0;
3786 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3787 abi_ulong target_arg)
3789 union target_semun target_su = { .buf = target_arg };
3790 union semun arg;
3791 struct semid_ds dsarg;
3792 unsigned short *array = NULL;
3793 struct seminfo seminfo;
3794 abi_long ret = -TARGET_EINVAL;
3795 abi_long err;
3796 cmd &= 0xff;
3798 switch( cmd ) {
3799 case GETVAL:
3800 case SETVAL:
3801 /* In 64 bit cross-endian situations, we will erroneously pick up
3802 * the wrong half of the union for the "val" element. To rectify
3803 * this, the entire 8-byte structure is byteswapped, followed by
3804 * a swap of the 4 byte val field. In other cases, the data is
3805 * already in proper host byte order. */
3806 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3807 target_su.buf = tswapal(target_su.buf);
3808 arg.val = tswap32(target_su.val);
3809 } else {
3810 arg.val = target_su.val;
3812 ret = get_errno(semctl(semid, semnum, cmd, arg));
3813 break;
3814 case GETALL:
3815 case SETALL:
3816 err = target_to_host_semarray(semid, &array, target_su.array);
3817 if (err)
3818 return err;
3819 arg.array = array;
3820 ret = get_errno(semctl(semid, semnum, cmd, arg));
3821 err = host_to_target_semarray(semid, target_su.array, &array);
3822 if (err)
3823 return err;
3824 break;
3825 case IPC_STAT:
3826 case IPC_SET:
3827 case SEM_STAT:
3828 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3829 if (err)
3830 return err;
3831 arg.buf = &dsarg;
3832 ret = get_errno(semctl(semid, semnum, cmd, arg));
3833 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3834 if (err)
3835 return err;
3836 break;
3837 case IPC_INFO:
3838 case SEM_INFO:
3839 arg.__buf = &seminfo;
3840 ret = get_errno(semctl(semid, semnum, cmd, arg));
3841 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3842 if (err)
3843 return err;
3844 break;
3845 case IPC_RMID:
3846 case GETPID:
3847 case GETNCNT:
3848 case GETZCNT:
3849 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3850 break;
3853 return ret;
/* Guest layout of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index in the set */
    short sem_op;             /* operation (+/-/0) */
    short sem_flg;            /* IPC_NOWAIT, SEM_UNDO */
};
3862 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
3863 abi_ulong target_addr,
3864 unsigned nsops)
3866 struct target_sembuf *target_sembuf;
3867 int i;
3869 target_sembuf = lock_user(VERIFY_READ, target_addr,
3870 nsops*sizeof(struct target_sembuf), 1);
3871 if (!target_sembuf)
3872 return -TARGET_EFAULT;
3874 for(i=0; i<nsops; i++) {
3875 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
3876 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
3877 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
3880 unlock_user(target_sembuf, target_addr, 0);
3882 return 0;
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop()/semtimedop(): convert the guest op array and optional
 * timeout, then try the direct syscall, falling back to the multiplexed
 * sys_ipc entry point on hosts without __NR_semtimedop.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
3942 struct target_msqid_ds
3944 struct target_ipc_perm msg_perm;
3945 abi_ulong msg_stime;
3946 #if TARGET_ABI_BITS == 32
3947 abi_ulong __unused1;
3948 #endif
3949 abi_ulong msg_rtime;
3950 #if TARGET_ABI_BITS == 32
3951 abi_ulong __unused2;
3952 #endif
3953 abi_ulong msg_ctime;
3954 #if TARGET_ABI_BITS == 32
3955 abi_ulong __unused3;
3956 #endif
3957 abi_ulong __msg_cbytes;
3958 abi_ulong msg_qnum;
3959 abi_ulong msg_qbytes;
3960 abi_ulong msg_lspid;
3961 abi_ulong msg_lrpid;
3962 abi_ulong __unused4;
3963 abi_ulong __unused5;
3966 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
3967 abi_ulong target_addr)
3969 struct target_msqid_ds *target_md;
3971 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
3972 return -TARGET_EFAULT;
3973 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
3974 return -TARGET_EFAULT;
3975 host_md->msg_stime = tswapal(target_md->msg_stime);
3976 host_md->msg_rtime = tswapal(target_md->msg_rtime);
3977 host_md->msg_ctime = tswapal(target_md->msg_ctime);
3978 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
3979 host_md->msg_qnum = tswapal(target_md->msg_qnum);
3980 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
3981 host_md->msg_lspid = tswapal(target_md->msg_lspid);
3982 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
3983 unlock_user_struct(target_md, target_addr, 0);
3984 return 0;
3987 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
3988 struct msqid_ds *host_md)
3990 struct target_msqid_ds *target_md;
3992 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
3993 return -TARGET_EFAULT;
3994 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
3995 return -TARGET_EFAULT;
3996 target_md->msg_stime = tswapal(host_md->msg_stime);
3997 target_md->msg_rtime = tswapal(host_md->msg_rtime);
3998 target_md->msg_ctime = tswapal(host_md->msg_ctime);
3999 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4000 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4001 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4002 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4003 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4004 unlock_user_struct(target_md, target_addr, 1);
4005 return 0;
/* Guest layout of struct msginfo, returned by msgctl(IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4019 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4020 struct msginfo *host_msginfo)
4022 struct target_msginfo *target_msginfo;
4023 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4024 return -TARGET_EFAULT;
4025 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4026 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4027 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4028 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4029 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4030 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4031 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4032 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4033 unlock_user_struct(target_msginfo, target_addr, 1);
4034 return 0;
4037 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4039 struct msqid_ds dsarg;
4040 struct msginfo msginfo;
4041 abi_long ret = -TARGET_EINVAL;
4043 cmd &= 0xff;
4045 switch (cmd) {
4046 case IPC_STAT:
4047 case IPC_SET:
4048 case MSG_STAT:
4049 if (target_to_host_msqid_ds(&dsarg,ptr))
4050 return -TARGET_EFAULT;
4051 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4052 if (host_to_target_msqid_ds(ptr,&dsarg))
4053 return -TARGET_EFAULT;
4054 break;
4055 case IPC_RMID:
4056 ret = get_errno(msgctl(msgid, cmd, NULL));
4057 break;
4058 case IPC_INFO:
4059 case MSG_INFO:
4060 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4061 if (host_to_target_msginfo(ptr, &msginfo))
4062 return -TARGET_EFAULT;
4063 break;
4066 return ret;
4069 struct target_msgbuf {
4070 abi_long mtype;
4071 char mtext[1];
4074 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4075 ssize_t msgsz, int msgflg)
4077 struct target_msgbuf *target_mb;
4078 struct msgbuf *host_mb;
4079 abi_long ret = 0;
4081 if (msgsz < 0) {
4082 return -TARGET_EINVAL;
4085 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4086 return -TARGET_EFAULT;
4087 host_mb = g_try_malloc(msgsz + sizeof(long));
4088 if (!host_mb) {
4089 unlock_user_struct(target_mb, msgp, 0);
4090 return -TARGET_ENOMEM;
4092 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4093 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4094 ret = -TARGET_ENOSYS;
4095 #ifdef __NR_msgsnd
4096 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4097 #endif
4098 #ifdef __NR_ipc
4099 if (ret == -TARGET_ENOSYS) {
4100 #ifdef __s390x__
4101 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4102 host_mb));
4103 #else
4104 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4105 host_mb, 0));
4106 #endif
4108 #endif
4109 g_free(host_mb);
4110 unlock_user_struct(target_mb, msgp, 0);
4112 return ret;
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
/* Generic sys_ipc packs msgp and msgtyp into a two-element array. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4129 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4130 ssize_t msgsz, abi_long msgtyp,
4131 int msgflg)
4133 struct target_msgbuf *target_mb;
4134 char *target_mtext;
4135 struct msgbuf *host_mb;
4136 abi_long ret = 0;
4138 if (msgsz < 0) {
4139 return -TARGET_EINVAL;
4142 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4143 return -TARGET_EFAULT;
4145 host_mb = g_try_malloc(msgsz + sizeof(long));
4146 if (!host_mb) {
4147 ret = -TARGET_ENOMEM;
4148 goto end;
4150 ret = -TARGET_ENOSYS;
4151 #ifdef __NR_msgrcv
4152 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4153 #endif
4154 #ifdef __NR_ipc
4155 if (ret == -TARGET_ENOSYS) {
4156 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4157 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4159 #endif
4161 if (ret > 0) {
4162 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4163 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4164 if (!target_mtext) {
4165 ret = -TARGET_EFAULT;
4166 goto end;
4168 memcpy(target_mb->mtext, host_mb->mtext, ret);
4169 unlock_user(target_mtext, target_mtext_addr, ret);
4172 target_mb->mtype = tswapal(host_mb->mtype);
4174 end:
4175 if (target_mb)
4176 unlock_user_struct(target_mb, msgp, 1);
4177 g_free(host_mb);
4178 return ret;
4181 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4182 abi_ulong target_addr)
4184 struct target_shmid_ds *target_sd;
4186 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4187 return -TARGET_EFAULT;
4188 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4189 return -TARGET_EFAULT;
4190 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4191 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4192 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4193 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4194 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4195 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4196 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4197 unlock_user_struct(target_sd, target_addr, 0);
4198 return 0;
4201 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4202 struct shmid_ds *host_sd)
4204 struct target_shmid_ds *target_sd;
4206 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4207 return -TARGET_EFAULT;
4208 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4209 return -TARGET_EFAULT;
4210 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4211 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4212 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4213 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4214 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4215 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4216 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4217 unlock_user_struct(target_sd, target_addr, 1);
4218 return 0;
4221 struct target_shminfo {
4222 abi_ulong shmmax;
4223 abi_ulong shmmin;
4224 abi_ulong shmmni;
4225 abi_ulong shmseg;
4226 abi_ulong shmall;
4229 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4230 struct shminfo *host_shminfo)
4232 struct target_shminfo *target_shminfo;
4233 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4234 return -TARGET_EFAULT;
4235 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4236 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4237 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4238 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4239 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4240 unlock_user_struct(target_shminfo, target_addr, 1);
4241 return 0;
4244 struct target_shm_info {
4245 int used_ids;
4246 abi_ulong shm_tot;
4247 abi_ulong shm_rss;
4248 abi_ulong shm_swp;
4249 abi_ulong swap_attempts;
4250 abi_ulong swap_successes;
4253 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4254 struct shm_info *host_shm_info)
4256 struct target_shm_info *target_shm_info;
4257 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4258 return -TARGET_EFAULT;
4259 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4260 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4261 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4262 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4263 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4264 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4265 unlock_user_struct(target_shm_info, target_addr, 1);
4266 return 0;
4269 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4271 struct shmid_ds dsarg;
4272 struct shminfo shminfo;
4273 struct shm_info shm_info;
4274 abi_long ret = -TARGET_EINVAL;
4276 cmd &= 0xff;
4278 switch(cmd) {
4279 case IPC_STAT:
4280 case IPC_SET:
4281 case SHM_STAT:
4282 if (target_to_host_shmid_ds(&dsarg, buf))
4283 return -TARGET_EFAULT;
4284 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4285 if (host_to_target_shmid_ds(buf, &dsarg))
4286 return -TARGET_EFAULT;
4287 break;
4288 case IPC_INFO:
4289 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4290 if (host_to_target_shminfo(buf, &shminfo))
4291 return -TARGET_EFAULT;
4292 break;
4293 case SHM_INFO:
4294 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4295 if (host_to_target_shm_info(buf, &shm_info))
4296 return -TARGET_EFAULT;
4297 break;
4298 case IPC_RMID:
4299 case SHM_LOCK:
4300 case SHM_UNLOCK:
4301 ret = get_errno(shmctl(shmid, cmd, NULL));
4302 break;
4305 return ret;
4308 #ifndef TARGET_FORCE_SHMLBA
4309 /* For most architectures, SHMLBA is the same as the page size;
4310 * some architectures have larger values, in which case they should
4311 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4312 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4313 * and defining its own value for SHMLBA.
4315 * The kernel also permits SHMLBA to be set by the architecture to a
4316 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4317 * this means that addresses are rounded to the large size if
4318 * SHM_RND is set but addresses not aligned to that size are not rejected
4319 * as long as they are at least page-aligned. Since the only architecture
4320 * which uses this is ia64 this code doesn't provide for that oddity.
4322 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4324 return TARGET_PAGE_SIZE;
4326 #endif
4328 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4329 int shmid, abi_ulong shmaddr, int shmflg)
4331 abi_long raddr;
4332 void *host_raddr;
4333 struct shmid_ds shm_info;
4334 int i,ret;
4335 abi_ulong shmlba;
4337 /* find out the length of the shared memory segment */
4338 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4339 if (is_error(ret)) {
4340 /* can't get length, bail out */
4341 return ret;
4344 shmlba = target_shmlba(cpu_env);
4346 if (shmaddr & (shmlba - 1)) {
4347 if (shmflg & SHM_RND) {
4348 shmaddr &= ~(shmlba - 1);
4349 } else {
4350 return -TARGET_EINVAL;
4353 if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
4354 return -TARGET_EINVAL;
4357 mmap_lock();
4359 if (shmaddr)
4360 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4361 else {
4362 abi_ulong mmap_start;
4364 /* In order to use the host shmat, we need to honor host SHMLBA. */
4365 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4367 if (mmap_start == -1) {
4368 errno = ENOMEM;
4369 host_raddr = (void *)-1;
4370 } else
4371 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4374 if (host_raddr == (void *)-1) {
4375 mmap_unlock();
4376 return get_errno((long)host_raddr);
4378 raddr=h2g((unsigned long)host_raddr);
4380 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4381 PAGE_VALID | PAGE_READ |
4382 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4384 for (i = 0; i < N_SHM_REGIONS; i++) {
4385 if (!shm_regions[i].in_use) {
4386 shm_regions[i].in_use = true;
4387 shm_regions[i].start = raddr;
4388 shm_regions[i].size = shm_info.shm_segsz;
4389 break;
4393 mmap_unlock();
4394 return raddr;
4398 static inline abi_long do_shmdt(abi_ulong shmaddr)
4400 int i;
4401 abi_long rv;
4403 mmap_lock();
4405 for (i = 0; i < N_SHM_REGIONS; ++i) {
4406 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4407 shm_regions[i].in_use = false;
4408 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4409 break;
4412 rv = get_errno(shmdt(g2h(shmaddr)));
4414 mmap_unlock();
4416 return rv;
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the legacy sys_ipc syscall: 'call' encodes the
 * operation in the low 16 bits and an ABI version in the high 16 bits.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third);
#else
        ret = do_semtimedop(first, ptr, second, fifth);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old ABI: ptr points at a pair (msgp, msgtyp) in guest
                 * memory rather than passing msgtyp directly. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* X-macro machinery: the first expansion of syscall_types.h builds an enum
 * of STRUCT_<name> ids; the second builds a struct_<name>_def argtype
 * description array for each non-special struct. MAX_STRUCT_SIZE bounds the
 * on-stack conversion buffer used by do_ioctl().
 */
4540 /* kernel structure types definitions */
4542 #define STRUCT(name, ...) STRUCT_ ## name,
4543 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4544 enum {
4545 #include "syscall_types.h"
4546 STRUCT_MAX
4548 #undef STRUCT
4549 #undef STRUCT_SPECIAL
4551 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4552 #define STRUCT_SPECIAL(name)
4553 #include "syscall_types.h"
4554 #undef STRUCT
4555 #undef STRUCT_SPECIAL
4557 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Handler for FS_IOC_FIEMAP: the argument is a struct fiemap followed by a
 * guest-sized array of struct fiemap_extent (fm_extent_count entries) that
 * the kernel fills in. Converts in, issues the ioctl, converts back out.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4648 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4649 int fd, int cmd, abi_long arg)
4651 const argtype *arg_type = ie->arg_type;
4652 int target_size;
4653 void *argptr;
4654 int ret;
4655 struct ifconf *host_ifconf;
4656 uint32_t outbufsz;
4657 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4658 int target_ifreq_size;
4659 int nb_ifreq;
4660 int free_buf = 0;
4661 int i;
4662 int target_ifc_len;
4663 abi_long target_ifc_buf;
4664 int host_ifc_len;
4665 char *host_ifc_buf;
4667 assert(arg_type[0] == TYPE_PTR);
4668 assert(ie->access == IOC_RW);
4670 arg_type++;
4671 target_size = thunk_type_size(arg_type, 0);
4673 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4674 if (!argptr)
4675 return -TARGET_EFAULT;
4676 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4677 unlock_user(argptr, arg, 0);
4679 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4680 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4681 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4683 if (target_ifc_buf != 0) {
4684 target_ifc_len = host_ifconf->ifc_len;
4685 nb_ifreq = target_ifc_len / target_ifreq_size;
4686 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4688 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4689 if (outbufsz > MAX_STRUCT_SIZE) {
4691 * We can't fit all the extents into the fixed size buffer.
4692 * Allocate one that is large enough and use it instead.
4694 host_ifconf = malloc(outbufsz);
4695 if (!host_ifconf) {
4696 return -TARGET_ENOMEM;
4698 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4699 free_buf = 1;
4701 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4703 host_ifconf->ifc_len = host_ifc_len;
4704 } else {
4705 host_ifc_buf = NULL;
4707 host_ifconf->ifc_buf = host_ifc_buf;
4709 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4710 if (!is_error(ret)) {
4711 /* convert host ifc_len to target ifc_len */
4713 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4714 target_ifc_len = nb_ifreq * target_ifreq_size;
4715 host_ifconf->ifc_len = target_ifc_len;
4717 /* restore target ifc_buf */
4719 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4721 /* copy struct ifconf to target user */
4723 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4724 if (!argptr)
4725 return -TARGET_EFAULT;
4726 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4727 unlock_user(argptr, arg, target_size);
4729 if (target_ifc_buf != 0) {
4730 /* copy ifreq[] to target user */
4731 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4732 for (i = 0; i < nb_ifreq ; i++) {
4733 thunk_convert(argptr + i * target_ifreq_size,
4734 host_ifc_buf + i * sizeof(struct ifreq),
4735 ifreq_arg_type, THUNK_TARGET);
4737 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4741 if (free_buf) {
4742 free(host_ifconf);
4745 return ret;
4748 #if defined(CONFIG_USBFS)
4749 #if HOST_LONG_BITS > 64
4750 #error USBDEVFS thunks do not support >64 bit hosts yet.
4751 #endif
4752 struct live_urb {
4753 uint64_t target_urb_adr;
4754 uint64_t target_buf_adr;
4755 char *target_buf_ptr;
4756 struct usbdevfs_urb host_urb;
4759 static GHashTable *usbdevfs_urb_hashtable(void)
4761 static GHashTable *urb_hashtable;
4763 if (!urb_hashtable) {
4764 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4766 return urb_hashtable;
4769 static void urb_hashtable_insert(struct live_urb *urb)
4771 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4772 g_hash_table_insert(urb_hashtable, urb, urb);
4775 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4777 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4778 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4781 static void urb_hashtable_remove(struct live_urb *urb)
4783 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4784 g_hash_table_remove(urb_hashtable, urb);
4787 static abi_long
4788 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4789 int fd, int cmd, abi_long arg)
4791 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4792 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4793 struct live_urb *lurb;
4794 void *argptr;
4795 uint64_t hurb;
4796 int target_size;
4797 uintptr_t target_urb_adr;
4798 abi_long ret;
4800 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4802 memset(buf_temp, 0, sizeof(uint64_t));
4803 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4804 if (is_error(ret)) {
4805 return ret;
4808 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4809 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4810 if (!lurb->target_urb_adr) {
4811 return -TARGET_EFAULT;
4813 urb_hashtable_remove(lurb);
4814 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4815 lurb->host_urb.buffer_length);
4816 lurb->target_buf_ptr = NULL;
4818 /* restore the guest buffer pointer */
4819 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4821 /* update the guest urb struct */
4822 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4823 if (!argptr) {
4824 g_free(lurb);
4825 return -TARGET_EFAULT;
4827 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4828 unlock_user(argptr, lurb->target_urb_adr, target_size);
4830 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4831 /* write back the urb handle */
4832 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4833 if (!argptr) {
4834 g_free(lurb);
4835 return -TARGET_EFAULT;
4838 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4839 target_urb_adr = lurb->target_urb_adr;
4840 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4841 unlock_user(argptr, arg, target_size);
4843 g_free(lurb);
4844 return ret;
4847 static abi_long
4848 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4849 uint8_t *buf_temp __attribute__((unused)),
4850 int fd, int cmd, abi_long arg)
4852 struct live_urb *lurb;
4854 /* map target address back to host URB with metadata. */
4855 lurb = urb_hashtable_lookup(arg);
4856 if (!lurb) {
4857 return -TARGET_EFAULT;
4859 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4862 static abi_long
4863 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4864 int fd, int cmd, abi_long arg)
4866 const argtype *arg_type = ie->arg_type;
4867 int target_size;
4868 abi_long ret;
4869 void *argptr;
4870 int rw_dir;
4871 struct live_urb *lurb;
4874 * each submitted URB needs to map to a unique ID for the
4875 * kernel, and that unique ID needs to be a pointer to
4876 * host memory. hence, we need to malloc for each URB.
4877 * isochronous transfers have a variable length struct.
4879 arg_type++;
4880 target_size = thunk_type_size(arg_type, THUNK_TARGET);
4882 /* construct host copy of urb and metadata */
4883 lurb = g_try_malloc0(sizeof(struct live_urb));
4884 if (!lurb) {
4885 return -TARGET_ENOMEM;
4888 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4889 if (!argptr) {
4890 g_free(lurb);
4891 return -TARGET_EFAULT;
4893 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4894 unlock_user(argptr, arg, 0);
4896 lurb->target_urb_adr = arg;
4897 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4899 /* buffer space used depends on endpoint type so lock the entire buffer */
4900 /* control type urbs should check the buffer contents for true direction */
4901 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4902 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4903 lurb->host_urb.buffer_length, 1);
4904 if (lurb->target_buf_ptr == NULL) {
4905 g_free(lurb);
4906 return -TARGET_EFAULT;
4909 /* update buffer pointer in host copy */
4910 lurb->host_urb.buffer = lurb->target_buf_ptr;
4912 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4913 if (is_error(ret)) {
4914 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4915 g_free(lurb);
4916 } else {
4917 urb_hashtable_insert(lurb);
4920 return ret;
4922 #endif /* CONFIG_USBFS */
4924 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
4925 int cmd, abi_long arg)
4927 void *argptr;
4928 struct dm_ioctl *host_dm;
4929 abi_long guest_data;
4930 uint32_t guest_data_size;
4931 int target_size;
4932 const argtype *arg_type = ie->arg_type;
4933 abi_long ret;
4934 void *big_buf = NULL;
4935 char *host_data;
4937 arg_type++;
4938 target_size = thunk_type_size(arg_type, 0);
4939 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4940 if (!argptr) {
4941 ret = -TARGET_EFAULT;
4942 goto out;
4944 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4945 unlock_user(argptr, arg, 0);
4947 /* buf_temp is too small, so fetch things into a bigger buffer */
4948 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
4949 memcpy(big_buf, buf_temp, target_size);
4950 buf_temp = big_buf;
4951 host_dm = big_buf;
4953 guest_data = arg + host_dm->data_start;
4954 if ((guest_data - arg) < 0) {
4955 ret = -TARGET_EINVAL;
4956 goto out;
4958 guest_data_size = host_dm->data_size - host_dm->data_start;
4959 host_data = (char*)host_dm + host_dm->data_start;
4961 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
4962 if (!argptr) {
4963 ret = -TARGET_EFAULT;
4964 goto out;
4967 switch (ie->host_cmd) {
4968 case DM_REMOVE_ALL:
4969 case DM_LIST_DEVICES:
4970 case DM_DEV_CREATE:
4971 case DM_DEV_REMOVE:
4972 case DM_DEV_SUSPEND:
4973 case DM_DEV_STATUS:
4974 case DM_DEV_WAIT:
4975 case DM_TABLE_STATUS:
4976 case DM_TABLE_CLEAR:
4977 case DM_TABLE_DEPS:
4978 case DM_LIST_VERSIONS:
4979 /* no input data */
4980 break;
4981 case DM_DEV_RENAME:
4982 case DM_DEV_SET_GEOMETRY:
4983 /* data contains only strings */
4984 memcpy(host_data, argptr, guest_data_size);
4985 break;
4986 case DM_TARGET_MSG:
4987 memcpy(host_data, argptr, guest_data_size);
4988 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
4989 break;
4990 case DM_TABLE_LOAD:
4992 void *gspec = argptr;
4993 void *cur_data = host_data;
4994 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
4995 int spec_size = thunk_type_size(arg_type, 0);
4996 int i;
4998 for (i = 0; i < host_dm->target_count; i++) {
4999 struct dm_target_spec *spec = cur_data;
5000 uint32_t next;
5001 int slen;
5003 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5004 slen = strlen((char*)gspec + spec_size) + 1;
5005 next = spec->next;
5006 spec->next = sizeof(*spec) + slen;
5007 strcpy((char*)&spec[1], gspec + spec_size);
5008 gspec += next;
5009 cur_data += spec->next;
5011 break;
5013 default:
5014 ret = -TARGET_EINVAL;
5015 unlock_user(argptr, guest_data, 0);
5016 goto out;
5018 unlock_user(argptr, guest_data, 0);
5020 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5021 if (!is_error(ret)) {
5022 guest_data = arg + host_dm->data_start;
5023 guest_data_size = host_dm->data_size - host_dm->data_start;
5024 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5025 switch (ie->host_cmd) {
5026 case DM_REMOVE_ALL:
5027 case DM_DEV_CREATE:
5028 case DM_DEV_REMOVE:
5029 case DM_DEV_RENAME:
5030 case DM_DEV_SUSPEND:
5031 case DM_DEV_STATUS:
5032 case DM_TABLE_LOAD:
5033 case DM_TABLE_CLEAR:
5034 case DM_TARGET_MSG:
5035 case DM_DEV_SET_GEOMETRY:
5036 /* no return data */
5037 break;
5038 case DM_LIST_DEVICES:
5040 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5041 uint32_t remaining_data = guest_data_size;
5042 void *cur_data = argptr;
5043 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5044 int nl_size = 12; /* can't use thunk_size due to alignment */
5046 while (1) {
5047 uint32_t next = nl->next;
5048 if (next) {
5049 nl->next = nl_size + (strlen(nl->name) + 1);
5051 if (remaining_data < nl->next) {
5052 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5053 break;
5055 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5056 strcpy(cur_data + nl_size, nl->name);
5057 cur_data += nl->next;
5058 remaining_data -= nl->next;
5059 if (!next) {
5060 break;
5062 nl = (void*)nl + next;
5064 break;
5066 case DM_DEV_WAIT:
5067 case DM_TABLE_STATUS:
5069 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5070 void *cur_data = argptr;
5071 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5072 int spec_size = thunk_type_size(arg_type, 0);
5073 int i;
5075 for (i = 0; i < host_dm->target_count; i++) {
5076 uint32_t next = spec->next;
5077 int slen = strlen((char*)&spec[1]) + 1;
5078 spec->next = (cur_data - argptr) + spec_size + slen;
5079 if (guest_data_size < spec->next) {
5080 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5081 break;
5083 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5084 strcpy(cur_data + spec_size, (char*)&spec[1]);
5085 cur_data = argptr + spec->next;
5086 spec = (void*)host_dm + host_dm->data_start + next;
5088 break;
5090 case DM_TABLE_DEPS:
5092 void *hdata = (void*)host_dm + host_dm->data_start;
5093 int count = *(uint32_t*)hdata;
5094 uint64_t *hdev = hdata + 8;
5095 uint64_t *gdev = argptr + 8;
5096 int i;
5098 *(uint32_t*)argptr = tswap32(count);
5099 for (i = 0; i < count; i++) {
5100 *gdev = tswap64(*hdev);
5101 gdev++;
5102 hdev++;
5104 break;
5106 case DM_LIST_VERSIONS:
5108 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5109 uint32_t remaining_data = guest_data_size;
5110 void *cur_data = argptr;
5111 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5112 int vers_size = thunk_type_size(arg_type, 0);
5114 while (1) {
5115 uint32_t next = vers->next;
5116 if (next) {
5117 vers->next = vers_size + (strlen(vers->name) + 1);
5119 if (remaining_data < vers->next) {
5120 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5121 break;
5123 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5124 strcpy(cur_data + vers_size, vers->name);
5125 cur_data += vers->next;
5126 remaining_data -= vers->next;
5127 if (!next) {
5128 break;
5130 vers = (void*)vers + next;
5132 break;
5134 default:
5135 unlock_user(argptr, guest_data, 0);
5136 ret = -TARGET_EINVAL;
5137 goto out;
5139 unlock_user(argptr, guest_data, guest_data_size);
5141 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5142 if (!argptr) {
5143 ret = -TARGET_EFAULT;
5144 goto out;
5146 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5147 unlock_user(argptr, arg, target_size);
5149 out:
5150 g_free(big_buf);
5151 return ret;
5154 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5155 int cmd, abi_long arg)
5157 void *argptr;
5158 int target_size;
5159 const argtype *arg_type = ie->arg_type;
5160 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5161 abi_long ret;
5163 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5164 struct blkpg_partition host_part;
5166 /* Read and convert blkpg */
5167 arg_type++;
5168 target_size = thunk_type_size(arg_type, 0);
5169 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5170 if (!argptr) {
5171 ret = -TARGET_EFAULT;
5172 goto out;
5174 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5175 unlock_user(argptr, arg, 0);
5177 switch (host_blkpg->op) {
5178 case BLKPG_ADD_PARTITION:
5179 case BLKPG_DEL_PARTITION:
5180 /* payload is struct blkpg_partition */
5181 break;
5182 default:
5183 /* Unknown opcode */
5184 ret = -TARGET_EINVAL;
5185 goto out;
5188 /* Read and convert blkpg->data */
5189 arg = (abi_long)(uintptr_t)host_blkpg->data;
5190 target_size = thunk_type_size(part_arg_type, 0);
5191 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5192 if (!argptr) {
5193 ret = -TARGET_EFAULT;
5194 goto out;
5196 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5197 unlock_user(argptr, arg, 0);
5199 /* Swizzle the data pointer to our local copy and call! */
5200 host_blkpg->data = &host_part;
5201 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5203 out:
5204 return ret;
5207 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5208 int fd, int cmd, abi_long arg)
5210 const argtype *arg_type = ie->arg_type;
5211 const StructEntry *se;
5212 const argtype *field_types;
5213 const int *dst_offsets, *src_offsets;
5214 int target_size;
5215 void *argptr;
5216 abi_ulong *target_rt_dev_ptr = NULL;
5217 unsigned long *host_rt_dev_ptr = NULL;
5218 abi_long ret;
5219 int i;
5221 assert(ie->access == IOC_W);
5222 assert(*arg_type == TYPE_PTR);
5223 arg_type++;
5224 assert(*arg_type == TYPE_STRUCT);
5225 target_size = thunk_type_size(arg_type, 0);
5226 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5227 if (!argptr) {
5228 return -TARGET_EFAULT;
5230 arg_type++;
5231 assert(*arg_type == (int)STRUCT_rtentry);
5232 se = struct_entries + *arg_type++;
5233 assert(se->convert[0] == NULL);
5234 /* convert struct here to be able to catch rt_dev string */
5235 field_types = se->field_types;
5236 dst_offsets = se->field_offsets[THUNK_HOST];
5237 src_offsets = se->field_offsets[THUNK_TARGET];
5238 for (i = 0; i < se->nb_fields; i++) {
5239 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5240 assert(*field_types == TYPE_PTRVOID);
5241 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5242 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5243 if (*target_rt_dev_ptr != 0) {
5244 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5245 tswapal(*target_rt_dev_ptr));
5246 if (!*host_rt_dev_ptr) {
5247 unlock_user(argptr, arg, 0);
5248 return -TARGET_EFAULT;
5250 } else {
5251 *host_rt_dev_ptr = 0;
5253 field_types++;
5254 continue;
5256 field_types = thunk_convert(buf_temp + dst_offsets[i],
5257 argptr + src_offsets[i],
5258 field_types, THUNK_HOST);
5260 unlock_user(argptr, arg, 0);
5262 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5264 assert(host_rt_dev_ptr != NULL);
5265 assert(target_rt_dev_ptr != NULL);
5266 if (*host_rt_dev_ptr != 0) {
5267 unlock_user((void *)*host_rt_dev_ptr,
5268 *target_rt_dev_ptr, 0);
5270 return ret;
5273 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5274 int fd, int cmd, abi_long arg)
5276 int sig = target_to_host_signal(arg);
5277 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5280 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5281 int fd, int cmd, abi_long arg)
5283 struct timeval tv;
5284 abi_long ret;
5286 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5287 if (is_error(ret)) {
5288 return ret;
5291 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5292 if (copy_to_user_timeval(arg, &tv)) {
5293 return -TARGET_EFAULT;
5295 } else {
5296 if (copy_to_user_timeval64(arg, &tv)) {
5297 return -TARGET_EFAULT;
5301 return ret;
5304 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5305 int fd, int cmd, abi_long arg)
5307 struct timespec ts;
5308 abi_long ret;
5310 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5311 if (is_error(ret)) {
5312 return ret;
5315 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5316 if (host_to_target_timespec(arg, &ts)) {
5317 return -TARGET_EFAULT;
5319 } else{
5320 if (host_to_target_timespec64(arg, &ts)) {
5321 return -TARGET_EFAULT;
5325 return ret;
#ifdef TIOCGPTPEER
/* Handler for TIOCGPTPEER: the argument is a set of open(2) flags that
 * must be translated from target to host bit values.
 */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);

    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
5337 #ifdef HAVE_DRM_H
5339 static void unlock_drm_version(struct drm_version *host_ver,
5340 struct target_drm_version *target_ver,
5341 bool copy)
5343 unlock_user(host_ver->name, target_ver->name,
5344 copy ? host_ver->name_len : 0);
5345 unlock_user(host_ver->date, target_ver->date,
5346 copy ? host_ver->date_len : 0);
5347 unlock_user(host_ver->desc, target_ver->desc,
5348 copy ? host_ver->desc_len : 0);
5351 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5352 struct target_drm_version *target_ver)
5354 memset(host_ver, 0, sizeof(*host_ver));
5356 __get_user(host_ver->name_len, &target_ver->name_len);
5357 if (host_ver->name_len) {
5358 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5359 target_ver->name_len, 0);
5360 if (!host_ver->name) {
5361 return -EFAULT;
5365 __get_user(host_ver->date_len, &target_ver->date_len);
5366 if (host_ver->date_len) {
5367 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5368 target_ver->date_len, 0);
5369 if (!host_ver->date) {
5370 goto err;
5374 __get_user(host_ver->desc_len, &target_ver->desc_len);
5375 if (host_ver->desc_len) {
5376 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5377 target_ver->desc_len, 0);
5378 if (!host_ver->desc) {
5379 goto err;
5383 return 0;
5384 err:
5385 unlock_drm_version(host_ver, target_ver, false);
5386 return -EFAULT;
5389 static inline void host_to_target_drmversion(
5390 struct target_drm_version *target_ver,
5391 struct drm_version *host_ver)
5393 __put_user(host_ver->version_major, &target_ver->version_major);
5394 __put_user(host_ver->version_minor, &target_ver->version_minor);
5395 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5396 __put_user(host_ver->name_len, &target_ver->name_len);
5397 __put_user(host_ver->date_len, &target_ver->date_len);
5398 __put_user(host_ver->desc_len, &target_ver->desc_len);
5399 unlock_drm_version(host_ver, target_ver, true);
5402 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5403 int fd, int cmd, abi_long arg)
5405 struct drm_version *ver;
5406 struct target_drm_version *target_ver;
5407 abi_long ret;
5409 switch (ie->host_cmd) {
5410 case DRM_IOCTL_VERSION:
5411 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5412 return -TARGET_EFAULT;
5414 ver = (struct drm_version *)buf_temp;
5415 ret = target_to_host_drmversion(ver, target_ver);
5416 if (!is_error(ret)) {
5417 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5418 if (is_error(ret)) {
5419 unlock_drm_version(ver, target_ver, false);
5420 } else {
5421 host_to_target_drmversion(target_ver, ver);
5424 unlock_user_struct(target_ver, arg, 0);
5425 return ret;
5427 return -TARGET_ENOSYS;
5430 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5431 struct drm_i915_getparam *gparam,
5432 int fd, abi_long arg)
5434 abi_long ret;
5435 int value;
5436 struct target_drm_i915_getparam *target_gparam;
5438 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5439 return -TARGET_EFAULT;
5442 __get_user(gparam->param, &target_gparam->param);
5443 gparam->value = &value;
5444 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5445 put_user_s32(value, target_gparam->value);
5447 unlock_user_struct(target_gparam, arg, 0);
5448 return ret;
5451 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5452 int fd, int cmd, abi_long arg)
5454 switch (ie->host_cmd) {
5455 case DRM_IOCTL_I915_GETPARAM:
5456 return do_ioctl_drm_i915_getparam(ie,
5457 (struct drm_i915_getparam *)buf_temp,
5458 fd, arg);
5459 default:
5460 return -TARGET_ENOSYS;
5464 #endif
/* Master ioctl translation table, generated from ioctls.h. IOCTL_SPECIAL
 * entries carry a custom do_ioctl callback; IOCTL_IGNORE entries have a
 * zero host_cmd and are rejected by do_ioctl(). The all-zero sentinel
 * terminates the lookup loop.
 */
5466 IOCTLEntry ioctl_entries[] = {
5467 #define IOCTL(cmd, access, ...) \
5468 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5469 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5470 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5471 #define IOCTL_IGNORE(cmd) \
5472 { TARGET_ ## cmd, 0, #cmd },
5473 #include "ioctls.h"
5474 { 0, 0, },
/* ??? Implement proper locking for ioctls. */
/*
 * do_ioctl() Must return target values and target errnos.
 *
 * Dispatch a guest ioctl: look the target command up in ioctl_entries,
 * then either delegate to an entry-specific handler (ie->do_ioctl) or
 * perform the generic argument conversion described by the entry's
 * argtype list and access mode before invoking the host ioctl.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE]; /* host-side staging buffer */
    int target_size;
    void *argptr;

    /* Linear scan; the table ends with a target_cmd == 0 sentinel. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* Entry supplies its own conversion/dispatch routine. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Scalar argument: passed through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes only: run first, then convert result out. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads only: convert argument in, then run. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Convert in, run, and convert back out on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Translation table for termios c_iflag bits (target <-> host). */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 } /* sentinel */
};
/* Translation table for termios c_oflag bits (target <-> host).
 * Multi-bit delay fields (NLDLY, CRDLY, ...) get one entry per value. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 } /* sentinel */
};
/* Translation table for termios c_cflag bits (target <-> host).
 * The CBAUD field is multi-bit, so each baud rate gets its own entry;
 * likewise CSIZE for the character-size values. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 } /* sentinel */
};
/* Translation table for termios c_lflag bits (target <-> host). */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 } /* sentinel */
};
/*
 * Convert a guest struct target_termios (at src) into a host
 * struct host_termios (at dst): byteswap and remap the four flag
 * words through the translation tables, copy c_line, and remap the
 * control-character array index by index.  Host c_cc slots with no
 * target equivalent are left zeroed.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* c_cc indices differ between target and host; remap one by one. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Inverse of target_to_host_termios(): convert a host struct
 * host_termios (at src) into a guest struct target_termios (at dst).
 * Flags are remapped through the same tables and byteswapped to
 * target order; c_cc entries are remapped index by index.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* c_cc indices differ between target and host; remap one by one. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for struct termios: conversion callbacks plus
 * the size/alignment of the target and host layouts. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5745 static bitmask_transtbl mmap_flags_tbl[] = {
5746 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5747 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5748 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5749 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5750 MAP_ANONYMOUS, MAP_ANONYMOUS },
5751 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5752 MAP_GROWSDOWN, MAP_GROWSDOWN },
5753 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5754 MAP_DENYWRITE, MAP_DENYWRITE },
5755 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5756 MAP_EXECUTABLE, MAP_EXECUTABLE },
5757 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5758 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5759 MAP_NORESERVE, MAP_NORESERVE },
5760 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5761 /* MAP_STACK had been ignored by the kernel for quite some time.
5762 Recognize it for the target insofar as we do not want to pass
5763 it through to the host. */
5764 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5765 { 0, 0, 0, 0 }
5769 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5770 * TARGET_I386 is defined if TARGET_X86_64 is defined
5772 #if defined(TARGET_I386)
/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;  /* host view of the guest LDT; lazily allocated by write_ldt() */
/*
 * modify_ldt(2) func 0: copy up to 'bytecount' bytes of the emulated
 * LDT to guest memory at 'ptr'.  Returns the number of bytes copied
 * (0 when no LDT has been created yet) or -TARGET_EFAULT.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/*
 * modify_ldt(2) funcs 1 / 0x11: install one LDT entry described by a
 * guest struct target_modify_ldt_ldt_s at 'ptr'.  'oldmode' selects the
 * legacy (func 1) semantics: no 'useable' bit, stricter validation.
 * The descriptor bit-packing below mirrors the Linux kernel's
 * write_ldt().  Returns 0 on success or a target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of an x86 descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5888 /* specific and weird i386 syscalls */
5889 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5890 unsigned long bytecount)
5892 abi_long ret;
5894 switch (func) {
5895 case 0:
5896 ret = read_ldt(ptr, bytecount);
5897 break;
5898 case 1:
5899 ret = write_ldt(env, ptr, bytecount, 1);
5900 break;
5901 case 0x11:
5902 ret = write_ldt(env, ptr, bytecount, 0);
5903 break;
5904 default:
5905 ret = -TARGET_ENOSYS;
5906 break;
5908 return ret;
5911 #if defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor into the GDT.  If the
 * guest passes entry_number == -1, pick the first free TLS slot and
 * write the chosen index back to guest memory.  Bit-packing mirrors
 * the kernel's set_thread_area()/fill_ldt().
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS slot for me". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of an x86 descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * get_thread_area(2): read the GDT descriptor selected by the guest's
 * entry_number and unpack it back into a struct target_modify_ldt_ldt_s
 * in guest memory (inverse of do_set_thread_area()'s bit-packing).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual descriptor bits from the second word. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Re-assemble the user_desc-style flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
/* arch_prctl(2) is not available to 32-bit (TARGET_ABI32) guests;
 * they manage TLS via set/get_thread_area instead. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6048 #else
6049 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6051 abi_long ret = 0;
6052 abi_ulong val;
6053 int idx;
6055 switch(code) {
6056 case TARGET_ARCH_SET_GS:
6057 case TARGET_ARCH_SET_FS:
6058 if (code == TARGET_ARCH_SET_GS)
6059 idx = R_GS;
6060 else
6061 idx = R_FS;
6062 cpu_x86_load_seg(env, idx, 0);
6063 env->segs[idx].base = addr;
6064 break;
6065 case TARGET_ARCH_GET_GS:
6066 case TARGET_ARCH_GET_FS:
6067 if (code == TARGET_ARCH_GET_GS)
6068 idx = R_GS;
6069 else
6070 idx = R_FS;
6071 val = env->segs[idx].base;
6072 if (put_user(val, addr, abi_ulong))
6073 ret = -TARGET_EFAULT;
6074 break;
6075 default:
6076 ret = -TARGET_EINVAL;
6077 break;
6079 return ret;
#endif /* defined(TARGET_ABI32) */
6083 #endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000  /* host stack size for emulated guest threads */

/* Serializes thread creation so parent-side setup appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Handshake data passed from do_fork() to the new thread's clone_func(). */
typedef struct {
    CPUArchState *env;        /* CPU state for the new thread */
    pthread_mutex_t mutex;    /* protects 'cond' handshake */
    pthread_cond_t cond;      /* signalled once the child has initialized */
    pthread_t thread;
    uint32_t tid;             /* child's host TID, filled in by clone_func() */
    abi_ulong child_tidptr;   /* guest address for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;  /* guest address for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;         /* signal mask to restore in the child */
} new_thread_info;
/*
 * Entry point of a new emulated guest thread (started by do_fork()
 * for CLONE_VM clones).  Registers the thread with RCU/TCG, publishes
 * its TID, signals the parent that setup is done, waits for the parent
 * to finish TLS initialization, then enters the CPU loop forever.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Publish the TID where CLONE_CHILD_SETTID/CLONE_PARENT_SETTID ask. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2).  CLONE_VM clones become host
 * pthreads running clone_func(); everything else is emulated with a
 * plain fork().  Returns the child's TID/PID on success, -1 or a
 * target errno on failure.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Only the full pthread-style flag combination is supported. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the return values of the three attr calls are
         * each overwritten and never checked — only pthread_create's
         * result matters below; confirm this is intentional. */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here. The parent process could
               theoretically read the TID in the child process before the child
               tid is set. This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping. We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl(2) command to its host equivalent.  Lock
 * commands are mapped to the 64-bit variants so the common struct
 * flock64 helpers below can be used for all of them.  Returns
 * -TARGET_EINVAL for unknown commands.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
/* Shared switch body for the two flock l_type converters below; the
 * direction of conversion is selected by how TRANSTBL_CONVERT is
 * defined at each expansion site. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
/* Convert a guest flock l_type value to the host value;
 * -TARGET_EINVAL for unrecognized values. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
/* Convert a host flock l_type value to the guest value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6404 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6405 abi_ulong target_flock_addr)
6407 struct target_flock *target_fl;
6408 int l_type;
6410 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6411 return -TARGET_EFAULT;
6414 __get_user(l_type, &target_fl->l_type);
6415 l_type = target_to_host_flock(l_type);
6416 if (l_type < 0) {
6417 return l_type;
6419 fl->l_type = l_type;
6420 __get_user(fl->l_whence, &target_fl->l_whence);
6421 __get_user(fl->l_start, &target_fl->l_start);
6422 __get_user(fl->l_len, &target_fl->l_len);
6423 __get_user(fl->l_pid, &target_fl->l_pid);
6424 unlock_user_struct(target_fl, target_flock_addr, 0);
6425 return 0;
/*
 * Copy a host struct flock64 out to a guest struct target_flock at
 * target_flock_addr, converting l_type to the guest encoding.
 * Returns 0 or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Function types for the flock64 copy-in / copy-out helpers, so
 * do_fcntl() can select the ABI-appropriate variant at runtime. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6451 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6452 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6453 abi_ulong target_flock_addr)
6455 struct target_oabi_flock64 *target_fl;
6456 int l_type;
6458 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6459 return -TARGET_EFAULT;
6462 __get_user(l_type, &target_fl->l_type);
6463 l_type = target_to_host_flock(l_type);
6464 if (l_type < 0) {
6465 return l_type;
6467 fl->l_type = l_type;
6468 __get_user(fl->l_whence, &target_fl->l_whence);
6469 __get_user(fl->l_start, &target_fl->l_start);
6470 __get_user(fl->l_len, &target_fl->l_len);
6471 __get_user(fl->l_pid, &target_fl->l_pid);
6472 unlock_user_struct(target_fl, target_flock_addr, 0);
6473 return 0;
/*
 * ARM OABI variant: copy a host struct flock64 out to a guest
 * struct target_oabi_flock64, converting l_type.  Returns 0 or
 * -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6495 #endif
6497 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6498 abi_ulong target_flock_addr)
6500 struct target_flock64 *target_fl;
6501 int l_type;
6503 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6504 return -TARGET_EFAULT;
6507 __get_user(l_type, &target_fl->l_type);
6508 l_type = target_to_host_flock(l_type);
6509 if (l_type < 0) {
6510 return l_type;
6512 fl->l_type = l_type;
6513 __get_user(fl->l_whence, &target_fl->l_whence);
6514 __get_user(fl->l_start, &target_fl->l_start);
6515 __get_user(fl->l_len, &target_fl->l_len);
6516 __get_user(fl->l_pid, &target_fl->l_pid);
6517 unlock_user_struct(target_fl, target_flock_addr, 0);
6518 return 0;
/*
 * Copy a host struct flock64 out to a guest struct target_flock64,
 * converting l_type.  Returns 0 or -TARGET_EFAULT on a bad guest
 * address.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6541 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6543 struct flock64 fl64;
6544 #ifdef F_GETOWN_EX
6545 struct f_owner_ex fox;
6546 struct target_f_owner_ex *target_fox;
6547 #endif
6548 abi_long ret;
6549 int host_cmd = target_to_host_fcntl_cmd(cmd);
6551 if (host_cmd == -TARGET_EINVAL)
6552 return host_cmd;
6554 switch(cmd) {
6555 case TARGET_F_GETLK:
6556 ret = copy_from_user_flock(&fl64, arg);
6557 if (ret) {
6558 return ret;
6560 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6561 if (ret == 0) {
6562 ret = copy_to_user_flock(arg, &fl64);
6564 break;
6566 case TARGET_F_SETLK:
6567 case TARGET_F_SETLKW:
6568 ret = copy_from_user_flock(&fl64, arg);
6569 if (ret) {
6570 return ret;
6572 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6573 break;
6575 case TARGET_F_GETLK64:
6576 case TARGET_F_OFD_GETLK:
6577 ret = copy_from_user_flock64(&fl64, arg);
6578 if (ret) {
6579 return ret;
6581 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6582 if (ret == 0) {
6583 ret = copy_to_user_flock64(arg, &fl64);
6585 break;
6586 case TARGET_F_SETLK64:
6587 case TARGET_F_SETLKW64:
6588 case TARGET_F_OFD_SETLK:
6589 case TARGET_F_OFD_SETLKW:
6590 ret = copy_from_user_flock64(&fl64, arg);
6591 if (ret) {
6592 return ret;
6594 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6595 break;
6597 case TARGET_F_GETFL:
6598 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6599 if (ret >= 0) {
6600 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6602 break;
6604 case TARGET_F_SETFL:
6605 ret = get_errno(safe_fcntl(fd, host_cmd,
6606 target_to_host_bitmask(arg,
6607 fcntl_flags_tbl)));
6608 break;
6610 #ifdef F_GETOWN_EX
6611 case TARGET_F_GETOWN_EX:
6612 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6613 if (ret >= 0) {
6614 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6615 return -TARGET_EFAULT;
6616 target_fox->type = tswap32(fox.type);
6617 target_fox->pid = tswap32(fox.pid);
6618 unlock_user_struct(target_fox, arg, 1);
6620 break;
6621 #endif
6623 #ifdef F_SETOWN_EX
6624 case TARGET_F_SETOWN_EX:
6625 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6626 return -TARGET_EFAULT;
6627 fox.type = tswap32(target_fox->type);
6628 fox.pid = tswap32(target_fox->pid);
6629 unlock_user_struct(target_fox, arg, 0);
6630 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6631 break;
6632 #endif
6634 case TARGET_F_SETOWN:
6635 case TARGET_F_GETOWN:
6636 case TARGET_F_SETSIG:
6637 case TARGET_F_GETSIG:
6638 case TARGET_F_SETLEASE:
6639 case TARGET_F_GETLEASE:
6640 case TARGET_F_SETPIPE_SZ:
6641 case TARGET_F_GETPIPE_SZ:
6642 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6643 break;
6645 default:
6646 ret = get_errno(safe_fcntl(fd, cmd, arg));
6647 break;
6649 return ret;
#ifdef USE_UID16

/* Squash a 32-bit uid into the 16-bit ABI range; overflow maps to 65534. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535) {
        return 65534;
    } else {
        return uid;
    }
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535) {
        return 65534;
    } else {
        return gid;
    }
}

/* Widen a 16-bit id, preserving the -1 "unchanged" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1) {
        return -1;
    } else {
        return uid;
    }
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1) {
        return -1;
    } else {
        return gid;
    }
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABI: all of these are identity/32-bit operations. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6718 /* We must do direct syscalls for setting UID/GID, because we want to
6719 * implement the Linux system call semantics of "change only for this thread",
6720 * not the libc/POSIX semantics of "change for all threads in process".
6721 * (See http://ewontfix.com/17/ for more details.)
6722 * We use the 32-bit version of the syscalls if present; if it is not
6723 * then either the host architecture supports 32-bit UIDs natively with
6724 * the standard syscall, or the 16-bit UID is the best we can do.
6726 #ifdef __NR_setuid32
6727 #define __NR_sys_setuid __NR_setuid32
6728 #else
6729 #define __NR_sys_setuid __NR_setuid
6730 #endif
6731 #ifdef __NR_setgid32
6732 #define __NR_sys_setgid __NR_setgid32
6733 #else
6734 #define __NR_sys_setgid __NR_setgid
6735 #endif
6736 #ifdef __NR_setresuid32
6737 #define __NR_sys_setresuid __NR_setresuid32
6738 #else
6739 #define __NR_sys_setresuid __NR_setresuid
6740 #endif
6741 #ifdef __NR_setresgid32
6742 #define __NR_sys_setresgid __NR_setresgid32
6743 #else
6744 #define __NR_sys_setresgid __NR_setresgid
6745 #endif
6747 _syscall1(int, sys_setuid, uid_t, uid)
6748 _syscall1(int, sys_setgid, gid_t, gid)
6749 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6750 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6752 void syscall_init(void)
6754 IOCTLEntry *ie;
6755 const argtype *arg_type;
6756 int size;
6757 int i;
6759 thunk_init(STRUCT_MAX);
6761 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6762 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6763 #include "syscall_types.h"
6764 #undef STRUCT
6765 #undef STRUCT_SPECIAL
6767 /* Build target_to_host_errno_table[] table from
6768 * host_to_target_errno_table[]. */
6769 for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
6770 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
6773 /* we patch the ioctl size if necessary. We rely on the fact that
6774 no ioctl has all the bits at '1' in the size field */
6775 ie = ioctl_entries;
6776 while (ie->target_cmd != 0) {
6777 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6778 TARGET_IOC_SIZEMASK) {
6779 arg_type = ie->arg_type;
6780 if (arg_type[0] != TYPE_PTR) {
6781 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6782 ie->target_cmd);
6783 exit(1);
6785 arg_type++;
6786 size = thunk_type_size(arg_type, 0);
6787 ie->target_cmd = (ie->target_cmd &
6788 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
6789 (size << TARGET_IOC_SIZESHIFT);
6792 /* automatic consistency check if same arch */
6793 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6794 (defined(__x86_64__) && defined(TARGET_X86_64))
6795 if (unlikely(ie->target_cmd != ie->host_cmd)) {
6796 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6797 ie->name, ie->target_cmd, ie->host_cmd);
6799 #endif
6800 ie++;
6804 #ifdef TARGET_NR_truncate64
6805 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
6806 abi_long arg2,
6807 abi_long arg3,
6808 abi_long arg4)
6810 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
6811 arg2 = arg3;
6812 arg3 = arg4;
6814 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
6816 #endif
6818 #ifdef TARGET_NR_ftruncate64
6819 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
6820 abi_long arg2,
6821 abi_long arg3,
6822 abi_long arg4)
6824 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
6825 arg2 = arg3;
6826 arg3 = arg4;
6828 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
6830 #endif
6832 #if defined(TARGET_NR_timer_settime) || \
6833 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
6834 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
6835 abi_ulong target_addr)
6837 if (target_to_host_timespec(&host_its->it_interval, target_addr +
6838 offsetof(struct target_itimerspec,
6839 it_interval)) ||
6840 target_to_host_timespec(&host_its->it_value, target_addr +
6841 offsetof(struct target_itimerspec,
6842 it_value))) {
6843 return -TARGET_EFAULT;
6846 return 0;
6848 #endif
6850 #if defined(TARGET_NR_timer_settime64) || \
6851 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
6852 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
6853 abi_ulong target_addr)
6855 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
6856 offsetof(struct target__kernel_itimerspec,
6857 it_interval)) ||
6858 target_to_host_timespec64(&host_its->it_value, target_addr +
6859 offsetof(struct target__kernel_itimerspec,
6860 it_value))) {
6861 return -TARGET_EFAULT;
6864 return 0;
6866 #endif
6868 #if ((defined(TARGET_NR_timerfd_gettime) || \
6869 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
6870 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
6871 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6872 struct itimerspec *host_its)
6874 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6875 it_interval),
6876 &host_its->it_interval) ||
6877 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
6878 it_value),
6879 &host_its->it_value)) {
6880 return -TARGET_EFAULT;
6882 return 0;
6884 #endif
6886 #if ((defined(TARGET_NR_timerfd_gettime64) || \
6887 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
6888 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
6889 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
6890 struct itimerspec *host_its)
6892 if (host_to_target_timespec64(target_addr +
6893 offsetof(struct target__kernel_itimerspec,
6894 it_interval),
6895 &host_its->it_interval) ||
6896 host_to_target_timespec64(target_addr +
6897 offsetof(struct target__kernel_itimerspec,
6898 it_value),
6899 &host_its->it_value)) {
6900 return -TARGET_EFAULT;
6902 return 0;
6904 #endif
6906 #if defined(TARGET_NR_adjtimex) || \
6907 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
6908 static inline abi_long target_to_host_timex(struct timex *host_tx,
6909 abi_long target_addr)
6911 struct target_timex *target_tx;
6913 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
6914 return -TARGET_EFAULT;
6917 __get_user(host_tx->modes, &target_tx->modes);
6918 __get_user(host_tx->offset, &target_tx->offset);
6919 __get_user(host_tx->freq, &target_tx->freq);
6920 __get_user(host_tx->maxerror, &target_tx->maxerror);
6921 __get_user(host_tx->esterror, &target_tx->esterror);
6922 __get_user(host_tx->status, &target_tx->status);
6923 __get_user(host_tx->constant, &target_tx->constant);
6924 __get_user(host_tx->precision, &target_tx->precision);
6925 __get_user(host_tx->tolerance, &target_tx->tolerance);
6926 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6927 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6928 __get_user(host_tx->tick, &target_tx->tick);
6929 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6930 __get_user(host_tx->jitter, &target_tx->jitter);
6931 __get_user(host_tx->shift, &target_tx->shift);
6932 __get_user(host_tx->stabil, &target_tx->stabil);
6933 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
6934 __get_user(host_tx->calcnt, &target_tx->calcnt);
6935 __get_user(host_tx->errcnt, &target_tx->errcnt);
6936 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
6937 __get_user(host_tx->tai, &target_tx->tai);
6939 unlock_user_struct(target_tx, target_addr, 0);
6940 return 0;
6943 static inline abi_long host_to_target_timex(abi_long target_addr,
6944 struct timex *host_tx)
6946 struct target_timex *target_tx;
6948 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
6949 return -TARGET_EFAULT;
6952 __put_user(host_tx->modes, &target_tx->modes);
6953 __put_user(host_tx->offset, &target_tx->offset);
6954 __put_user(host_tx->freq, &target_tx->freq);
6955 __put_user(host_tx->maxerror, &target_tx->maxerror);
6956 __put_user(host_tx->esterror, &target_tx->esterror);
6957 __put_user(host_tx->status, &target_tx->status);
6958 __put_user(host_tx->constant, &target_tx->constant);
6959 __put_user(host_tx->precision, &target_tx->precision);
6960 __put_user(host_tx->tolerance, &target_tx->tolerance);
6961 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
6962 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
6963 __put_user(host_tx->tick, &target_tx->tick);
6964 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
6965 __put_user(host_tx->jitter, &target_tx->jitter);
6966 __put_user(host_tx->shift, &target_tx->shift);
6967 __put_user(host_tx->stabil, &target_tx->stabil);
6968 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
6969 __put_user(host_tx->calcnt, &target_tx->calcnt);
6970 __put_user(host_tx->errcnt, &target_tx->errcnt);
6971 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
6972 __put_user(host_tx->tai, &target_tx->tai);
6974 unlock_user_struct(target_tx, target_addr, 1);
6975 return 0;
6977 #endif
6979 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6980 abi_ulong target_addr)
6982 struct target_sigevent *target_sevp;
6984 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6985 return -TARGET_EFAULT;
6988 /* This union is awkward on 64 bit systems because it has a 32 bit
6989 * integer and a pointer in it; we follow the conversion approach
6990 * used for handling sigval types in signal.c so the guest should get
6991 * the correct value back even if we did a 64 bit byteswap and it's
6992 * using the 32 bit integer.
6994 host_sevp->sigev_value.sival_ptr =
6995 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6996 host_sevp->sigev_signo =
6997 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6998 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6999 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
7001 unlock_user_struct(target_sevp, target_addr, 1);
7002 return 0;
7005 #if defined(TARGET_NR_mlockall)
7006 static inline int target_to_host_mlockall_arg(int arg)
7008 int result = 0;
7010 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
7011 result |= MCL_CURRENT;
7013 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
7014 result |= MCL_FUTURE;
7016 return result;
7018 #endif
7020 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7021 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7022 defined(TARGET_NR_newfstatat))
7023 static inline abi_long host_to_target_stat64(void *cpu_env,
7024 abi_ulong target_addr,
7025 struct stat *host_st)
7027 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7028 if (((CPUARMState *)cpu_env)->eabi) {
7029 struct target_eabi_stat64 *target_st;
7031 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7032 return -TARGET_EFAULT;
7033 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7034 __put_user(host_st->st_dev, &target_st->st_dev);
7035 __put_user(host_st->st_ino, &target_st->st_ino);
7036 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7037 __put_user(host_st->st_ino, &target_st->__st_ino);
7038 #endif
7039 __put_user(host_st->st_mode, &target_st->st_mode);
7040 __put_user(host_st->st_nlink, &target_st->st_nlink);
7041 __put_user(host_st->st_uid, &target_st->st_uid);
7042 __put_user(host_st->st_gid, &target_st->st_gid);
7043 __put_user(host_st->st_rdev, &target_st->st_rdev);
7044 __put_user(host_st->st_size, &target_st->st_size);
7045 __put_user(host_st->st_blksize, &target_st->st_blksize);
7046 __put_user(host_st->st_blocks, &target_st->st_blocks);
7047 __put_user(host_st->st_atime, &target_st->target_st_atime);
7048 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7049 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7050 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7051 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7052 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7053 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7054 #endif
7055 unlock_user_struct(target_st, target_addr, 1);
7056 } else
7057 #endif
7059 #if defined(TARGET_HAS_STRUCT_STAT64)
7060 struct target_stat64 *target_st;
7061 #else
7062 struct target_stat *target_st;
7063 #endif
7065 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7066 return -TARGET_EFAULT;
7067 memset(target_st, 0, sizeof(*target_st));
7068 __put_user(host_st->st_dev, &target_st->st_dev);
7069 __put_user(host_st->st_ino, &target_st->st_ino);
7070 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7071 __put_user(host_st->st_ino, &target_st->__st_ino);
7072 #endif
7073 __put_user(host_st->st_mode, &target_st->st_mode);
7074 __put_user(host_st->st_nlink, &target_st->st_nlink);
7075 __put_user(host_st->st_uid, &target_st->st_uid);
7076 __put_user(host_st->st_gid, &target_st->st_gid);
7077 __put_user(host_st->st_rdev, &target_st->st_rdev);
7078 /* XXX: better use of kernel struct */
7079 __put_user(host_st->st_size, &target_st->st_size);
7080 __put_user(host_st->st_blksize, &target_st->st_blksize);
7081 __put_user(host_st->st_blocks, &target_st->st_blocks);
7082 __put_user(host_st->st_atime, &target_st->target_st_atime);
7083 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7084 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7085 #if _POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700
7086 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7087 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7088 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7089 #endif
7090 unlock_user_struct(target_st, target_addr, 1);
7093 return 0;
7095 #endif
7097 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7098 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7099 abi_ulong target_addr)
7101 struct target_statx *target_stx;
7103 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7104 return -TARGET_EFAULT;
7106 memset(target_stx, 0, sizeof(*target_stx));
7108 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7109 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7110 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7111 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7112 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7113 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7114 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7115 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7116 __put_user(host_stx->stx_size, &target_stx->stx_size);
7117 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7118 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7119 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7120 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7121 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7122 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7123 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7124 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7125 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7126 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7127 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7128 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7129 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7130 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7132 unlock_user_struct(target_stx, target_addr, 1);
7134 return 0;
7136 #endif
/*
 * Dispatch to the raw futex syscall appropriate for the host:
 * __NR_futex on 64-bit hosts, preferring __NR_futex_time64 on 32-bit
 * hosts whose timespec carries a 64-bit tv_sec.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7163 static int do_safe_futex(int *uaddr, int op, int val,
7164 const struct timespec *timeout, int *uaddr2,
7165 int val3)
7167 #if HOST_LONG_BITS == 64
7168 #if defined(__NR_futex)
7169 /* always a 64-bit time_t, it doesn't define _time64 version */
7170 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7171 #endif
7172 #else /* HOST_LONG_BITS == 64 */
7173 #if defined(__NR_futex_time64)
7174 if (sizeof(timeout->tv_sec) == 8) {
7175 /* _time64 function on 32bit arch */
7176 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7177 val3));
7179 #endif
7180 #if defined(__NR_futex)
7181 /* old function on 32bit arch */
7182 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7183 #endif
7184 #endif /* HOST_LONG_BITS == 64 */
7185 return -TARGET_ENOSYS;
7188 /* ??? Using host futex calls even when target atomic operations
7189 are not really atomic probably breaks things. However implementing
7190 futexes locally would make futexes shared between multiple processes
7191 tricky. However they're probably useless because guest atomic
7192 operations won't work either. */
7193 #if defined(TARGET_NR_futex)
7194 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
7195 target_ulong uaddr2, int val3)
7197 struct timespec ts, *pts;
7198 int base_op;
7200 /* ??? We assume FUTEX_* constants are the same on both host
7201 and target. */
7202 #ifdef FUTEX_CMD_MASK
7203 base_op = op & FUTEX_CMD_MASK;
7204 #else
7205 base_op = op;
7206 #endif
7207 switch (base_op) {
7208 case FUTEX_WAIT:
7209 case FUTEX_WAIT_BITSET:
7210 if (timeout) {
7211 pts = &ts;
7212 target_to_host_timespec(pts, timeout);
7213 } else {
7214 pts = NULL;
7216 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7217 case FUTEX_WAKE:
7218 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7219 case FUTEX_FD:
7220 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7221 case FUTEX_REQUEUE:
7222 case FUTEX_CMP_REQUEUE:
7223 case FUTEX_WAKE_OP:
7224 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7225 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7226 But the prototype takes a `struct timespec *'; insert casts
7227 to satisfy the compiler. We do not need to tswap TIMEOUT
7228 since it's not compared to guest memory. */
7229 pts = (struct timespec *)(uintptr_t) timeout;
7230 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7231 (base_op == FUTEX_CMP_REQUEUE
7232 ? tswap32(val3)
7233 : val3));
7234 default:
7235 return -TARGET_ENOSYS;
7238 #endif
7240 #if defined(TARGET_NR_futex_time64)
7241 static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
7242 target_ulong uaddr2, int val3)
7244 struct timespec ts, *pts;
7245 int base_op;
7247 /* ??? We assume FUTEX_* constants are the same on both host
7248 and target. */
7249 #ifdef FUTEX_CMD_MASK
7250 base_op = op & FUTEX_CMD_MASK;
7251 #else
7252 base_op = op;
7253 #endif
7254 switch (base_op) {
7255 case FUTEX_WAIT:
7256 case FUTEX_WAIT_BITSET:
7257 if (timeout) {
7258 pts = &ts;
7259 target_to_host_timespec64(pts, timeout);
7260 } else {
7261 pts = NULL;
7263 return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
7264 case FUTEX_WAKE:
7265 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7266 case FUTEX_FD:
7267 return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
7268 case FUTEX_REQUEUE:
7269 case FUTEX_CMP_REQUEUE:
7270 case FUTEX_WAKE_OP:
7271 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7272 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7273 But the prototype takes a `struct timespec *'; insert casts
7274 to satisfy the compiler. We do not need to tswap TIMEOUT
7275 since it's not compared to guest memory. */
7276 pts = (struct timespec *)(uintptr_t) timeout;
7277 return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
7278 (base_op == FUTEX_CMP_REQUEUE
7279 ? tswap32(val3)
7280 : val3));
7281 default:
7282 return -TARGET_ENOSYS;
7285 #endif
7287 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7288 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7289 abi_long handle, abi_long mount_id,
7290 abi_long flags)
7292 struct file_handle *target_fh;
7293 struct file_handle *fh;
7294 int mid = 0;
7295 abi_long ret;
7296 char *name;
7297 unsigned int size, total_size;
7299 if (get_user_s32(size, handle)) {
7300 return -TARGET_EFAULT;
7303 name = lock_user_string(pathname);
7304 if (!name) {
7305 return -TARGET_EFAULT;
7308 total_size = sizeof(struct file_handle) + size;
7309 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7310 if (!target_fh) {
7311 unlock_user(name, pathname, 0);
7312 return -TARGET_EFAULT;
7315 fh = g_malloc0(total_size);
7316 fh->handle_bytes = size;
7318 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7319 unlock_user(name, pathname, 0);
7321 /* man name_to_handle_at(2):
7322 * Other than the use of the handle_bytes field, the caller should treat
7323 * the file_handle structure as an opaque data type
7326 memcpy(target_fh, fh, total_size);
7327 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7328 target_fh->handle_type = tswap32(fh->handle_type);
7329 g_free(fh);
7330 unlock_user(target_fh, handle, total_size);
7332 if (put_user_s32(mid, mount_id)) {
7333 return -TARGET_EFAULT;
7336 return ret;
7339 #endif
7341 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7342 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7343 abi_long flags)
7345 struct file_handle *target_fh;
7346 struct file_handle *fh;
7347 unsigned int size, total_size;
7348 abi_long ret;
7350 if (get_user_s32(size, handle)) {
7351 return -TARGET_EFAULT;
7354 total_size = sizeof(struct file_handle) + size;
7355 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7356 if (!target_fh) {
7357 return -TARGET_EFAULT;
7360 fh = g_memdup(target_fh, total_size);
7361 fh->handle_bytes = size;
7362 fh->handle_type = tswap32(target_fh->handle_type);
7364 ret = get_errno(open_by_handle_at(mount_fd, fh,
7365 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7367 g_free(fh);
7369 unlock_user(target_fh, handle, total_size);
7371 return ret;
7373 #endif
7375 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7377 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7379 int host_flags;
7380 target_sigset_t *target_mask;
7381 sigset_t host_mask;
7382 abi_long ret;
7384 if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
7385 return -TARGET_EINVAL;
7387 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7388 return -TARGET_EFAULT;
7391 target_to_host_sigset(&host_mask, target_mask);
7393 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7395 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7396 if (ret >= 0) {
7397 fd_trans_register(ret, &target_signalfd_trans);
7400 unlock_user_struct(target_mask, mask, 0);
7402 return ret;
7404 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* terminating signal lives in the low 7 bits */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7420 static int open_self_cmdline(void *cpu_env, int fd)
7422 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7423 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7424 int i;
7426 for (i = 0; i < bprm->argc; i++) {
7427 size_t len = strlen(bprm->argv[i]) + 1;
7429 if (write(fd, bprm->argv[i], len) != len) {
7430 return -1;
7434 return 0;
7437 static int open_self_maps(void *cpu_env, int fd)
7439 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7440 TaskState *ts = cpu->opaque;
7441 GSList *map_info = read_self_maps();
7442 GSList *s;
7443 int count;
7445 for (s = map_info; s; s = g_slist_next(s)) {
7446 MapInfo *e = (MapInfo *) s->data;
7448 if (h2g_valid(e->start)) {
7449 unsigned long min = e->start;
7450 unsigned long max = e->end;
7451 int flags = page_get_flags(h2g(min));
7452 const char *path;
7454 max = h2g_valid(max - 1) ?
7455 max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
7457 if (page_check_range(h2g(min), max - min, flags) == -1) {
7458 continue;
7461 if (h2g(min) == ts->info->stack_limit) {
7462 path = "[stack]";
7463 } else {
7464 path = e->path;
7467 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7468 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7469 h2g(min), h2g(max - 1) + 1,
7470 e->is_read ? 'r' : '-',
7471 e->is_write ? 'w' : '-',
7472 e->is_exec ? 'x' : '-',
7473 e->is_priv ? 'p' : '-',
7474 (uint64_t) e->offset, e->dev, e->inode);
7475 if (path) {
7476 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7477 } else {
7478 dprintf(fd, "\n");
7483 free_self_maps(map_info);
7485 #ifdef TARGET_VSYSCALL_PAGE
7487 * We only support execution from the vsyscall page.
7488 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7490 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7491 " --xp 00000000 00:00 0",
7492 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7493 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
7494 #endif
7496 return 0;
7499 static int open_self_stat(void *cpu_env, int fd)
7501 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7502 TaskState *ts = cpu->opaque;
7503 g_autoptr(GString) buf = g_string_new(NULL);
7504 int i;
7506 for (i = 0; i < 44; i++) {
7507 if (i == 0) {
7508 /* pid */
7509 g_string_printf(buf, FMT_pid " ", getpid());
7510 } else if (i == 1) {
7511 /* app name */
7512 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7513 bin = bin ? bin + 1 : ts->bprm->argv[0];
7514 g_string_printf(buf, "(%.15s) ", bin);
7515 } else if (i == 27) {
7516 /* stack bottom */
7517 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7518 } else {
7519 /* for the rest, there is MasterCard */
7520 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7523 if (write(fd, buf->str, buf->len) != buf->len) {
7524 return -1;
7528 return 0;
7531 static int open_self_auxv(void *cpu_env, int fd)
7533 CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7534 TaskState *ts = cpu->opaque;
7535 abi_ulong auxv = ts->info->saved_auxv;
7536 abi_ulong len = ts->info->auxv_len;
7537 char *ptr;
7540 * Auxiliary vector is stored in target process stack.
7541 * read in whole auxv vector and copy it to file
7543 ptr = lock_user(VERIFY_READ, auxv, len, 0);
7544 if (ptr != NULL) {
7545 while (len > 0) {
7546 ssize_t r;
7547 r = write(fd, ptr, len);
7548 if (r <= 0) {
7549 break;
7551 len -= r;
7552 ptr += r;
7554 lseek(fd, 0, SEEK_SET);
7555 unlock_user(ptr, auxv, len);
7558 return 0;
/*
 * Return 1 if FILENAME refers to the given /proc entry of the current
 * process — either "/proc/self/<entry>" or "/proc/<our-pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7585 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7586 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-path comparator used for whole-path fake /proc entries. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7591 #endif
7593 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7594 static int open_net_route(void *cpu_env, int fd)
7596 FILE *fp;
7597 char *line = NULL;
7598 size_t len = 0;
7599 ssize_t read;
7601 fp = fopen("/proc/net/route", "r");
7602 if (fp == NULL) {
7603 return -1;
7606 /* read header */
7608 read = getline(&line, &len, fp);
7609 dprintf(fd, "%s", line);
7611 /* read routes */
7613 while ((read = getline(&line, &len, fp)) != -1) {
7614 char iface[16];
7615 uint32_t dest, gw, mask;
7616 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7617 int fields;
7619 fields = sscanf(line,
7620 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7621 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7622 &mask, &mtu, &window, &irtt);
7623 if (fields != 11) {
7624 continue;
7626 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7627 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7628 metric, tswap32(mask), mtu, window, irtt);
7631 free(line);
7632 fclose(fp);
7634 return 0;
7636 #endif
7638 #if defined(TARGET_SPARC)
/* Fake /proc/cpuinfo for SPARC guests: report a sun4u machine type. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char info[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", info);
    return 0;
}
7644 #endif
7646 #if defined(TARGET_HPPA)
/*
 * Fake /proc/cpuinfo for HPPA guests: describe the PA7300LC (B160L)
 * machine that QEMU emulates.  One dprintf; the literals concatenate
 * to exactly the same bytes the original five calls produced.
 */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd,
            "cpu family\t: PA-RISC 1.1e\n"
            "cpu\t\t: PA7300LC (PCX-L2)\n"
            "capabilities\t: os32\n"
            "model\t\t: 9000/778/B160L\n"
            "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
7656 #endif
7658 #if defined(TARGET_M68K)
/* Fake /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    static const char model_line[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", model_line);
    return 0;
}
7664 #endif
7666 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7668 struct fake_open {
7669 const char *filename;
7670 int (*fill)(void *cpu_env, int fd);
7671 int (*cmp)(const char *s1, const char *s2);
7673 const struct fake_open *fake_open;
7674 static const struct fake_open fakes[] = {
7675 { "maps", open_self_maps, is_proc_myself },
7676 { "stat", open_self_stat, is_proc_myself },
7677 { "auxv", open_self_auxv, is_proc_myself },
7678 { "cmdline", open_self_cmdline, is_proc_myself },
7679 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7680 { "/proc/net/route", open_net_route, is_proc },
7681 #endif
7682 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7683 { "/proc/cpuinfo", open_cpuinfo, is_proc },
7684 #endif
7685 #if defined(TARGET_M68K)
7686 { "/proc/hardware", open_hardware, is_proc },
7687 #endif
7688 { NULL, NULL, NULL }
7691 if (is_proc_myself(pathname, "exe")) {
7692 int execfd = qemu_getauxval(AT_EXECFD);
7693 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
7696 for (fake_open = fakes; fake_open->filename; fake_open++) {
7697 if (fake_open->cmp(pathname, fake_open->filename)) {
7698 break;
7702 if (fake_open->filename) {
7703 const char *tmpdir;
7704 char filename[PATH_MAX];
7705 int fd, r;
7707 /* create temporary file to map stat to */
7708 tmpdir = getenv("TMPDIR");
7709 if (!tmpdir)
7710 tmpdir = "/tmp";
7711 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
7712 fd = mkstemp(filename);
7713 if (fd < 0) {
7714 return fd;
7716 unlink(filename);
7718 if ((r = fake_open->fill(cpu_env, fd))) {
7719 int e = errno;
7720 close(fd);
7721 errno = e;
7722 return r;
7724 lseek(fd, 0, SEEK_SET);
7726 return fd;
7729 return safe_openat(dirfd, path(pathname), flags, mode);
7732 #define TIMER_MAGIC 0x0caf0000
7733 #define TIMER_MAGIC_MASK 0xffff0000
7735 /* Convert QEMU provided timer ID back to internal 16bit index format */
7736 static target_timer_t get_timer_id(abi_long arg)
7738 target_timer_t timerid = arg;
7740 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7741 return -TARGET_EINVAL;
7744 timerid &= 0xffff;
7746 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7747 return -TARGET_EINVAL;
7750 return timerid;
7753 static int target_to_host_cpu_mask(unsigned long *host_mask,
7754 size_t host_size,
7755 abi_ulong target_addr,
7756 size_t target_size)
7758 unsigned target_bits = sizeof(abi_ulong) * 8;
7759 unsigned host_bits = sizeof(*host_mask) * 8;
7760 abi_ulong *target_mask;
7761 unsigned i, j;
7763 assert(host_size >= target_size);
7765 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
7766 if (!target_mask) {
7767 return -TARGET_EFAULT;
7769 memset(host_mask, 0, host_size);
7771 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7772 unsigned bit = i * target_bits;
7773 abi_ulong val;
7775 __get_user(val, &target_mask[i]);
7776 for (j = 0; j < target_bits; j++, bit++) {
7777 if (val & (1UL << j)) {
7778 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
7783 unlock_user(target_mask, target_addr, 0);
7784 return 0;
7787 static int host_to_target_cpu_mask(const unsigned long *host_mask,
7788 size_t host_size,
7789 abi_ulong target_addr,
7790 size_t target_size)
7792 unsigned target_bits = sizeof(abi_ulong) * 8;
7793 unsigned host_bits = sizeof(*host_mask) * 8;
7794 abi_ulong *target_mask;
7795 unsigned i, j;
7797 assert(host_size >= target_size);
7799 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
7800 if (!target_mask) {
7801 return -TARGET_EFAULT;
7804 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
7805 unsigned bit = i * target_bits;
7806 abi_ulong val = 0;
7808 for (j = 0; j < target_bits; j++, bit++) {
7809 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
7810 val |= 1UL << j;
7813 __put_user(val, &target_mask[i]);
7816 unlock_user(target_mask, target_addr, target_size);
7817 return 0;
7820 /* This is an internal helper for do_syscall so that it is easier
7821 * to have a single return point, so that actions, such as logging
7822 * of syscall results, can be performed.
7823 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
7825 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
7826 abi_long arg2, abi_long arg3, abi_long arg4,
7827 abi_long arg5, abi_long arg6, abi_long arg7,
7828 abi_long arg8)
7830 CPUState *cpu = env_cpu(cpu_env);
7831 abi_long ret;
7832 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
7833 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
7834 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
7835 || defined(TARGET_NR_statx)
7836 struct stat st;
7837 #endif
7838 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
7839 || defined(TARGET_NR_fstatfs)
7840 struct statfs stfs;
7841 #endif
7842 void *p;
7844 switch(num) {
7845 case TARGET_NR_exit:
7846 /* In old applications this may be used to implement _exit(2).
7847 However in threaded applictions it is used for thread termination,
7848 and _exit_group is used for application termination.
7849 Do thread termination if we have more then one thread. */
7851 if (block_signals()) {
7852 return -TARGET_ERESTARTSYS;
7855 pthread_mutex_lock(&clone_lock);
7857 if (CPU_NEXT(first_cpu)) {
7858 TaskState *ts = cpu->opaque;
7860 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
7861 object_unref(OBJECT(cpu));
7863 * At this point the CPU should be unrealized and removed
7864 * from cpu lists. We can clean-up the rest of the thread
7865 * data without the lock held.
7868 pthread_mutex_unlock(&clone_lock);
7870 if (ts->child_tidptr) {
7871 put_user_u32(0, ts->child_tidptr);
7872 do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7873 NULL, NULL, 0);
7875 thread_cpu = NULL;
7876 g_free(ts);
7877 rcu_unregister_thread();
7878 pthread_exit(NULL);
7881 pthread_mutex_unlock(&clone_lock);
7882 preexit_cleanup(cpu_env, arg1);
7883 _exit(arg1);
7884 return 0; /* avoid warning */
7885 case TARGET_NR_read:
7886 if (arg2 == 0 && arg3 == 0) {
7887 return get_errno(safe_read(arg1, 0, 0));
7888 } else {
7889 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7890 return -TARGET_EFAULT;
7891 ret = get_errno(safe_read(arg1, p, arg3));
7892 if (ret >= 0 &&
7893 fd_trans_host_to_target_data(arg1)) {
7894 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7896 unlock_user(p, arg2, ret);
7898 return ret;
7899 case TARGET_NR_write:
7900 if (arg2 == 0 && arg3 == 0) {
7901 return get_errno(safe_write(arg1, 0, 0));
7903 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7904 return -TARGET_EFAULT;
7905 if (fd_trans_target_to_host_data(arg1)) {
7906 void *copy = g_malloc(arg3);
7907 memcpy(copy, p, arg3);
7908 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
7909 if (ret >= 0) {
7910 ret = get_errno(safe_write(arg1, copy, ret));
7912 g_free(copy);
7913 } else {
7914 ret = get_errno(safe_write(arg1, p, arg3));
7916 unlock_user(p, arg2, 0);
7917 return ret;
7919 #ifdef TARGET_NR_open
7920 case TARGET_NR_open:
7921 if (!(p = lock_user_string(arg1)))
7922 return -TARGET_EFAULT;
7923 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7924 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7925 arg3));
7926 fd_trans_unregister(ret);
7927 unlock_user(p, arg1, 0);
7928 return ret;
7929 #endif
7930 case TARGET_NR_openat:
7931 if (!(p = lock_user_string(arg2)))
7932 return -TARGET_EFAULT;
7933 ret = get_errno(do_openat(cpu_env, arg1, p,
7934 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7935 arg4));
7936 fd_trans_unregister(ret);
7937 unlock_user(p, arg2, 0);
7938 return ret;
7939 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7940 case TARGET_NR_name_to_handle_at:
7941 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7942 return ret;
7943 #endif
7944 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7945 case TARGET_NR_open_by_handle_at:
7946 ret = do_open_by_handle_at(arg1, arg2, arg3);
7947 fd_trans_unregister(ret);
7948 return ret;
7949 #endif
7950 case TARGET_NR_close:
7951 fd_trans_unregister(arg1);
7952 return get_errno(close(arg1));
7954 case TARGET_NR_brk:
7955 return do_brk(arg1);
7956 #ifdef TARGET_NR_fork
7957 case TARGET_NR_fork:
7958 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
7959 #endif
7960 #ifdef TARGET_NR_waitpid
7961 case TARGET_NR_waitpid:
7963 int status;
7964 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7965 if (!is_error(ret) && arg2 && ret
7966 && put_user_s32(host_to_target_waitstatus(status), arg2))
7967 return -TARGET_EFAULT;
7969 return ret;
7970 #endif
7971 #ifdef TARGET_NR_waitid
7972 case TARGET_NR_waitid:
7974 siginfo_t info;
7975 info.si_pid = 0;
7976 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7977 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7978 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7979 return -TARGET_EFAULT;
7980 host_to_target_siginfo(p, &info);
7981 unlock_user(p, arg3, sizeof(target_siginfo_t));
7984 return ret;
7985 #endif
7986 #ifdef TARGET_NR_creat /* not on alpha */
7987 case TARGET_NR_creat:
7988 if (!(p = lock_user_string(arg1)))
7989 return -TARGET_EFAULT;
7990 ret = get_errno(creat(p, arg2));
7991 fd_trans_unregister(ret);
7992 unlock_user(p, arg1, 0);
7993 return ret;
7994 #endif
7995 #ifdef TARGET_NR_link
7996 case TARGET_NR_link:
7998 void * p2;
7999 p = lock_user_string(arg1);
8000 p2 = lock_user_string(arg2);
8001 if (!p || !p2)
8002 ret = -TARGET_EFAULT;
8003 else
8004 ret = get_errno(link(p, p2));
8005 unlock_user(p2, arg2, 0);
8006 unlock_user(p, arg1, 0);
8008 return ret;
8009 #endif
8010 #if defined(TARGET_NR_linkat)
8011 case TARGET_NR_linkat:
8013 void * p2 = NULL;
8014 if (!arg2 || !arg4)
8015 return -TARGET_EFAULT;
8016 p = lock_user_string(arg2);
8017 p2 = lock_user_string(arg4);
8018 if (!p || !p2)
8019 ret = -TARGET_EFAULT;
8020 else
8021 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8022 unlock_user(p, arg2, 0);
8023 unlock_user(p2, arg4, 0);
8025 return ret;
8026 #endif
8027 #ifdef TARGET_NR_unlink
8028 case TARGET_NR_unlink:
8029 if (!(p = lock_user_string(arg1)))
8030 return -TARGET_EFAULT;
8031 ret = get_errno(unlink(p));
8032 unlock_user(p, arg1, 0);
8033 return ret;
8034 #endif
8035 #if defined(TARGET_NR_unlinkat)
8036 case TARGET_NR_unlinkat:
8037 if (!(p = lock_user_string(arg2)))
8038 return -TARGET_EFAULT;
8039 ret = get_errno(unlinkat(arg1, p, arg3));
8040 unlock_user(p, arg2, 0);
8041 return ret;
8042 #endif
8043 case TARGET_NR_execve:
8045 char **argp, **envp;
8046 int argc, envc;
8047 abi_ulong gp;
8048 abi_ulong guest_argp;
8049 abi_ulong guest_envp;
8050 abi_ulong addr;
8051 char **q;
8052 int total_size = 0;
8054 argc = 0;
8055 guest_argp = arg2;
8056 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8057 if (get_user_ual(addr, gp))
8058 return -TARGET_EFAULT;
8059 if (!addr)
8060 break;
8061 argc++;
8063 envc = 0;
8064 guest_envp = arg3;
8065 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8066 if (get_user_ual(addr, gp))
8067 return -TARGET_EFAULT;
8068 if (!addr)
8069 break;
8070 envc++;
8073 argp = g_new0(char *, argc + 1);
8074 envp = g_new0(char *, envc + 1);
8076 for (gp = guest_argp, q = argp; gp;
8077 gp += sizeof(abi_ulong), q++) {
8078 if (get_user_ual(addr, gp))
8079 goto execve_efault;
8080 if (!addr)
8081 break;
8082 if (!(*q = lock_user_string(addr)))
8083 goto execve_efault;
8084 total_size += strlen(*q) + 1;
8086 *q = NULL;
8088 for (gp = guest_envp, q = envp; gp;
8089 gp += sizeof(abi_ulong), q++) {
8090 if (get_user_ual(addr, gp))
8091 goto execve_efault;
8092 if (!addr)
8093 break;
8094 if (!(*q = lock_user_string(addr)))
8095 goto execve_efault;
8096 total_size += strlen(*q) + 1;
8098 *q = NULL;
8100 if (!(p = lock_user_string(arg1)))
8101 goto execve_efault;
8102 /* Although execve() is not an interruptible syscall it is
8103 * a special case where we must use the safe_syscall wrapper:
8104 * if we allow a signal to happen before we make the host
8105 * syscall then we will 'lose' it, because at the point of
8106 * execve the process leaves QEMU's control. So we use the
8107 * safe syscall wrapper to ensure that we either take the
8108 * signal as a guest signal, or else it does not happen
8109 * before the execve completes and makes it the other
8110 * program's problem.
8112 ret = get_errno(safe_execve(p, argp, envp));
8113 unlock_user(p, arg1, 0);
8115 goto execve_end;
8117 execve_efault:
8118 ret = -TARGET_EFAULT;
8120 execve_end:
8121 for (gp = guest_argp, q = argp; *q;
8122 gp += sizeof(abi_ulong), q++) {
8123 if (get_user_ual(addr, gp)
8124 || !addr)
8125 break;
8126 unlock_user(*q, addr, 0);
8128 for (gp = guest_envp, q = envp; *q;
8129 gp += sizeof(abi_ulong), q++) {
8130 if (get_user_ual(addr, gp)
8131 || !addr)
8132 break;
8133 unlock_user(*q, addr, 0);
8136 g_free(argp);
8137 g_free(envp);
8139 return ret;
8140 case TARGET_NR_chdir:
8141 if (!(p = lock_user_string(arg1)))
8142 return -TARGET_EFAULT;
8143 ret = get_errno(chdir(p));
8144 unlock_user(p, arg1, 0);
8145 return ret;
8146 #ifdef TARGET_NR_time
8147 case TARGET_NR_time:
8149 time_t host_time;
8150 ret = get_errno(time(&host_time));
8151 if (!is_error(ret)
8152 && arg1
8153 && put_user_sal(host_time, arg1))
8154 return -TARGET_EFAULT;
8156 return ret;
8157 #endif
8158 #ifdef TARGET_NR_mknod
8159 case TARGET_NR_mknod:
8160 if (!(p = lock_user_string(arg1)))
8161 return -TARGET_EFAULT;
8162 ret = get_errno(mknod(p, arg2, arg3));
8163 unlock_user(p, arg1, 0);
8164 return ret;
8165 #endif
8166 #if defined(TARGET_NR_mknodat)
8167 case TARGET_NR_mknodat:
8168 if (!(p = lock_user_string(arg2)))
8169 return -TARGET_EFAULT;
8170 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8171 unlock_user(p, arg2, 0);
8172 return ret;
8173 #endif
8174 #ifdef TARGET_NR_chmod
8175 case TARGET_NR_chmod:
8176 if (!(p = lock_user_string(arg1)))
8177 return -TARGET_EFAULT;
8178 ret = get_errno(chmod(p, arg2));
8179 unlock_user(p, arg1, 0);
8180 return ret;
8181 #endif
8182 #ifdef TARGET_NR_lseek
8183 case TARGET_NR_lseek:
8184 return get_errno(lseek(arg1, arg2, arg3));
8185 #endif
8186 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8187 /* Alpha specific */
8188 case TARGET_NR_getxpid:
8189 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8190 return get_errno(getpid());
8191 #endif
8192 #ifdef TARGET_NR_getpid
8193 case TARGET_NR_getpid:
8194 return get_errno(getpid());
8195 #endif
8196 case TARGET_NR_mount:
8198 /* need to look at the data field */
8199 void *p2, *p3;
8201 if (arg1) {
8202 p = lock_user_string(arg1);
8203 if (!p) {
8204 return -TARGET_EFAULT;
8206 } else {
8207 p = NULL;
8210 p2 = lock_user_string(arg2);
8211 if (!p2) {
8212 if (arg1) {
8213 unlock_user(p, arg1, 0);
8215 return -TARGET_EFAULT;
8218 if (arg3) {
8219 p3 = lock_user_string(arg3);
8220 if (!p3) {
8221 if (arg1) {
8222 unlock_user(p, arg1, 0);
8224 unlock_user(p2, arg2, 0);
8225 return -TARGET_EFAULT;
8227 } else {
8228 p3 = NULL;
8231 /* FIXME - arg5 should be locked, but it isn't clear how to
8232 * do that since it's not guaranteed to be a NULL-terminated
8233 * string.
8235 if (!arg5) {
8236 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8237 } else {
8238 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
8240 ret = get_errno(ret);
8242 if (arg1) {
8243 unlock_user(p, arg1, 0);
8245 unlock_user(p2, arg2, 0);
8246 if (arg3) {
8247 unlock_user(p3, arg3, 0);
8250 return ret;
8251 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8252 #if defined(TARGET_NR_umount)
8253 case TARGET_NR_umount:
8254 #endif
8255 #if defined(TARGET_NR_oldumount)
8256 case TARGET_NR_oldumount:
8257 #endif
8258 if (!(p = lock_user_string(arg1)))
8259 return -TARGET_EFAULT;
8260 ret = get_errno(umount(p));
8261 unlock_user(p, arg1, 0);
8262 return ret;
8263 #endif
8264 #ifdef TARGET_NR_stime /* not on alpha */
8265 case TARGET_NR_stime:
8267 struct timespec ts;
8268 ts.tv_nsec = 0;
8269 if (get_user_sal(ts.tv_sec, arg1)) {
8270 return -TARGET_EFAULT;
8272 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8274 #endif
8275 #ifdef TARGET_NR_alarm /* not on alpha */
8276 case TARGET_NR_alarm:
8277 return alarm(arg1);
8278 #endif
8279 #ifdef TARGET_NR_pause /* not on alpha */
8280 case TARGET_NR_pause:
8281 if (!block_signals()) {
8282 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8284 return -TARGET_EINTR;
8285 #endif
8286 #ifdef TARGET_NR_utime
8287 case TARGET_NR_utime:
8289 struct utimbuf tbuf, *host_tbuf;
8290 struct target_utimbuf *target_tbuf;
8291 if (arg2) {
8292 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8293 return -TARGET_EFAULT;
8294 tbuf.actime = tswapal(target_tbuf->actime);
8295 tbuf.modtime = tswapal(target_tbuf->modtime);
8296 unlock_user_struct(target_tbuf, arg2, 0);
8297 host_tbuf = &tbuf;
8298 } else {
8299 host_tbuf = NULL;
8301 if (!(p = lock_user_string(arg1)))
8302 return -TARGET_EFAULT;
8303 ret = get_errno(utime(p, host_tbuf));
8304 unlock_user(p, arg1, 0);
8306 return ret;
8307 #endif
8308 #ifdef TARGET_NR_utimes
8309 case TARGET_NR_utimes:
8311 struct timeval *tvp, tv[2];
8312 if (arg2) {
8313 if (copy_from_user_timeval(&tv[0], arg2)
8314 || copy_from_user_timeval(&tv[1],
8315 arg2 + sizeof(struct target_timeval)))
8316 return -TARGET_EFAULT;
8317 tvp = tv;
8318 } else {
8319 tvp = NULL;
8321 if (!(p = lock_user_string(arg1)))
8322 return -TARGET_EFAULT;
8323 ret = get_errno(utimes(p, tvp));
8324 unlock_user(p, arg1, 0);
8326 return ret;
8327 #endif
8328 #if defined(TARGET_NR_futimesat)
8329 case TARGET_NR_futimesat:
8331 struct timeval *tvp, tv[2];
8332 if (arg3) {
8333 if (copy_from_user_timeval(&tv[0], arg3)
8334 || copy_from_user_timeval(&tv[1],
8335 arg3 + sizeof(struct target_timeval)))
8336 return -TARGET_EFAULT;
8337 tvp = tv;
8338 } else {
8339 tvp = NULL;
8341 if (!(p = lock_user_string(arg2))) {
8342 return -TARGET_EFAULT;
8344 ret = get_errno(futimesat(arg1, path(p), tvp));
8345 unlock_user(p, arg2, 0);
8347 return ret;
8348 #endif
8349 #ifdef TARGET_NR_access
8350 case TARGET_NR_access:
8351 if (!(p = lock_user_string(arg1))) {
8352 return -TARGET_EFAULT;
8354 ret = get_errno(access(path(p), arg2));
8355 unlock_user(p, arg1, 0);
8356 return ret;
8357 #endif
8358 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8359 case TARGET_NR_faccessat:
8360 if (!(p = lock_user_string(arg2))) {
8361 return -TARGET_EFAULT;
8363 ret = get_errno(faccessat(arg1, p, arg3, 0));
8364 unlock_user(p, arg2, 0);
8365 return ret;
8366 #endif
8367 #ifdef TARGET_NR_nice /* not on alpha */
8368 case TARGET_NR_nice:
8369 return get_errno(nice(arg1));
8370 #endif
8371 case TARGET_NR_sync:
8372 sync();
8373 return 0;
8374 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8375 case TARGET_NR_syncfs:
8376 return get_errno(syncfs(arg1));
8377 #endif
8378 case TARGET_NR_kill:
8379 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8380 #ifdef TARGET_NR_rename
8381 case TARGET_NR_rename:
8383 void *p2;
8384 p = lock_user_string(arg1);
8385 p2 = lock_user_string(arg2);
8386 if (!p || !p2)
8387 ret = -TARGET_EFAULT;
8388 else
8389 ret = get_errno(rename(p, p2));
8390 unlock_user(p2, arg2, 0);
8391 unlock_user(p, arg1, 0);
8393 return ret;
8394 #endif
8395 #if defined(TARGET_NR_renameat)
8396 case TARGET_NR_renameat:
8398 void *p2;
8399 p = lock_user_string(arg2);
8400 p2 = lock_user_string(arg4);
8401 if (!p || !p2)
8402 ret = -TARGET_EFAULT;
8403 else
8404 ret = get_errno(renameat(arg1, p, arg3, p2));
8405 unlock_user(p2, arg4, 0);
8406 unlock_user(p, arg2, 0);
8408 return ret;
8409 #endif
8410 #if defined(TARGET_NR_renameat2)
8411 case TARGET_NR_renameat2:
8413 void *p2;
8414 p = lock_user_string(arg2);
8415 p2 = lock_user_string(arg4);
8416 if (!p || !p2) {
8417 ret = -TARGET_EFAULT;
8418 } else {
8419 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8421 unlock_user(p2, arg4, 0);
8422 unlock_user(p, arg2, 0);
8424 return ret;
8425 #endif
8426 #ifdef TARGET_NR_mkdir
8427 case TARGET_NR_mkdir:
8428 if (!(p = lock_user_string(arg1)))
8429 return -TARGET_EFAULT;
8430 ret = get_errno(mkdir(p, arg2));
8431 unlock_user(p, arg1, 0);
8432 return ret;
8433 #endif
8434 #if defined(TARGET_NR_mkdirat)
8435 case TARGET_NR_mkdirat:
8436 if (!(p = lock_user_string(arg2)))
8437 return -TARGET_EFAULT;
8438 ret = get_errno(mkdirat(arg1, p, arg3));
8439 unlock_user(p, arg2, 0);
8440 return ret;
8441 #endif
8442 #ifdef TARGET_NR_rmdir
8443 case TARGET_NR_rmdir:
8444 if (!(p = lock_user_string(arg1)))
8445 return -TARGET_EFAULT;
8446 ret = get_errno(rmdir(p));
8447 unlock_user(p, arg1, 0);
8448 return ret;
8449 #endif
8450 case TARGET_NR_dup:
8451 ret = get_errno(dup(arg1));
8452 if (ret >= 0) {
8453 fd_trans_dup(arg1, ret);
8455 return ret;
8456 #ifdef TARGET_NR_pipe
8457 case TARGET_NR_pipe:
8458 return do_pipe(cpu_env, arg1, 0, 0);
8459 #endif
8460 #ifdef TARGET_NR_pipe2
8461 case TARGET_NR_pipe2:
8462 return do_pipe(cpu_env, arg1,
8463 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8464 #endif
8465 case TARGET_NR_times:
8467 struct target_tms *tmsp;
8468 struct tms tms;
8469 ret = get_errno(times(&tms));
8470 if (arg1) {
8471 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8472 if (!tmsp)
8473 return -TARGET_EFAULT;
8474 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8475 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8476 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8477 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8479 if (!is_error(ret))
8480 ret = host_to_target_clock_t(ret);
8482 return ret;
8483 case TARGET_NR_acct:
8484 if (arg1 == 0) {
8485 ret = get_errno(acct(NULL));
8486 } else {
8487 if (!(p = lock_user_string(arg1))) {
8488 return -TARGET_EFAULT;
8490 ret = get_errno(acct(path(p)));
8491 unlock_user(p, arg1, 0);
8493 return ret;
8494 #ifdef TARGET_NR_umount2
8495 case TARGET_NR_umount2:
8496 if (!(p = lock_user_string(arg1)))
8497 return -TARGET_EFAULT;
8498 ret = get_errno(umount2(p, arg2));
8499 unlock_user(p, arg1, 0);
8500 return ret;
8501 #endif
8502 case TARGET_NR_ioctl:
8503 return do_ioctl(arg1, arg2, arg3);
8504 #ifdef TARGET_NR_fcntl
8505 case TARGET_NR_fcntl:
8506 return do_fcntl(arg1, arg2, arg3);
8507 #endif
8508 case TARGET_NR_setpgid:
8509 return get_errno(setpgid(arg1, arg2));
8510 case TARGET_NR_umask:
8511 return get_errno(umask(arg1));
8512 case TARGET_NR_chroot:
8513 if (!(p = lock_user_string(arg1)))
8514 return -TARGET_EFAULT;
8515 ret = get_errno(chroot(p));
8516 unlock_user(p, arg1, 0);
8517 return ret;
8518 #ifdef TARGET_NR_dup2
8519 case TARGET_NR_dup2:
8520 ret = get_errno(dup2(arg1, arg2));
8521 if (ret >= 0) {
8522 fd_trans_dup(arg1, arg2);
8524 return ret;
8525 #endif
8526 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8527 case TARGET_NR_dup3:
8529 int host_flags;
8531 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8532 return -EINVAL;
8534 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8535 ret = get_errno(dup3(arg1, arg2, host_flags));
8536 if (ret >= 0) {
8537 fd_trans_dup(arg1, arg2);
8539 return ret;
8541 #endif
8542 #ifdef TARGET_NR_getppid /* not on alpha */
8543 case TARGET_NR_getppid:
8544 return get_errno(getppid());
8545 #endif
8546 #ifdef TARGET_NR_getpgrp
8547 case TARGET_NR_getpgrp:
8548 return get_errno(getpgrp());
8549 #endif
8550 case TARGET_NR_setsid:
8551 return get_errno(setsid());
8552 #ifdef TARGET_NR_sigaction
8553 case TARGET_NR_sigaction:
8555 #if defined(TARGET_ALPHA)
8556 struct target_sigaction act, oact, *pact = 0;
8557 struct target_old_sigaction *old_act;
8558 if (arg2) {
8559 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8560 return -TARGET_EFAULT;
8561 act._sa_handler = old_act->_sa_handler;
8562 target_siginitset(&act.sa_mask, old_act->sa_mask);
8563 act.sa_flags = old_act->sa_flags;
8564 act.sa_restorer = 0;
8565 unlock_user_struct(old_act, arg2, 0);
8566 pact = &act;
8568 ret = get_errno(do_sigaction(arg1, pact, &oact));
8569 if (!is_error(ret) && arg3) {
8570 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8571 return -TARGET_EFAULT;
8572 old_act->_sa_handler = oact._sa_handler;
8573 old_act->sa_mask = oact.sa_mask.sig[0];
8574 old_act->sa_flags = oact.sa_flags;
8575 unlock_user_struct(old_act, arg3, 1);
8577 #elif defined(TARGET_MIPS)
8578 struct target_sigaction act, oact, *pact, *old_act;
8580 if (arg2) {
8581 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8582 return -TARGET_EFAULT;
8583 act._sa_handler = old_act->_sa_handler;
8584 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8585 act.sa_flags = old_act->sa_flags;
8586 unlock_user_struct(old_act, arg2, 0);
8587 pact = &act;
8588 } else {
8589 pact = NULL;
8592 ret = get_errno(do_sigaction(arg1, pact, &oact));
8594 if (!is_error(ret) && arg3) {
8595 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8596 return -TARGET_EFAULT;
8597 old_act->_sa_handler = oact._sa_handler;
8598 old_act->sa_flags = oact.sa_flags;
8599 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8600 old_act->sa_mask.sig[1] = 0;
8601 old_act->sa_mask.sig[2] = 0;
8602 old_act->sa_mask.sig[3] = 0;
8603 unlock_user_struct(old_act, arg3, 1);
8605 #else
8606 struct target_old_sigaction *old_act;
8607 struct target_sigaction act, oact, *pact;
8608 if (arg2) {
8609 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8610 return -TARGET_EFAULT;
8611 act._sa_handler = old_act->_sa_handler;
8612 target_siginitset(&act.sa_mask, old_act->sa_mask);
8613 act.sa_flags = old_act->sa_flags;
8614 act.sa_restorer = old_act->sa_restorer;
8615 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8616 act.ka_restorer = 0;
8617 #endif
8618 unlock_user_struct(old_act, arg2, 0);
8619 pact = &act;
8620 } else {
8621 pact = NULL;
8623 ret = get_errno(do_sigaction(arg1, pact, &oact));
8624 if (!is_error(ret) && arg3) {
8625 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8626 return -TARGET_EFAULT;
8627 old_act->_sa_handler = oact._sa_handler;
8628 old_act->sa_mask = oact.sa_mask.sig[0];
8629 old_act->sa_flags = oact.sa_flags;
8630 old_act->sa_restorer = oact.sa_restorer;
8631 unlock_user_struct(old_act, arg3, 1);
8633 #endif
8635 return ret;
8636 #endif
8637 case TARGET_NR_rt_sigaction:
8639 #if defined(TARGET_ALPHA)
8640 /* For Alpha and SPARC this is a 5 argument syscall, with
8641 * a 'restorer' parameter which must be copied into the
8642 * sa_restorer field of the sigaction struct.
8643 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8644 * and arg5 is the sigsetsize.
8645 * Alpha also has a separate rt_sigaction struct that it uses
8646 * here; SPARC uses the usual sigaction struct.
8648 struct target_rt_sigaction *rt_act;
8649 struct target_sigaction act, oact, *pact = 0;
8651 if (arg4 != sizeof(target_sigset_t)) {
8652 return -TARGET_EINVAL;
8654 if (arg2) {
8655 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8656 return -TARGET_EFAULT;
8657 act._sa_handler = rt_act->_sa_handler;
8658 act.sa_mask = rt_act->sa_mask;
8659 act.sa_flags = rt_act->sa_flags;
8660 act.sa_restorer = arg5;
8661 unlock_user_struct(rt_act, arg2, 0);
8662 pact = &act;
8664 ret = get_errno(do_sigaction(arg1, pact, &oact));
8665 if (!is_error(ret) && arg3) {
8666 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8667 return -TARGET_EFAULT;
8668 rt_act->_sa_handler = oact._sa_handler;
8669 rt_act->sa_mask = oact.sa_mask;
8670 rt_act->sa_flags = oact.sa_flags;
8671 unlock_user_struct(rt_act, arg3, 1);
8673 #else
8674 #ifdef TARGET_SPARC
8675 target_ulong restorer = arg4;
8676 target_ulong sigsetsize = arg5;
8677 #else
8678 target_ulong sigsetsize = arg4;
8679 #endif
8680 struct target_sigaction *act;
8681 struct target_sigaction *oact;
8683 if (sigsetsize != sizeof(target_sigset_t)) {
8684 return -TARGET_EINVAL;
8686 if (arg2) {
8687 if (!lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8688 return -TARGET_EFAULT;
8690 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8691 act->ka_restorer = restorer;
8692 #endif
8693 } else {
8694 act = NULL;
8696 if (arg3) {
8697 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8698 ret = -TARGET_EFAULT;
8699 goto rt_sigaction_fail;
8701 } else
8702 oact = NULL;
8703 ret = get_errno(do_sigaction(arg1, act, oact));
8704 rt_sigaction_fail:
8705 if (act)
8706 unlock_user_struct(act, arg2, 0);
8707 if (oact)
8708 unlock_user_struct(oact, arg3, 1);
8709 #endif
8711 return ret;
8712 #ifdef TARGET_NR_sgetmask /* not on alpha */
8713 case TARGET_NR_sgetmask:
8715 sigset_t cur_set;
8716 abi_ulong target_set;
8717 ret = do_sigprocmask(0, NULL, &cur_set);
8718 if (!ret) {
8719 host_to_target_old_sigset(&target_set, &cur_set);
8720 ret = target_set;
8723 return ret;
8724 #endif
8725 #ifdef TARGET_NR_ssetmask /* not on alpha */
8726 case TARGET_NR_ssetmask:
8728 sigset_t set, oset;
8729 abi_ulong target_set = arg1;
8730 target_to_host_old_sigset(&set, &target_set);
8731 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8732 if (!ret) {
8733 host_to_target_old_sigset(&target_set, &oset);
8734 ret = target_set;
8737 return ret;
8738 #endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha's osf_sigprocmask passes the mask by value in arg2
             * and returns the old mask in v0, with a3 cleared on success. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic path: old-style sigset pointers in arg2/arg3. */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
8807 case TARGET_NR_rt_sigprocmask:
8809 int how = arg1;
8810 sigset_t set, oldset, *set_ptr;
8812 if (arg4 != sizeof(target_sigset_t)) {
8813 return -TARGET_EINVAL;
8816 if (arg2) {
8817 switch(how) {
8818 case TARGET_SIG_BLOCK:
8819 how = SIG_BLOCK;
8820 break;
8821 case TARGET_SIG_UNBLOCK:
8822 how = SIG_UNBLOCK;
8823 break;
8824 case TARGET_SIG_SETMASK:
8825 how = SIG_SETMASK;
8826 break;
8827 default:
8828 return -TARGET_EINVAL;
8830 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8831 return -TARGET_EFAULT;
8832 target_to_host_sigset(&set, p);
8833 unlock_user(p, arg2, 0);
8834 set_ptr = &set;
8835 } else {
8836 how = 0;
8837 set_ptr = NULL;
8839 ret = do_sigprocmask(how, set_ptr, &oldset);
8840 if (!is_error(ret) && arg3) {
8841 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8842 return -TARGET_EFAULT;
8843 host_to_target_sigset(p, &oldset);
8844 unlock_user(p, arg3, sizeof(target_sigset_t));
8847 return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            /* Fetch pending signals and write them back in old sigset form. */
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
8862 case TARGET_NR_rt_sigpending:
8864 sigset_t set;
8866 /* Yes, this check is >, not != like most. We follow the kernel's
8867 * logic and it does it like this because it implements
8868 * NR_sigpending through the same code path, and in that case
8869 * the old_sigset_t is smaller in size.
8871 if (arg2 > sizeof(target_sigset_t)) {
8872 return -TARGET_EINVAL;
8875 ret = get_errno(sigpending(&set));
8876 if (!is_error(ret)) {
8877 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8878 return -TARGET_EFAULT;
8879 host_to_target_sigset(p, &set);
8880 unlock_user(p, arg1, sizeof(target_sigset_t));
8883 return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            /* Alpha passes the mask by value. */
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            /* Remember we are mid-sigsuspend unless the syscall will
             * restart, so signal delivery can restore the old mask. */
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
8905 case TARGET_NR_rt_sigsuspend:
8907 TaskState *ts = cpu->opaque;
8909 if (arg2 != sizeof(target_sigset_t)) {
8910 return -TARGET_EINVAL;
8912 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8913 return -TARGET_EFAULT;
8914 target_to_host_sigset(&ts->sigsuspend_mask, p);
8915 unlock_user(p, arg1, 0);
8916 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8917 SIGSET_T_SIZE));
8918 if (ret != -TARGET_ERESTARTSYS) {
8919 ts->in_sigsuspend = 1;
8922 return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* The host signal number must be mapped back for the guest. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
8963 case TARGET_NR_rt_sigqueueinfo:
8965 siginfo_t uinfo;
8967 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8968 if (!p) {
8969 return -TARGET_EFAULT;
8971 target_to_host_siginfo(&uinfo, p);
8972 unlock_user(p, arg3, 0);
8973 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8975 return ret;
8976 case TARGET_NR_rt_tgsigqueueinfo:
8978 siginfo_t uinfo;
8980 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
8981 if (!p) {
8982 return -TARGET_EFAULT;
8984 target_to_host_siginfo(&uinfo, p);
8985 unlock_user(p, arg4, 0);
8986 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
8988 return ret;
8989 #ifdef TARGET_NR_sigreturn
8990 case TARGET_NR_sigreturn:
8991 if (block_signals()) {
8992 return -TARGET_ERESTARTSYS;
8994 return do_sigreturn(cpu_env);
8995 #endif
8996 case TARGET_NR_rt_sigreturn:
8997 if (block_signals()) {
8998 return -TARGET_ERESTARTSYS;
9000 return do_rt_sigreturn(cpu_env);
9001 case TARGET_NR_sethostname:
9002 if (!(p = lock_user_string(arg1)))
9003 return -TARGET_EFAULT;
9004 ret = get_errno(sethostname(p, arg2));
9005 unlock_user(p, arg1, 0);
9006 return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
9035 #ifdef TARGET_NR_getrlimit
9036 case TARGET_NR_getrlimit:
9038 int resource = target_to_host_resource(arg1);
9039 struct target_rlimit *target_rlim;
9040 struct rlimit rlim;
9042 ret = get_errno(getrlimit(resource, &rlim));
9043 if (!is_error(ret)) {
9044 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9045 return -TARGET_EFAULT;
9046 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9047 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9048 unlock_user_struct(target_rlim, arg2, 1);
9051 return ret;
9052 #endif
9053 case TARGET_NR_getrusage:
9055 struct rusage rusage;
9056 ret = get_errno(getrusage(arg1, &rusage));
9057 if (!is_error(ret)) {
9058 ret = host_to_target_rusage(arg2, &rusage);
9061 return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                /* Either output pointer may be NULL in the guest. */
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            /* Either input pointer may be NULL in the guest. */
            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        {
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            sigset_t set;
            struct {
                sigset_t *set;
                size_t size;
            } sig, *sig_ptr;

            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            n = arg1;
            rfd_addr = arg2;
            wfd_addr = arg3;
            efd_addr = arg4;
            ts_addr = arg5;

            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            if (ret) {
                return ret;
            }
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
            if (ret) {
                return ret;
            }

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
            if (ts_addr) {
                if (target_to_host_timespec(&ts, ts_addr)) {
                    return -TARGET_EFAULT;
                }
                ts_ptr = &ts;
            } else {
                ts_ptr = NULL;
            }

            /* Extract the two packed args for the sigset */
            if (arg6) {
                sig_ptr = &sig;
                sig.size = SIGSET_T_SIZE;

                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                if (!arg7) {
                    return -TARGET_EFAULT;
                }
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                if (arg_sigset) {
                    sig.set = &set;
                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        return -TARGET_EINVAL;
                    }
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                        return -TARGET_EFAULT;
                    }
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);
                } else {
                    sig.set = NULL;
                }
            } else {
                sig_ptr = NULL;
            }

            ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                          ts_ptr, sig_ptr));

            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                    return -TARGET_EFAULT;
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                    return -TARGET_EFAULT;
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                    return -TARGET_EFAULT;

                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            /* unlock_user() is a no-op on NULL, so both unlocks are safe. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* /proc/self/exe must resolve to the emulated binary,
                 * not to QEMU itself. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Redirect /proc/self/exe to the emulated binary's path. */
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real) ;
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
9311 case TARGET_NR_reboot:
9312 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9313 /* arg4 must be ignored in all other cases */
9314 p = lock_user_string(arg4);
9315 if (!p) {
9316 return -TARGET_EFAULT;
9318 ret = get_errno(reboot(arg1, arg2, arg3, p));
9319 unlock_user(p, arg4, 0);
9320 } else {
9321 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9323 return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* These targets pass a pointer to a 6-word argument block
             * instead of the arguments in registers. */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 takes the offset in units of (1 << MMAP_SHIFT) bytes. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
9364 case TARGET_NR_munmap:
9365 return get_errno(target_munmap(arg1, arg2));
9366 case TARGET_NR_mprotect:
9368 TaskState *ts = cpu->opaque;
9369 /* Special hack to detect libc making the stack executable. */
9370 if ((arg3 & PROT_GROWSDOWN)
9371 && arg1 >= ts->info->stack_limit
9372 && arg1 <= ts->info->start_stack) {
9373 arg3 &= ~PROT_GROWSDOWN;
9374 arg2 = arg2 + arg1 - ts->info->stack_limit;
9375 arg1 = ts->info->stack_limit;
9378 return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
9404 #ifdef TARGET_NR_truncate
9405 case TARGET_NR_truncate:
9406 if (!(p = lock_user_string(arg1)))
9407 return -TARGET_EFAULT;
9408 ret = get_errno(truncate(p, arg2));
9409 unlock_user(p, arg1, 0);
9410 return ret;
9411 #endif
9412 #ifdef TARGET_NR_ftruncate
9413 case TARGET_NR_ftruncate:
9414 return get_errno(ftruncate(arg1, arg2));
9415 #endif
9416 case TARGET_NR_fchmod:
9417 return get_errno(fchmod(arg1, arg2));
9418 #if defined(TARGET_NR_fchmodat)
9419 case TARGET_NR_fchmodat:
9420 if (!(p = lock_user_string(arg2)))
9421 return -TARGET_EFAULT;
9422 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9423 unlock_user(p, arg2, 0);
9424 return ret;
9425 #endif
9426 case TARGET_NR_getpriority:
9427 /* Note that negative values are valid for getpriority, so we must
9428 differentiate based on errno settings. */
9429 errno = 0;
9430 ret = getpriority(arg1, arg2);
9431 if (ret == -1 && errno != 0) {
9432 return -host_to_target_errno(errno);
9434 #ifdef TARGET_ALPHA
9435 /* Return value is the unbiased priority. Signal no error. */
9436 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9437 #else
9438 /* Return value is a biased priority to avoid negative numbers. */
9439 ret = 20 - ret;
9440 #endif
9441 return ret;
9442 case TARGET_NR_setpriority:
9443 return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Shared tail: copy the host statfs result out to the guest.
         * fstatfs jumps here with ret/stfs already filled in. */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        /* 64-bit variant: the output buffer is in arg3, not arg2. */
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point; decoded in do_socketcall(). */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* ret is the byte count actually written back to the guest. */
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* No buffer is involved for these actions. */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                    return ret;
                }
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
9648 case TARGET_NR_setitimer:
9650 struct itimerval value, ovalue, *pvalue;
9652 if (arg2) {
9653 pvalue = &value;
9654 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9655 || copy_from_user_timeval(&pvalue->it_value,
9656 arg2 + sizeof(struct target_timeval)))
9657 return -TARGET_EFAULT;
9658 } else {
9659 pvalue = NULL;
9661 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9662 if (!is_error(ret) && arg3) {
9663 if (copy_to_user_timeval(arg3,
9664 &ovalue.it_interval)
9665 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9666 &ovalue.it_value))
9667 return -TARGET_EFAULT;
9670 return ret;
9671 case TARGET_NR_getitimer:
9673 struct itimerval value;
9675 ret = get_errno(getitimer(arg1, &value));
9676 if (!is_error(ret) && arg2) {
9677 if (copy_to_user_timeval(arg2,
9678 &value.it_interval)
9679 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9680 &value.it_value))
9681 return -TARGET_EFAULT;
9684 return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            /* Shared tail: convert the host struct stat into the guest's
             * target_stat layout at arg2. */
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if (_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700) && \
    defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
9743 case TARGET_NR_vhangup:
9744 return get_errno(vhangup());
9745 #ifdef TARGET_NR_syscall
9746 case TARGET_NR_syscall:
9747 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9748 arg6, arg7, arg8, 0);
9749 #endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status back when a child was actually reaped. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
9787 case TARGET_NR_sysinfo:
9789 struct target_sysinfo *target_value;
9790 struct sysinfo value;
9791 ret = get_errno(sysinfo(&value));
9792 if (!is_error(ret) && arg1)
9794 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9795 return -TARGET_EFAULT;
9796 __put_user(value.uptime, &target_value->uptime);
9797 __put_user(value.loads[0], &target_value->loads[0]);
9798 __put_user(value.loads[1], &target_value->loads[1]);
9799 __put_user(value.loads[2], &target_value->loads[2]);
9800 __put_user(value.totalram, &target_value->totalram);
9801 __put_user(value.freeram, &target_value->freeram);
9802 __put_user(value.sharedram, &target_value->sharedram);
9803 __put_user(value.bufferram, &target_value->bufferram);
9804 __put_user(value.totalswap, &target_value->totalswap);
9805 __put_user(value.freeswap, &target_value->freeswap);
9806 __put_user(value.procs, &target_value->procs);
9807 __put_user(value.totalhigh, &target_value->totalhigh);
9808 __put_user(value.freehigh, &target_value->freehigh);
9809 __put_user(value.mem_unit, &target_value->mem_unit);
9810 unlock_user_struct(target_value, arg1, 1);
9813 return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC entry point; decoded in do_ipc(). */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
9866 case TARGET_NR_fsync:
9867 return get_errno(fsync(arg1));
9868 case TARGET_NR_clone:
9869 /* Linux manages to have three different orderings for its
9870 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9871 * match the kernel's CONFIG_CLONE_* settings.
9872 * Microblaze is further special in that it uses a sixth
9873 * implicit argument to clone for the TLS pointer.
9875 #if defined(TARGET_MICROBLAZE)
9876 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9877 #elif defined(TARGET_CLONE_BACKWARDS)
9878 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9879 #elif defined(TARGET_CLONE_BACKWARDS2)
9880 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9881 #else
9882 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9883 #endif
9884 return ret;
9885 #ifdef __NR_exit_group
9886 /* new thread calls */
9887 case TARGET_NR_exit_group:
9888 preexit_cleanup(cpu_env, arg1);
9889 return get_errno(exit_group(arg1));
9890 #endif
9891 case TARGET_NR_setdomainname:
9892 if (!(p = lock_user_string(arg1)))
9893 return -TARGET_EFAULT;
9894 ret = get_errno(setdomainname(p, arg2));
9895 unlock_user(p, arg1, 0);
9896 return ret;
9897 case TARGET_NR_uname:
9898 /* no need to transcode because we use the linux syscall */
9900 struct new_utsname * buf;
9902 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9903 return -TARGET_EFAULT;
9904 ret = get_errno(sys_uname(buf));
9905 if (!is_error(ret)) {
9906 /* Overwrite the native machine name with whatever is being
9907 emulated. */
9908 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
9909 sizeof(buf->machine));
9910 /* Allow the user to override the reported release. */
9911 if (qemu_uname_release && *qemu_uname_release) {
9912 g_strlcpy(buf->release, qemu_uname_release,
9913 sizeof(buf->release));
9916 unlock_user_struct(buf, arg1, 1);
9918 return ret;
9919 #ifdef TARGET_I386
9920 case TARGET_NR_modify_ldt:
9921 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
9922 #if !defined(TARGET_X86_64)
9923 case TARGET_NR_vm86:
9924 return do_vm86(cpu_env, arg1, arg2);
9925 #endif
9926 #endif
9927 #if defined(TARGET_NR_adjtimex)
9928 case TARGET_NR_adjtimex:
9930 struct timex host_buf;
9932 if (target_to_host_timex(&host_buf, arg1) != 0) {
9933 return -TARGET_EFAULT;
9935 ret = get_errno(adjtimex(&host_buf));
9936 if (!is_error(ret)) {
9937 if (host_to_target_timex(arg1, &host_buf) != 0) {
9938 return -TARGET_EFAULT;
9942 return ret;
9943 #endif
9944 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9945 case TARGET_NR_clock_adjtime:
9947 struct timex htx, *phtx = &htx;
9949 if (target_to_host_timex(phtx, arg2) != 0) {
9950 return -TARGET_EFAULT;
9952 ret = get_errno(clock_adjtime(arg1, phtx));
9953 if (!is_error(ret) && phtx) {
9954 if (host_to_target_timex(arg2, phtx) != 0) {
9955 return -TARGET_EFAULT;
9959 return ret;
9960 #endif
9961 case TARGET_NR_getpgid:
9962 return get_errno(getpgid(arg1));
9963 case TARGET_NR_fchdir:
9964 return get_errno(fchdir(arg1));
9965 case TARGET_NR_personality:
9966 return get_errno(personality(arg1));
9967 #ifdef TARGET_NR__llseek /* Not on alpha */
9968 case TARGET_NR__llseek:
9970 int64_t res;
9971 #if !defined(__NR_llseek)
9972 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9973 if (res == -1) {
9974 ret = get_errno(res);
9975 } else {
9976 ret = 0;
9978 #else
9979 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9980 #endif
9981 if ((ret == 0) && put_user_s64(res, arg4)) {
9982 return -TARGET_EFAULT;
9985 return ret;
9986 #endif
9987 #ifdef TARGET_NR_getdents
9988 case TARGET_NR_getdents:
9989 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
9990 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9992 struct target_dirent *target_dirp;
9993 struct linux_dirent *dirp;
9994 abi_long count = arg3;
9996 dirp = g_try_malloc(count);
9997 if (!dirp) {
9998 return -TARGET_ENOMEM;
10001 ret = get_errno(sys_getdents(arg1, dirp, count));
10002 if (!is_error(ret)) {
10003 struct linux_dirent *de;
10004 struct target_dirent *tde;
10005 int len = ret;
10006 int reclen, treclen;
10007 int count1, tnamelen;
10009 count1 = 0;
10010 de = dirp;
10011 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10012 return -TARGET_EFAULT;
10013 tde = target_dirp;
10014 while (len > 0) {
10015 reclen = de->d_reclen;
10016 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10017 assert(tnamelen >= 0);
10018 treclen = tnamelen + offsetof(struct target_dirent, d_name);
10019 assert(count1 + treclen <= count);
10020 tde->d_reclen = tswap16(treclen);
10021 tde->d_ino = tswapal(de->d_ino);
10022 tde->d_off = tswapal(de->d_off);
10023 memcpy(tde->d_name, de->d_name, tnamelen);
10024 de = (struct linux_dirent *)((char *)de + reclen);
10025 len -= reclen;
10026 tde = (struct target_dirent *)((char *)tde + treclen);
10027 count1 += treclen;
10029 ret = count1;
10030 unlock_user(target_dirp, arg2, ret);
10032 g_free(dirp);
10034 #else
10036 struct linux_dirent *dirp;
10037 abi_long count = arg3;
10039 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10040 return -TARGET_EFAULT;
10041 ret = get_errno(sys_getdents(arg1, dirp, count));
10042 if (!is_error(ret)) {
10043 struct linux_dirent *de;
10044 int len = ret;
10045 int reclen;
10046 de = dirp;
10047 while (len > 0) {
10048 reclen = de->d_reclen;
10049 if (reclen > len)
10050 break;
10051 de->d_reclen = tswap16(reclen);
10052 tswapls(&de->d_ino);
10053 tswapls(&de->d_off);
10054 de = (struct linux_dirent *)((char *)de + reclen);
10055 len -= reclen;
10058 unlock_user(dirp, arg2, ret);
10060 #endif
10061 #else
10062 /* Implement getdents in terms of getdents64 */
10064 struct linux_dirent64 *dirp;
10065 abi_long count = arg3;
10067 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10068 if (!dirp) {
10069 return -TARGET_EFAULT;
10071 ret = get_errno(sys_getdents64(arg1, dirp, count));
10072 if (!is_error(ret)) {
10073 /* Convert the dirent64 structs to target dirent. We do this
10074 * in-place, since we can guarantee that a target_dirent is no
10075 * larger than a dirent64; however this means we have to be
10076 * careful to read everything before writing in the new format.
10078 struct linux_dirent64 *de;
10079 struct target_dirent *tde;
10080 int len = ret;
10081 int tlen = 0;
10083 de = dirp;
10084 tde = (struct target_dirent *)dirp;
10085 while (len > 0) {
10086 int namelen, treclen;
10087 int reclen = de->d_reclen;
10088 uint64_t ino = de->d_ino;
10089 int64_t off = de->d_off;
10090 uint8_t type = de->d_type;
10092 namelen = strlen(de->d_name);
10093 treclen = offsetof(struct target_dirent, d_name)
10094 + namelen + 2;
10095 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10097 memmove(tde->d_name, de->d_name, namelen + 1);
10098 tde->d_ino = tswapal(ino);
10099 tde->d_off = tswapal(off);
10100 tde->d_reclen = tswap16(treclen);
10101 /* The target_dirent type is in what was formerly a padding
10102 * byte at the end of the structure:
10104 *(((char *)tde) + treclen - 1) = type;
10106 de = (struct linux_dirent64 *)((char *)de + reclen);
10107 tde = (struct target_dirent *)((char *)tde + treclen);
10108 len -= reclen;
10109 tlen += treclen;
10111 ret = tlen;
10113 unlock_user(dirp, arg2, ret);
10115 #endif
10116 return ret;
10117 #endif /* TARGET_NR_getdents */
10118 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10119 case TARGET_NR_getdents64:
10121 struct linux_dirent64 *dirp;
10122 abi_long count = arg3;
10123 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10124 return -TARGET_EFAULT;
10125 ret = get_errno(sys_getdents64(arg1, dirp, count));
10126 if (!is_error(ret)) {
10127 struct linux_dirent64 *de;
10128 int len = ret;
10129 int reclen;
10130 de = dirp;
10131 while (len > 0) {
10132 reclen = de->d_reclen;
10133 if (reclen > len)
10134 break;
10135 de->d_reclen = tswap16(reclen);
10136 tswap64s((uint64_t *)&de->d_ino);
10137 tswap64s((uint64_t *)&de->d_off);
10138 de = (struct linux_dirent64 *)((char *)de + reclen);
10139 len -= reclen;
10142 unlock_user(dirp, arg2, ret);
10144 return ret;
10145 #endif /* TARGET_NR_getdents64 */
10146 #if defined(TARGET_NR__newselect)
10147 case TARGET_NR__newselect:
10148 return do_select(arg1, arg2, arg3, arg4, arg5);
10149 #endif
10150 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10151 # ifdef TARGET_NR_poll
10152 case TARGET_NR_poll:
10153 # endif
10154 # ifdef TARGET_NR_ppoll
10155 case TARGET_NR_ppoll:
10156 # endif
10158 struct target_pollfd *target_pfd;
10159 unsigned int nfds = arg2;
10160 struct pollfd *pfd;
10161 unsigned int i;
10163 pfd = NULL;
10164 target_pfd = NULL;
10165 if (nfds) {
10166 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
10167 return -TARGET_EINVAL;
10170 target_pfd = lock_user(VERIFY_WRITE, arg1,
10171 sizeof(struct target_pollfd) * nfds, 1);
10172 if (!target_pfd) {
10173 return -TARGET_EFAULT;
10176 pfd = alloca(sizeof(struct pollfd) * nfds);
10177 for (i = 0; i < nfds; i++) {
10178 pfd[i].fd = tswap32(target_pfd[i].fd);
10179 pfd[i].events = tswap16(target_pfd[i].events);
10183 switch (num) {
10184 # ifdef TARGET_NR_ppoll
10185 case TARGET_NR_ppoll:
10187 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
10188 target_sigset_t *target_set;
10189 sigset_t _set, *set = &_set;
10191 if (arg3) {
10192 if (target_to_host_timespec(timeout_ts, arg3)) {
10193 unlock_user(target_pfd, arg1, 0);
10194 return -TARGET_EFAULT;
10196 } else {
10197 timeout_ts = NULL;
10200 if (arg4) {
10201 if (arg5 != sizeof(target_sigset_t)) {
10202 unlock_user(target_pfd, arg1, 0);
10203 return -TARGET_EINVAL;
10206 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10207 if (!target_set) {
10208 unlock_user(target_pfd, arg1, 0);
10209 return -TARGET_EFAULT;
10211 target_to_host_sigset(set, target_set);
10212 } else {
10213 set = NULL;
10216 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10217 set, SIGSET_T_SIZE));
10219 if (!is_error(ret) && arg3) {
10220 host_to_target_timespec(arg3, timeout_ts);
10222 if (arg4) {
10223 unlock_user(target_set, arg4, 0);
10225 break;
10227 # endif
10228 # ifdef TARGET_NR_poll
10229 case TARGET_NR_poll:
10231 struct timespec ts, *pts;
10233 if (arg3 >= 0) {
10234 /* Convert ms to secs, ns */
10235 ts.tv_sec = arg3 / 1000;
10236 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10237 pts = &ts;
10238 } else {
10239 /* -ve poll() timeout means "infinite" */
10240 pts = NULL;
10242 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10243 break;
10245 # endif
10246 default:
10247 g_assert_not_reached();
10250 if (!is_error(ret)) {
10251 for(i = 0; i < nfds; i++) {
10252 target_pfd[i].revents = tswap16(pfd[i].revents);
10255 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10257 return ret;
10258 #endif
10259 case TARGET_NR_flock:
10260 /* NOTE: the flock constant seems to be the same for every
10261 Linux platform */
10262 return get_errno(safe_flock(arg1, arg2));
10263 case TARGET_NR_readv:
10265 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10266 if (vec != NULL) {
10267 ret = get_errno(safe_readv(arg1, vec, arg3));
10268 unlock_iovec(vec, arg2, arg3, 1);
10269 } else {
10270 ret = -host_to_target_errno(errno);
10273 return ret;
10274 case TARGET_NR_writev:
10276 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10277 if (vec != NULL) {
10278 ret = get_errno(safe_writev(arg1, vec, arg3));
10279 unlock_iovec(vec, arg2, arg3, 0);
10280 } else {
10281 ret = -host_to_target_errno(errno);
10284 return ret;
10285 #if defined(TARGET_NR_preadv)
10286 case TARGET_NR_preadv:
10288 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10289 if (vec != NULL) {
10290 unsigned long low, high;
10292 target_to_host_low_high(arg4, arg5, &low, &high);
10293 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10294 unlock_iovec(vec, arg2, arg3, 1);
10295 } else {
10296 ret = -host_to_target_errno(errno);
10299 return ret;
10300 #endif
10301 #if defined(TARGET_NR_pwritev)
10302 case TARGET_NR_pwritev:
10304 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10305 if (vec != NULL) {
10306 unsigned long low, high;
10308 target_to_host_low_high(arg4, arg5, &low, &high);
10309 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10310 unlock_iovec(vec, arg2, arg3, 0);
10311 } else {
10312 ret = -host_to_target_errno(errno);
10315 return ret;
10316 #endif
10317 case TARGET_NR_getsid:
10318 return get_errno(getsid(arg1));
10319 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10320 case TARGET_NR_fdatasync:
10321 return get_errno(fdatasync(arg1));
10322 #endif
10323 #ifdef TARGET_NR__sysctl
10324 case TARGET_NR__sysctl:
10325 /* We don't implement this, but ENOTDIR is always a safe
10326 return value. */
10327 return -TARGET_ENOTDIR;
10328 #endif
10329 case TARGET_NR_sched_getaffinity:
10331 unsigned int mask_size;
10332 unsigned long *mask;
10335 * sched_getaffinity needs multiples of ulong, so need to take
10336 * care of mismatches between target ulong and host ulong sizes.
10338 if (arg2 & (sizeof(abi_ulong) - 1)) {
10339 return -TARGET_EINVAL;
10341 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10343 mask = alloca(mask_size);
10344 memset(mask, 0, mask_size);
10345 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10347 if (!is_error(ret)) {
10348 if (ret > arg2) {
10349 /* More data returned than the caller's buffer will fit.
10350 * This only happens if sizeof(abi_long) < sizeof(long)
10351 * and the caller passed us a buffer holding an odd number
10352 * of abi_longs. If the host kernel is actually using the
10353 * extra 4 bytes then fail EINVAL; otherwise we can just
10354 * ignore them and only copy the interesting part.
10356 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10357 if (numcpus > arg2 * 8) {
10358 return -TARGET_EINVAL;
10360 ret = arg2;
10363 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10364 return -TARGET_EFAULT;
10368 return ret;
10369 case TARGET_NR_sched_setaffinity:
10371 unsigned int mask_size;
10372 unsigned long *mask;
10375 * sched_setaffinity needs multiples of ulong, so need to take
10376 * care of mismatches between target ulong and host ulong sizes.
10378 if (arg2 & (sizeof(abi_ulong) - 1)) {
10379 return -TARGET_EINVAL;
10381 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10382 mask = alloca(mask_size);
10384 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10385 if (ret) {
10386 return ret;
10389 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10391 case TARGET_NR_getcpu:
10393 unsigned cpu, node;
10394 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10395 arg2 ? &node : NULL,
10396 NULL));
10397 if (is_error(ret)) {
10398 return ret;
10400 if (arg1 && put_user_u32(cpu, arg1)) {
10401 return -TARGET_EFAULT;
10403 if (arg2 && put_user_u32(node, arg2)) {
10404 return -TARGET_EFAULT;
10407 return ret;
10408 case TARGET_NR_sched_setparam:
10410 struct sched_param *target_schp;
10411 struct sched_param schp;
10413 if (arg2 == 0) {
10414 return -TARGET_EINVAL;
10416 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10417 return -TARGET_EFAULT;
10418 schp.sched_priority = tswap32(target_schp->sched_priority);
10419 unlock_user_struct(target_schp, arg2, 0);
10420 return get_errno(sched_setparam(arg1, &schp));
10422 case TARGET_NR_sched_getparam:
10424 struct sched_param *target_schp;
10425 struct sched_param schp;
10427 if (arg2 == 0) {
10428 return -TARGET_EINVAL;
10430 ret = get_errno(sched_getparam(arg1, &schp));
10431 if (!is_error(ret)) {
10432 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10433 return -TARGET_EFAULT;
10434 target_schp->sched_priority = tswap32(schp.sched_priority);
10435 unlock_user_struct(target_schp, arg2, 1);
10438 return ret;
10439 case TARGET_NR_sched_setscheduler:
10441 struct sched_param *target_schp;
10442 struct sched_param schp;
10443 if (arg3 == 0) {
10444 return -TARGET_EINVAL;
10446 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10447 return -TARGET_EFAULT;
10448 schp.sched_priority = tswap32(target_schp->sched_priority);
10449 unlock_user_struct(target_schp, arg3, 0);
10450 return get_errno(sched_setscheduler(arg1, arg2, &schp));
10452 case TARGET_NR_sched_getscheduler:
10453 return get_errno(sched_getscheduler(arg1));
10454 case TARGET_NR_sched_yield:
10455 return get_errno(sched_yield());
10456 case TARGET_NR_sched_get_priority_max:
10457 return get_errno(sched_get_priority_max(arg1));
10458 case TARGET_NR_sched_get_priority_min:
10459 return get_errno(sched_get_priority_min(arg1));
10460 #ifdef TARGET_NR_sched_rr_get_interval
10461 case TARGET_NR_sched_rr_get_interval:
10463 struct timespec ts;
10464 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10465 if (!is_error(ret)) {
10466 ret = host_to_target_timespec(arg2, &ts);
10469 return ret;
10470 #endif
10471 #if defined(TARGET_NR_nanosleep)
10472 case TARGET_NR_nanosleep:
10474 struct timespec req, rem;
10475 target_to_host_timespec(&req, arg1);
10476 ret = get_errno(safe_nanosleep(&req, &rem));
10477 if (is_error(ret) && arg2) {
10478 host_to_target_timespec(arg2, &rem);
10481 return ret;
10482 #endif
10483 case TARGET_NR_prctl:
10484 switch (arg1) {
10485 case PR_GET_PDEATHSIG:
10487 int deathsig;
10488 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10489 if (!is_error(ret) && arg2
10490 && put_user_ual(deathsig, arg2)) {
10491 return -TARGET_EFAULT;
10493 return ret;
10495 #ifdef PR_GET_NAME
10496 case PR_GET_NAME:
10498 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10499 if (!name) {
10500 return -TARGET_EFAULT;
10502 ret = get_errno(prctl(arg1, (unsigned long)name,
10503 arg3, arg4, arg5));
10504 unlock_user(name, arg2, 16);
10505 return ret;
10507 case PR_SET_NAME:
10509 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10510 if (!name) {
10511 return -TARGET_EFAULT;
10513 ret = get_errno(prctl(arg1, (unsigned long)name,
10514 arg3, arg4, arg5));
10515 unlock_user(name, arg2, 0);
10516 return ret;
10518 #endif
10519 #ifdef TARGET_MIPS
10520 case TARGET_PR_GET_FP_MODE:
10522 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10523 ret = 0;
10524 if (env->CP0_Status & (1 << CP0St_FR)) {
10525 ret |= TARGET_PR_FP_MODE_FR;
10527 if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10528 ret |= TARGET_PR_FP_MODE_FRE;
10530 return ret;
10532 case TARGET_PR_SET_FP_MODE:
10534 CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10535 bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10536 bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10537 bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10538 bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10540 const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10541 TARGET_PR_FP_MODE_FRE;
10543 /* If nothing to change, return right away, successfully. */
10544 if (old_fr == new_fr && old_fre == new_fre) {
10545 return 0;
10547 /* Check the value is valid */
10548 if (arg2 & ~known_bits) {
10549 return -TARGET_EOPNOTSUPP;
10551 /* Setting FRE without FR is not supported. */
10552 if (new_fre && !new_fr) {
10553 return -TARGET_EOPNOTSUPP;
10555 if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10556 /* FR1 is not supported */
10557 return -TARGET_EOPNOTSUPP;
10559 if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10560 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10561 /* cannot set FR=0 */
10562 return -TARGET_EOPNOTSUPP;
10564 if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10565 /* Cannot set FRE=1 */
10566 return -TARGET_EOPNOTSUPP;
10569 int i;
10570 fpr_t *fpr = env->active_fpu.fpr;
10571 for (i = 0; i < 32 ; i += 2) {
10572 if (!old_fr && new_fr) {
10573 fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10574 } else if (old_fr && !new_fr) {
10575 fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10579 if (new_fr) {
10580 env->CP0_Status |= (1 << CP0St_FR);
10581 env->hflags |= MIPS_HFLAG_F64;
10582 } else {
10583 env->CP0_Status &= ~(1 << CP0St_FR);
10584 env->hflags &= ~MIPS_HFLAG_F64;
10586 if (new_fre) {
10587 env->CP0_Config5 |= (1 << CP0C5_FRE);
10588 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10589 env->hflags |= MIPS_HFLAG_FRE;
10591 } else {
10592 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10593 env->hflags &= ~MIPS_HFLAG_FRE;
10596 return 0;
10598 #endif /* MIPS */
10599 #ifdef TARGET_AARCH64
10600 case TARGET_PR_SVE_SET_VL:
10602 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10603 * PR_SVE_VL_INHERIT. Note the kernel definition
10604 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10605 * even though the current architectural maximum is VQ=16.
10607 ret = -TARGET_EINVAL;
10608 if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10609 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10610 CPUARMState *env = cpu_env;
10611 ARMCPU *cpu = env_archcpu(env);
10612 uint32_t vq, old_vq;
10614 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10615 vq = MAX(arg2 / 16, 1);
10616 vq = MIN(vq, cpu->sve_max_vq);
10618 if (vq < old_vq) {
10619 aarch64_sve_narrow_vq(env, vq);
10621 env->vfp.zcr_el[1] = vq - 1;
10622 arm_rebuild_hflags(env);
10623 ret = vq * 16;
10625 return ret;
10626 case TARGET_PR_SVE_GET_VL:
10627 ret = -TARGET_EINVAL;
10629 ARMCPU *cpu = env_archcpu(cpu_env);
10630 if (cpu_isar_feature(aa64_sve, cpu)) {
10631 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10634 return ret;
10635 case TARGET_PR_PAC_RESET_KEYS:
10637 CPUARMState *env = cpu_env;
10638 ARMCPU *cpu = env_archcpu(env);
10640 if (arg3 || arg4 || arg5) {
10641 return -TARGET_EINVAL;
10643 if (cpu_isar_feature(aa64_pauth, cpu)) {
10644 int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10645 TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10646 TARGET_PR_PAC_APGAKEY);
10647 int ret = 0;
10648 Error *err = NULL;
10650 if (arg2 == 0) {
10651 arg2 = all;
10652 } else if (arg2 & ~all) {
10653 return -TARGET_EINVAL;
10655 if (arg2 & TARGET_PR_PAC_APIAKEY) {
10656 ret |= qemu_guest_getrandom(&env->keys.apia,
10657 sizeof(ARMPACKey), &err);
10659 if (arg2 & TARGET_PR_PAC_APIBKEY) {
10660 ret |= qemu_guest_getrandom(&env->keys.apib,
10661 sizeof(ARMPACKey), &err);
10663 if (arg2 & TARGET_PR_PAC_APDAKEY) {
10664 ret |= qemu_guest_getrandom(&env->keys.apda,
10665 sizeof(ARMPACKey), &err);
10667 if (arg2 & TARGET_PR_PAC_APDBKEY) {
10668 ret |= qemu_guest_getrandom(&env->keys.apdb,
10669 sizeof(ARMPACKey), &err);
10671 if (arg2 & TARGET_PR_PAC_APGAKEY) {
10672 ret |= qemu_guest_getrandom(&env->keys.apga,
10673 sizeof(ARMPACKey), &err);
10675 if (ret != 0) {
10677 * Some unknown failure in the crypto. The best
10678 * we can do is log it and fail the syscall.
10679 * The real syscall cannot fail this way.
10681 qemu_log_mask(LOG_UNIMP,
10682 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10683 error_get_pretty(err));
10684 error_free(err);
10685 return -TARGET_EIO;
10687 return 0;
10690 return -TARGET_EINVAL;
10691 #endif /* AARCH64 */
10692 case PR_GET_SECCOMP:
10693 case PR_SET_SECCOMP:
10694 /* Disable seccomp to prevent the target disabling syscalls we
10695 * need. */
10696 return -TARGET_EINVAL;
10697 default:
10698 /* Most prctl options have no pointer arguments */
10699 return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10701 break;
10702 #ifdef TARGET_NR_arch_prctl
10703 case TARGET_NR_arch_prctl:
10704 return do_arch_prctl(cpu_env, arg1, arg2);
10705 #endif
10706 #ifdef TARGET_NR_pread64
10707 case TARGET_NR_pread64:
10708 if (regpairs_aligned(cpu_env, num)) {
10709 arg4 = arg5;
10710 arg5 = arg6;
10712 if (arg2 == 0 && arg3 == 0) {
10713 /* Special-case NULL buffer and zero length, which should succeed */
10714 p = 0;
10715 } else {
10716 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10717 if (!p) {
10718 return -TARGET_EFAULT;
10721 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10722 unlock_user(p, arg2, ret);
10723 return ret;
10724 case TARGET_NR_pwrite64:
10725 if (regpairs_aligned(cpu_env, num)) {
10726 arg4 = arg5;
10727 arg5 = arg6;
10729 if (arg2 == 0 && arg3 == 0) {
10730 /* Special-case NULL buffer and zero length, which should succeed */
10731 p = 0;
10732 } else {
10733 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10734 if (!p) {
10735 return -TARGET_EFAULT;
10738 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10739 unlock_user(p, arg2, 0);
10740 return ret;
10741 #endif
10742 case TARGET_NR_getcwd:
10743 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10744 return -TARGET_EFAULT;
10745 ret = get_errno(sys_getcwd1(p, arg2));
10746 unlock_user(p, arg1, ret);
10747 return ret;
10748 case TARGET_NR_capget:
10749 case TARGET_NR_capset:
10751 struct target_user_cap_header *target_header;
10752 struct target_user_cap_data *target_data = NULL;
10753 struct __user_cap_header_struct header;
10754 struct __user_cap_data_struct data[2];
10755 struct __user_cap_data_struct *dataptr = NULL;
10756 int i, target_datalen;
10757 int data_items = 1;
10759 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10760 return -TARGET_EFAULT;
10762 header.version = tswap32(target_header->version);
10763 header.pid = tswap32(target_header->pid);
10765 if (header.version != _LINUX_CAPABILITY_VERSION) {
10766 /* Version 2 and up takes pointer to two user_data structs */
10767 data_items = 2;
10770 target_datalen = sizeof(*target_data) * data_items;
10772 if (arg2) {
10773 if (num == TARGET_NR_capget) {
10774 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10775 } else {
10776 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10778 if (!target_data) {
10779 unlock_user_struct(target_header, arg1, 0);
10780 return -TARGET_EFAULT;
10783 if (num == TARGET_NR_capset) {
10784 for (i = 0; i < data_items; i++) {
10785 data[i].effective = tswap32(target_data[i].effective);
10786 data[i].permitted = tswap32(target_data[i].permitted);
10787 data[i].inheritable = tswap32(target_data[i].inheritable);
10791 dataptr = data;
10794 if (num == TARGET_NR_capget) {
10795 ret = get_errno(capget(&header, dataptr));
10796 } else {
10797 ret = get_errno(capset(&header, dataptr));
10800 /* The kernel always updates version for both capget and capset */
10801 target_header->version = tswap32(header.version);
10802 unlock_user_struct(target_header, arg1, 1);
10804 if (arg2) {
10805 if (num == TARGET_NR_capget) {
10806 for (i = 0; i < data_items; i++) {
10807 target_data[i].effective = tswap32(data[i].effective);
10808 target_data[i].permitted = tswap32(data[i].permitted);
10809 target_data[i].inheritable = tswap32(data[i].inheritable);
10811 unlock_user(target_data, arg2, target_datalen);
10812 } else {
10813 unlock_user(target_data, arg2, 0);
10816 return ret;
10818 case TARGET_NR_sigaltstack:
10819 return do_sigaltstack(arg1, arg2,
10820 get_sp_from_cpustate((CPUArchState *)cpu_env));
10822 #ifdef CONFIG_SENDFILE
10823 #ifdef TARGET_NR_sendfile
10824 case TARGET_NR_sendfile:
10826 off_t *offp = NULL;
10827 off_t off;
10828 if (arg3) {
10829 ret = get_user_sal(off, arg3);
10830 if (is_error(ret)) {
10831 return ret;
10833 offp = &off;
10835 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10836 if (!is_error(ret) && arg3) {
10837 abi_long ret2 = put_user_sal(off, arg3);
10838 if (is_error(ret2)) {
10839 ret = ret2;
10842 return ret;
10844 #endif
10845 #ifdef TARGET_NR_sendfile64
10846 case TARGET_NR_sendfile64:
10848 off_t *offp = NULL;
10849 off_t off;
10850 if (arg3) {
10851 ret = get_user_s64(off, arg3);
10852 if (is_error(ret)) {
10853 return ret;
10855 offp = &off;
10857 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10858 if (!is_error(ret) && arg3) {
10859 abi_long ret2 = put_user_s64(off, arg3);
10860 if (is_error(ret2)) {
10861 ret = ret2;
10864 return ret;
10866 #endif
10867 #endif
10868 #ifdef TARGET_NR_vfork
10869 case TARGET_NR_vfork:
10870 return get_errno(do_fork(cpu_env,
10871 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10872 0, 0, 0, 0));
10873 #endif
10874 #ifdef TARGET_NR_ugetrlimit
10875 case TARGET_NR_ugetrlimit:
10877 struct rlimit rlim;
10878 int resource = target_to_host_resource(arg1);
10879 ret = get_errno(getrlimit(resource, &rlim));
10880 if (!is_error(ret)) {
10881 struct target_rlimit *target_rlim;
10882 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10883 return -TARGET_EFAULT;
10884 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10885 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10886 unlock_user_struct(target_rlim, arg2, 1);
10888 return ret;
10890 #endif
10891 #ifdef TARGET_NR_truncate64
10892 case TARGET_NR_truncate64:
10893 if (!(p = lock_user_string(arg1)))
10894 return -TARGET_EFAULT;
10895 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10896 unlock_user(p, arg1, 0);
10897 return ret;
10898 #endif
10899 #ifdef TARGET_NR_ftruncate64
10900 case TARGET_NR_ftruncate64:
10901 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10902 #endif
10903 #ifdef TARGET_NR_stat64
10904 case TARGET_NR_stat64:
10905 if (!(p = lock_user_string(arg1))) {
10906 return -TARGET_EFAULT;
10908 ret = get_errno(stat(path(p), &st));
10909 unlock_user(p, arg1, 0);
10910 if (!is_error(ret))
10911 ret = host_to_target_stat64(cpu_env, arg2, &st);
10912 return ret;
10913 #endif
10914 #ifdef TARGET_NR_lstat64
10915 case TARGET_NR_lstat64:
10916 if (!(p = lock_user_string(arg1))) {
10917 return -TARGET_EFAULT;
10919 ret = get_errno(lstat(path(p), &st));
10920 unlock_user(p, arg1, 0);
10921 if (!is_error(ret))
10922 ret = host_to_target_stat64(cpu_env, arg2, &st);
10923 return ret;
10924 #endif
10925 #ifdef TARGET_NR_fstat64
10926 case TARGET_NR_fstat64:
10927 ret = get_errno(fstat(arg1, &st));
10928 if (!is_error(ret))
10929 ret = host_to_target_stat64(cpu_env, arg2, &st);
10930 return ret;
10931 #endif
10932 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10933 #ifdef TARGET_NR_fstatat64
10934 case TARGET_NR_fstatat64:
10935 #endif
10936 #ifdef TARGET_NR_newfstatat
10937 case TARGET_NR_newfstatat:
10938 #endif
10939 if (!(p = lock_user_string(arg2))) {
10940 return -TARGET_EFAULT;
10942 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10943 unlock_user(p, arg2, 0);
10944 if (!is_error(ret))
10945 ret = host_to_target_stat64(cpu_env, arg3, &st);
10946 return ret;
10947 #endif
10948 #if defined(TARGET_NR_statx)
10949 case TARGET_NR_statx:
10951 struct target_statx *target_stx;
10952 int dirfd = arg1;
10953 int flags = arg3;
10955 p = lock_user_string(arg2);
10956 if (p == NULL) {
10957 return -TARGET_EFAULT;
10959 #if defined(__NR_statx)
10962 * It is assumed that struct statx is architecture independent.
10964 struct target_statx host_stx;
10965 int mask = arg4;
10967 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
10968 if (!is_error(ret)) {
10969 if (host_to_target_statx(&host_stx, arg5) != 0) {
10970 unlock_user(p, arg2, 0);
10971 return -TARGET_EFAULT;
10975 if (ret != -TARGET_ENOSYS) {
10976 unlock_user(p, arg2, 0);
10977 return ret;
10980 #endif
10981 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
10982 unlock_user(p, arg2, 0);
10984 if (!is_error(ret)) {
10985 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
10986 return -TARGET_EFAULT;
10988 memset(target_stx, 0, sizeof(*target_stx));
10989 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
10990 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
10991 __put_user(st.st_ino, &target_stx->stx_ino);
10992 __put_user(st.st_mode, &target_stx->stx_mode);
10993 __put_user(st.st_uid, &target_stx->stx_uid);
10994 __put_user(st.st_gid, &target_stx->stx_gid);
10995 __put_user(st.st_nlink, &target_stx->stx_nlink);
10996 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
10997 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
10998 __put_user(st.st_size, &target_stx->stx_size);
10999 __put_user(st.st_blksize, &target_stx->stx_blksize);
11000 __put_user(st.st_blocks, &target_stx->stx_blocks);
11001 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11002 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11003 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11004 unlock_user_struct(target_stx, arg5, 1);
11007 return ret;
11008 #endif
11009 #ifdef TARGET_NR_lchown
11010 case TARGET_NR_lchown:
11011 if (!(p = lock_user_string(arg1)))
11012 return -TARGET_EFAULT;
11013 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11014 unlock_user(p, arg1, 0);
11015 return ret;
11016 #endif
11017 #ifdef TARGET_NR_getuid
11018 case TARGET_NR_getuid:
11019 return get_errno(high2lowuid(getuid()));
11020 #endif
11021 #ifdef TARGET_NR_getgid
11022 case TARGET_NR_getgid:
11023 return get_errno(high2lowgid(getgid()));
11024 #endif
11025 #ifdef TARGET_NR_geteuid
11026 case TARGET_NR_geteuid:
11027 return get_errno(high2lowuid(geteuid()));
11028 #endif
11029 #ifdef TARGET_NR_getegid
11030 case TARGET_NR_getegid:
11031 return get_errno(high2lowgid(getegid()));
11032 #endif
11033 case TARGET_NR_setreuid:
11034 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11035 case TARGET_NR_setregid:
11036 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11037 case TARGET_NR_getgroups:
11039 int gidsetsize = arg1;
11040 target_id *target_grouplist;
11041 gid_t *grouplist;
11042 int i;
11044 grouplist = alloca(gidsetsize * sizeof(gid_t));
11045 ret = get_errno(getgroups(gidsetsize, grouplist));
11046 if (gidsetsize == 0)
11047 return ret;
11048 if (!is_error(ret)) {
11049 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11050 if (!target_grouplist)
11051 return -TARGET_EFAULT;
11052 for(i = 0;i < ret; i++)
11053 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11054 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11057 return ret;
11058 case TARGET_NR_setgroups:
11060 int gidsetsize = arg1;
11061 target_id *target_grouplist;
11062 gid_t *grouplist = NULL;
11063 int i;
11064 if (gidsetsize) {
11065 grouplist = alloca(gidsetsize * sizeof(gid_t));
11066 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11067 if (!target_grouplist) {
11068 return -TARGET_EFAULT;
11070 for (i = 0; i < gidsetsize; i++) {
11071 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11073 unlock_user(target_grouplist, arg2, 0);
11075 return get_errno(setgroups(gidsetsize, grouplist));
11077 case TARGET_NR_fchown:
11078 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11079 #if defined(TARGET_NR_fchownat)
11080 case TARGET_NR_fchownat:
11081 if (!(p = lock_user_string(arg2)))
11082 return -TARGET_EFAULT;
11083 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11084 low2highgid(arg4), arg5));
11085 unlock_user(p, arg2, 0);
11086 return ret;
11087 #endif
11088 #ifdef TARGET_NR_setresuid
11089 case TARGET_NR_setresuid:
11090 return get_errno(sys_setresuid(low2highuid(arg1),
11091 low2highuid(arg2),
11092 low2highuid(arg3)));
11093 #endif
11094 #ifdef TARGET_NR_getresuid
11095 case TARGET_NR_getresuid:
11097 uid_t ruid, euid, suid;
11098 ret = get_errno(getresuid(&ruid, &euid, &suid));
11099 if (!is_error(ret)) {
11100 if (put_user_id(high2lowuid(ruid), arg1)
11101 || put_user_id(high2lowuid(euid), arg2)
11102 || put_user_id(high2lowuid(suid), arg3))
11103 return -TARGET_EFAULT;
11106 return ret;
11107 #endif
11108 #ifdef TARGET_NR_getresgid
11109 case TARGET_NR_setresgid:
11110 return get_errno(sys_setresgid(low2highgid(arg1),
11111 low2highgid(arg2),
11112 low2highgid(arg3)));
11113 #endif
11114 #ifdef TARGET_NR_getresgid
11115 case TARGET_NR_getresgid:
11117 gid_t rgid, egid, sgid;
11118 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11119 if (!is_error(ret)) {
11120 if (put_user_id(high2lowgid(rgid), arg1)
11121 || put_user_id(high2lowgid(egid), arg2)
11122 || put_user_id(high2lowgid(sgid), arg3))
11123 return -TARGET_EFAULT;
11126 return ret;
11127 #endif
11128 #ifdef TARGET_NR_chown
11129 case TARGET_NR_chown:
11130 if (!(p = lock_user_string(arg1)))
11131 return -TARGET_EFAULT;
11132 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11133 unlock_user(p, arg1, 0);
11134 return ret;
11135 #endif
11136 case TARGET_NR_setuid:
11137 return get_errno(sys_setuid(low2highuid(arg1)));
11138 case TARGET_NR_setgid:
11139 return get_errno(sys_setgid(low2highgid(arg1)));
11140 case TARGET_NR_setfsuid:
11141 return get_errno(setfsuid(arg1));
11142 case TARGET_NR_setfsgid:
11143 return get_errno(setfsgid(arg1));
11145 #ifdef TARGET_NR_lchown32
11146 case TARGET_NR_lchown32:
11147 if (!(p = lock_user_string(arg1)))
11148 return -TARGET_EFAULT;
11149 ret = get_errno(lchown(p, arg2, arg3));
11150 unlock_user(p, arg1, 0);
11151 return ret;
11152 #endif
11153 #ifdef TARGET_NR_getuid32
11154 case TARGET_NR_getuid32:
11155 return get_errno(getuid());
11156 #endif
11158 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11159 /* Alpha specific */
11160 case TARGET_NR_getxuid:
11162 uid_t euid;
11163 euid=geteuid();
11164 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11166 return get_errno(getuid());
11167 #endif
11168 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11169 /* Alpha specific */
11170 case TARGET_NR_getxgid:
11172 uid_t egid;
11173 egid=getegid();
11174 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11176 return get_errno(getgid());
11177 #endif
11178 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11179 /* Alpha specific */
11180 case TARGET_NR_osf_getsysinfo:
11181 ret = -TARGET_EOPNOTSUPP;
11182 switch (arg1) {
11183 case TARGET_GSI_IEEE_FP_CONTROL:
11185 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11186 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11188 swcr &= ~SWCR_STATUS_MASK;
11189 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11191 if (put_user_u64 (swcr, arg2))
11192 return -TARGET_EFAULT;
11193 ret = 0;
11195 break;
11197 /* case GSI_IEEE_STATE_AT_SIGNAL:
11198 -- Not implemented in linux kernel.
11199 case GSI_UACPROC:
11200 -- Retrieves current unaligned access state; not much used.
11201 case GSI_PROC_TYPE:
11202 -- Retrieves implver information; surely not used.
11203 case GSI_GET_HWRPB:
11204 -- Grabs a copy of the HWRPB; surely not used.
11207 return ret;
11208 #endif
11209 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11210 /* Alpha specific */
11211 case TARGET_NR_osf_setsysinfo:
11212 ret = -TARGET_EOPNOTSUPP;
11213 switch (arg1) {
11214 case TARGET_SSI_IEEE_FP_CONTROL:
11216 uint64_t swcr, fpcr;
11218 if (get_user_u64 (swcr, arg2)) {
11219 return -TARGET_EFAULT;
11223 * The kernel calls swcr_update_status to update the
11224 * status bits from the fpcr at every point that it
11225 * could be queried. Therefore, we store the status
11226 * bits only in FPCR.
11228 ((CPUAlphaState *)cpu_env)->swcr
11229 = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11231 fpcr = cpu_alpha_load_fpcr(cpu_env);
11232 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11233 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11234 cpu_alpha_store_fpcr(cpu_env, fpcr);
11235 ret = 0;
11237 break;
11239 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11241 uint64_t exc, fpcr, fex;
11243 if (get_user_u64(exc, arg2)) {
11244 return -TARGET_EFAULT;
11246 exc &= SWCR_STATUS_MASK;
11247 fpcr = cpu_alpha_load_fpcr(cpu_env);
11249 /* Old exceptions are not signaled. */
11250 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11251 fex = exc & ~fex;
11252 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11253 fex &= ((CPUArchState *)cpu_env)->swcr;
11255 /* Update the hardware fpcr. */
11256 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11257 cpu_alpha_store_fpcr(cpu_env, fpcr);
11259 if (fex) {
11260 int si_code = TARGET_FPE_FLTUNK;
11261 target_siginfo_t info;
11263 if (fex & SWCR_TRAP_ENABLE_DNO) {
11264 si_code = TARGET_FPE_FLTUND;
11266 if (fex & SWCR_TRAP_ENABLE_INE) {
11267 si_code = TARGET_FPE_FLTRES;
11269 if (fex & SWCR_TRAP_ENABLE_UNF) {
11270 si_code = TARGET_FPE_FLTUND;
11272 if (fex & SWCR_TRAP_ENABLE_OVF) {
11273 si_code = TARGET_FPE_FLTOVF;
11275 if (fex & SWCR_TRAP_ENABLE_DZE) {
11276 si_code = TARGET_FPE_FLTDIV;
11278 if (fex & SWCR_TRAP_ENABLE_INV) {
11279 si_code = TARGET_FPE_FLTINV;
11282 info.si_signo = SIGFPE;
11283 info.si_errno = 0;
11284 info.si_code = si_code;
11285 info._sifields._sigfault._addr
11286 = ((CPUArchState *)cpu_env)->pc;
11287 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11288 QEMU_SI_FAULT, &info);
11290 ret = 0;
11292 break;
11294 /* case SSI_NVPAIRS:
11295 -- Used with SSIN_UACPROC to enable unaligned accesses.
11296 case SSI_IEEE_STATE_AT_SIGNAL:
11297 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11298 -- Not implemented in linux kernel
11301 return ret;
11302 #endif
11303 #ifdef TARGET_NR_osf_sigprocmask
11304 /* Alpha specific. */
11305 case TARGET_NR_osf_sigprocmask:
11307 abi_ulong mask;
11308 int how;
11309 sigset_t set, oldset;
11311 switch(arg1) {
11312 case TARGET_SIG_BLOCK:
11313 how = SIG_BLOCK;
11314 break;
11315 case TARGET_SIG_UNBLOCK:
11316 how = SIG_UNBLOCK;
11317 break;
11318 case TARGET_SIG_SETMASK:
11319 how = SIG_SETMASK;
11320 break;
11321 default:
11322 return -TARGET_EINVAL;
11324 mask = arg2;
11325 target_to_host_old_sigset(&set, &mask);
11326 ret = do_sigprocmask(how, &set, &oldset);
11327 if (!ret) {
11328 host_to_target_old_sigset(&mask, &oldset);
11329 ret = mask;
11332 return ret;
11333 #endif
11335 #ifdef TARGET_NR_getgid32
11336 case TARGET_NR_getgid32:
11337 return get_errno(getgid());
11338 #endif
11339 #ifdef TARGET_NR_geteuid32
11340 case TARGET_NR_geteuid32:
11341 return get_errno(geteuid());
11342 #endif
11343 #ifdef TARGET_NR_getegid32
11344 case TARGET_NR_getegid32:
11345 return get_errno(getegid());
11346 #endif
11347 #ifdef TARGET_NR_setreuid32
11348 case TARGET_NR_setreuid32:
11349 return get_errno(setreuid(arg1, arg2));
11350 #endif
11351 #ifdef TARGET_NR_setregid32
11352 case TARGET_NR_setregid32:
11353 return get_errno(setregid(arg1, arg2));
11354 #endif
11355 #ifdef TARGET_NR_getgroups32
11356 case TARGET_NR_getgroups32:
11358 int gidsetsize = arg1;
11359 uint32_t *target_grouplist;
11360 gid_t *grouplist;
11361 int i;
11363 grouplist = alloca(gidsetsize * sizeof(gid_t));
11364 ret = get_errno(getgroups(gidsetsize, grouplist));
11365 if (gidsetsize == 0)
11366 return ret;
11367 if (!is_error(ret)) {
11368 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11369 if (!target_grouplist) {
11370 return -TARGET_EFAULT;
11372 for(i = 0;i < ret; i++)
11373 target_grouplist[i] = tswap32(grouplist[i]);
11374 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11377 return ret;
11378 #endif
11379 #ifdef TARGET_NR_setgroups32
11380 case TARGET_NR_setgroups32:
11382 int gidsetsize = arg1;
11383 uint32_t *target_grouplist;
11384 gid_t *grouplist;
11385 int i;
11387 grouplist = alloca(gidsetsize * sizeof(gid_t));
11388 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11389 if (!target_grouplist) {
11390 return -TARGET_EFAULT;
11392 for(i = 0;i < gidsetsize; i++)
11393 grouplist[i] = tswap32(target_grouplist[i]);
11394 unlock_user(target_grouplist, arg2, 0);
11395 return get_errno(setgroups(gidsetsize, grouplist));
11397 #endif
11398 #ifdef TARGET_NR_fchown32
11399 case TARGET_NR_fchown32:
11400 return get_errno(fchown(arg1, arg2, arg3));
11401 #endif
11402 #ifdef TARGET_NR_setresuid32
11403 case TARGET_NR_setresuid32:
11404 return get_errno(sys_setresuid(arg1, arg2, arg3));
11405 #endif
11406 #ifdef TARGET_NR_getresuid32
11407 case TARGET_NR_getresuid32:
11409 uid_t ruid, euid, suid;
11410 ret = get_errno(getresuid(&ruid, &euid, &suid));
11411 if (!is_error(ret)) {
11412 if (put_user_u32(ruid, arg1)
11413 || put_user_u32(euid, arg2)
11414 || put_user_u32(suid, arg3))
11415 return -TARGET_EFAULT;
11418 return ret;
11419 #endif
11420 #ifdef TARGET_NR_setresgid32
11421 case TARGET_NR_setresgid32:
11422 return get_errno(sys_setresgid(arg1, arg2, arg3));
11423 #endif
11424 #ifdef TARGET_NR_getresgid32
11425 case TARGET_NR_getresgid32:
11427 gid_t rgid, egid, sgid;
11428 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11429 if (!is_error(ret)) {
11430 if (put_user_u32(rgid, arg1)
11431 || put_user_u32(egid, arg2)
11432 || put_user_u32(sgid, arg3))
11433 return -TARGET_EFAULT;
11436 return ret;
11437 #endif
11438 #ifdef TARGET_NR_chown32
11439 case TARGET_NR_chown32:
11440 if (!(p = lock_user_string(arg1)))
11441 return -TARGET_EFAULT;
11442 ret = get_errno(chown(p, arg2, arg3));
11443 unlock_user(p, arg1, 0);
11444 return ret;
11445 #endif
11446 #ifdef TARGET_NR_setuid32
11447 case TARGET_NR_setuid32:
11448 return get_errno(sys_setuid(arg1));
11449 #endif
11450 #ifdef TARGET_NR_setgid32
11451 case TARGET_NR_setgid32:
11452 return get_errno(sys_setgid(arg1));
11453 #endif
11454 #ifdef TARGET_NR_setfsuid32
11455 case TARGET_NR_setfsuid32:
11456 return get_errno(setfsuid(arg1));
11457 #endif
11458 #ifdef TARGET_NR_setfsgid32
11459 case TARGET_NR_setfsgid32:
11460 return get_errno(setfsgid(arg1));
11461 #endif
11462 #ifdef TARGET_NR_mincore
11463 case TARGET_NR_mincore:
11465 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11466 if (!a) {
11467 return -TARGET_ENOMEM;
11469 p = lock_user_string(arg3);
11470 if (!p) {
11471 ret = -TARGET_EFAULT;
11472 } else {
11473 ret = get_errno(mincore(a, arg2, p));
11474 unlock_user(p, arg3, ret);
11476 unlock_user(a, arg1, 0);
11478 return ret;
11479 #endif
11480 #ifdef TARGET_NR_arm_fadvise64_64
11481 case TARGET_NR_arm_fadvise64_64:
11482 /* arm_fadvise64_64 looks like fadvise64_64 but
11483 * with different argument order: fd, advice, offset, len
11484 * rather than the usual fd, offset, len, advice.
11485 * Note that offset and len are both 64-bit so appear as
11486 * pairs of 32-bit registers.
11488 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11489 target_offset64(arg5, arg6), arg2);
11490 return -host_to_target_errno(ret);
11491 #endif
11493 #if TARGET_ABI_BITS == 32
11495 #ifdef TARGET_NR_fadvise64_64
11496 case TARGET_NR_fadvise64_64:
11497 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11498 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11499 ret = arg2;
11500 arg2 = arg3;
11501 arg3 = arg4;
11502 arg4 = arg5;
11503 arg5 = arg6;
11504 arg6 = ret;
11505 #else
11506 /* 6 args: fd, offset (high, low), len (high, low), advice */
11507 if (regpairs_aligned(cpu_env, num)) {
11508 /* offset is in (3,4), len in (5,6) and advice in 7 */
11509 arg2 = arg3;
11510 arg3 = arg4;
11511 arg4 = arg5;
11512 arg5 = arg6;
11513 arg6 = arg7;
11515 #endif
11516 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11517 target_offset64(arg4, arg5), arg6);
11518 return -host_to_target_errno(ret);
11519 #endif
11521 #ifdef TARGET_NR_fadvise64
11522 case TARGET_NR_fadvise64:
11523 /* 5 args: fd, offset (high, low), len, advice */
11524 if (regpairs_aligned(cpu_env, num)) {
11525 /* offset is in (3,4), len in 5 and advice in 6 */
11526 arg2 = arg3;
11527 arg3 = arg4;
11528 arg4 = arg5;
11529 arg5 = arg6;
11531 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11532 return -host_to_target_errno(ret);
11533 #endif
11535 #else /* not a 32-bit ABI */
11536 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11537 #ifdef TARGET_NR_fadvise64_64
11538 case TARGET_NR_fadvise64_64:
11539 #endif
11540 #ifdef TARGET_NR_fadvise64
11541 case TARGET_NR_fadvise64:
11542 #endif
11543 #ifdef TARGET_S390X
11544 switch (arg4) {
11545 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11546 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11547 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11548 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11549 default: break;
11551 #endif
11552 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11553 #endif
11554 #endif /* end of 64-bit ABI fadvise handling */
11556 #ifdef TARGET_NR_madvise
11557 case TARGET_NR_madvise:
11558 /* A straight passthrough may not be safe because qemu sometimes
11559 turns private file-backed mappings into anonymous mappings.
11560 This will break MADV_DONTNEED.
11561 This is a hint, so ignoring and returning success is ok. */
11562 return 0;
11563 #endif
11564 #ifdef TARGET_NR_fcntl64
11565 case TARGET_NR_fcntl64:
11567 int cmd;
11568 struct flock64 fl;
11569 from_flock64_fn *copyfrom = copy_from_user_flock64;
11570 to_flock64_fn *copyto = copy_to_user_flock64;
11572 #ifdef TARGET_ARM
11573 if (!((CPUARMState *)cpu_env)->eabi) {
11574 copyfrom = copy_from_user_oabi_flock64;
11575 copyto = copy_to_user_oabi_flock64;
11577 #endif
11579 cmd = target_to_host_fcntl_cmd(arg2);
11580 if (cmd == -TARGET_EINVAL) {
11581 return cmd;
11584 switch(arg2) {
11585 case TARGET_F_GETLK64:
11586 ret = copyfrom(&fl, arg3);
11587 if (ret) {
11588 break;
11590 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11591 if (ret == 0) {
11592 ret = copyto(arg3, &fl);
11594 break;
11596 case TARGET_F_SETLK64:
11597 case TARGET_F_SETLKW64:
11598 ret = copyfrom(&fl, arg3);
11599 if (ret) {
11600 break;
11602 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11603 break;
11604 default:
11605 ret = do_fcntl(arg1, arg2, arg3);
11606 break;
11608 return ret;
11610 #endif
11611 #ifdef TARGET_NR_cacheflush
11612 case TARGET_NR_cacheflush:
11613 /* self-modifying code is handled automatically, so nothing needed */
11614 return 0;
11615 #endif
11616 #ifdef TARGET_NR_getpagesize
11617 case TARGET_NR_getpagesize:
11618 return TARGET_PAGE_SIZE;
11619 #endif
11620 case TARGET_NR_gettid:
11621 return get_errno(sys_gettid());
11622 #ifdef TARGET_NR_readahead
11623 case TARGET_NR_readahead:
11624 #if TARGET_ABI_BITS == 32
11625 if (regpairs_aligned(cpu_env, num)) {
11626 arg2 = arg3;
11627 arg3 = arg4;
11628 arg4 = arg5;
11630 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11631 #else
11632 ret = get_errno(readahead(arg1, arg2, arg3));
11633 #endif
11634 return ret;
11635 #endif
11636 #ifdef CONFIG_ATTR
11637 #ifdef TARGET_NR_setxattr
11638 case TARGET_NR_listxattr:
11639 case TARGET_NR_llistxattr:
11641 void *p, *b = 0;
11642 if (arg2) {
11643 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11644 if (!b) {
11645 return -TARGET_EFAULT;
11648 p = lock_user_string(arg1);
11649 if (p) {
11650 if (num == TARGET_NR_listxattr) {
11651 ret = get_errno(listxattr(p, b, arg3));
11652 } else {
11653 ret = get_errno(llistxattr(p, b, arg3));
11655 } else {
11656 ret = -TARGET_EFAULT;
11658 unlock_user(p, arg1, 0);
11659 unlock_user(b, arg2, arg3);
11660 return ret;
11662 case TARGET_NR_flistxattr:
11664 void *b = 0;
11665 if (arg2) {
11666 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11667 if (!b) {
11668 return -TARGET_EFAULT;
11671 ret = get_errno(flistxattr(arg1, b, arg3));
11672 unlock_user(b, arg2, arg3);
11673 return ret;
11675 case TARGET_NR_setxattr:
11676 case TARGET_NR_lsetxattr:
11678 void *p, *n, *v = 0;
11679 if (arg3) {
11680 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11681 if (!v) {
11682 return -TARGET_EFAULT;
11685 p = lock_user_string(arg1);
11686 n = lock_user_string(arg2);
11687 if (p && n) {
11688 if (num == TARGET_NR_setxattr) {
11689 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11690 } else {
11691 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11693 } else {
11694 ret = -TARGET_EFAULT;
11696 unlock_user(p, arg1, 0);
11697 unlock_user(n, arg2, 0);
11698 unlock_user(v, arg3, 0);
11700 return ret;
11701 case TARGET_NR_fsetxattr:
11703 void *n, *v = 0;
11704 if (arg3) {
11705 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11706 if (!v) {
11707 return -TARGET_EFAULT;
11710 n = lock_user_string(arg2);
11711 if (n) {
11712 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11713 } else {
11714 ret = -TARGET_EFAULT;
11716 unlock_user(n, arg2, 0);
11717 unlock_user(v, arg3, 0);
11719 return ret;
11720 case TARGET_NR_getxattr:
11721 case TARGET_NR_lgetxattr:
11723 void *p, *n, *v = 0;
11724 if (arg3) {
11725 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11726 if (!v) {
11727 return -TARGET_EFAULT;
11730 p = lock_user_string(arg1);
11731 n = lock_user_string(arg2);
11732 if (p && n) {
11733 if (num == TARGET_NR_getxattr) {
11734 ret = get_errno(getxattr(p, n, v, arg4));
11735 } else {
11736 ret = get_errno(lgetxattr(p, n, v, arg4));
11738 } else {
11739 ret = -TARGET_EFAULT;
11741 unlock_user(p, arg1, 0);
11742 unlock_user(n, arg2, 0);
11743 unlock_user(v, arg3, arg4);
11745 return ret;
11746 case TARGET_NR_fgetxattr:
11748 void *n, *v = 0;
11749 if (arg3) {
11750 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11751 if (!v) {
11752 return -TARGET_EFAULT;
11755 n = lock_user_string(arg2);
11756 if (n) {
11757 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11758 } else {
11759 ret = -TARGET_EFAULT;
11761 unlock_user(n, arg2, 0);
11762 unlock_user(v, arg3, arg4);
11764 return ret;
11765 case TARGET_NR_removexattr:
11766 case TARGET_NR_lremovexattr:
11768 void *p, *n;
11769 p = lock_user_string(arg1);
11770 n = lock_user_string(arg2);
11771 if (p && n) {
11772 if (num == TARGET_NR_removexattr) {
11773 ret = get_errno(removexattr(p, n));
11774 } else {
11775 ret = get_errno(lremovexattr(p, n));
11777 } else {
11778 ret = -TARGET_EFAULT;
11780 unlock_user(p, arg1, 0);
11781 unlock_user(n, arg2, 0);
11783 return ret;
11784 case TARGET_NR_fremovexattr:
11786 void *n;
11787 n = lock_user_string(arg2);
11788 if (n) {
11789 ret = get_errno(fremovexattr(arg1, n));
11790 } else {
11791 ret = -TARGET_EFAULT;
11793 unlock_user(n, arg2, 0);
11795 return ret;
11796 #endif
11797 #endif /* CONFIG_ATTR */
11798 #ifdef TARGET_NR_set_thread_area
11799 case TARGET_NR_set_thread_area:
11800 #if defined(TARGET_MIPS)
11801 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11802 return 0;
11803 #elif defined(TARGET_CRIS)
11804 if (arg1 & 0xff)
11805 ret = -TARGET_EINVAL;
11806 else {
11807 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11808 ret = 0;
11810 return ret;
11811 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11812 return do_set_thread_area(cpu_env, arg1);
11813 #elif defined(TARGET_M68K)
11815 TaskState *ts = cpu->opaque;
11816 ts->tp_value = arg1;
11817 return 0;
11819 #else
11820 return -TARGET_ENOSYS;
11821 #endif
11822 #endif
11823 #ifdef TARGET_NR_get_thread_area
11824 case TARGET_NR_get_thread_area:
11825 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11826 return do_get_thread_area(cpu_env, arg1);
11827 #elif defined(TARGET_M68K)
11829 TaskState *ts = cpu->opaque;
11830 return ts->tp_value;
11832 #else
11833 return -TARGET_ENOSYS;
11834 #endif
11835 #endif
11836 #ifdef TARGET_NR_getdomainname
11837 case TARGET_NR_getdomainname:
11838 return -TARGET_ENOSYS;
11839 #endif
11841 #ifdef TARGET_NR_clock_settime
11842 case TARGET_NR_clock_settime:
11844 struct timespec ts;
11846 ret = target_to_host_timespec(&ts, arg2);
11847 if (!is_error(ret)) {
11848 ret = get_errno(clock_settime(arg1, &ts));
11850 return ret;
11852 #endif
11853 #ifdef TARGET_NR_clock_settime64
11854 case TARGET_NR_clock_settime64:
11856 struct timespec ts;
11858 ret = target_to_host_timespec64(&ts, arg2);
11859 if (!is_error(ret)) {
11860 ret = get_errno(clock_settime(arg1, &ts));
11862 return ret;
11864 #endif
11865 #ifdef TARGET_NR_clock_gettime
11866 case TARGET_NR_clock_gettime:
11868 struct timespec ts;
11869 ret = get_errno(clock_gettime(arg1, &ts));
11870 if (!is_error(ret)) {
11871 ret = host_to_target_timespec(arg2, &ts);
11873 return ret;
11875 #endif
11876 #ifdef TARGET_NR_clock_gettime64
11877 case TARGET_NR_clock_gettime64:
11879 struct timespec ts;
11880 ret = get_errno(clock_gettime(arg1, &ts));
11881 if (!is_error(ret)) {
11882 ret = host_to_target_timespec64(arg2, &ts);
11884 return ret;
11886 #endif
11887 #ifdef TARGET_NR_clock_getres
11888 case TARGET_NR_clock_getres:
11890 struct timespec ts;
11891 ret = get_errno(clock_getres(arg1, &ts));
11892 if (!is_error(ret)) {
11893 host_to_target_timespec(arg2, &ts);
11895 return ret;
11897 #endif
11898 #ifdef TARGET_NR_clock_getres_time64
11899 case TARGET_NR_clock_getres_time64:
11901 struct timespec ts;
11902 ret = get_errno(clock_getres(arg1, &ts));
11903 if (!is_error(ret)) {
11904 host_to_target_timespec64(arg2, &ts);
11906 return ret;
11908 #endif
11909 #ifdef TARGET_NR_clock_nanosleep
11910 case TARGET_NR_clock_nanosleep:
11912 struct timespec ts;
11913 if (target_to_host_timespec(&ts, arg3)) {
11914 return -TARGET_EFAULT;
11916 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11917 &ts, arg4 ? &ts : NULL));
11919 * if the call is interrupted by a signal handler, it fails
11920 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
11921 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
11923 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
11924 host_to_target_timespec(arg4, &ts)) {
11925 return -TARGET_EFAULT;
11928 return ret;
11930 #endif
11932 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11933 case TARGET_NR_set_tid_address:
11934 return get_errno(set_tid_address((int *)g2h(arg1)));
11935 #endif
11937 case TARGET_NR_tkill:
11938 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11940 case TARGET_NR_tgkill:
11941 return get_errno(safe_tgkill((int)arg1, (int)arg2,
11942 target_to_host_signal(arg3)));
11944 #ifdef TARGET_NR_set_robust_list
11945 case TARGET_NR_set_robust_list:
11946 case TARGET_NR_get_robust_list:
11947 /* The ABI for supporting robust futexes has userspace pass
11948 * the kernel a pointer to a linked list which is updated by
11949 * userspace after the syscall; the list is walked by the kernel
11950 * when the thread exits. Since the linked list in QEMU guest
11951 * memory isn't a valid linked list for the host and we have
11952 * no way to reliably intercept the thread-death event, we can't
11953 * support these. Silently return ENOSYS so that guest userspace
11954 * falls back to a non-robust futex implementation (which should
11955 * be OK except in the corner case of the guest crashing while
11956 * holding a mutex that is shared with another process via
11957 * shared memory).
11959 return -TARGET_ENOSYS;
11960 #endif
11962 #if defined(TARGET_NR_utimensat)
11963 case TARGET_NR_utimensat:
11965 struct timespec *tsp, ts[2];
11966 if (!arg3) {
11967 tsp = NULL;
11968 } else {
11969 if (target_to_host_timespec(ts, arg3)) {
11970 return -TARGET_EFAULT;
11972 if (target_to_host_timespec(ts + 1, arg3 +
11973 sizeof(struct target_timespec))) {
11974 return -TARGET_EFAULT;
11976 tsp = ts;
11978 if (!arg2)
11979 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11980 else {
11981 if (!(p = lock_user_string(arg2))) {
11982 return -TARGET_EFAULT;
11984 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11985 unlock_user(p, arg2, 0);
11988 return ret;
11989 #endif
11990 #ifdef TARGET_NR_futex
11991 case TARGET_NR_futex:
11992 return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11993 #endif
11994 #ifdef TARGET_NR_futex_time64
11995 case TARGET_NR_futex_time64:
11996 return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
11997 #endif
11998 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11999 case TARGET_NR_inotify_init:
12000 ret = get_errno(sys_inotify_init());
12001 if (ret >= 0) {
12002 fd_trans_register(ret, &target_inotify_trans);
12004 return ret;
12005 #endif
12006 #ifdef CONFIG_INOTIFY1
12007 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12008 case TARGET_NR_inotify_init1:
12009 ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12010 fcntl_flags_tbl)));
12011 if (ret >= 0) {
12012 fd_trans_register(ret, &target_inotify_trans);
12014 return ret;
12015 #endif
12016 #endif
12017 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12018 case TARGET_NR_inotify_add_watch:
12019 p = lock_user_string(arg2);
12020 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12021 unlock_user(p, arg2, 0);
12022 return ret;
12023 #endif
12024 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12025 case TARGET_NR_inotify_rm_watch:
12026 return get_errno(sys_inotify_rm_watch(arg1, arg2));
12027 #endif
12029 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12030 case TARGET_NR_mq_open:
12032 struct mq_attr posix_mq_attr;
12033 struct mq_attr *pposix_mq_attr;
12034 int host_flags;
12036 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12037 pposix_mq_attr = NULL;
12038 if (arg4) {
12039 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12040 return -TARGET_EFAULT;
12042 pposix_mq_attr = &posix_mq_attr;
12044 p = lock_user_string(arg1 - 1);
12045 if (!p) {
12046 return -TARGET_EFAULT;
12048 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12049 unlock_user (p, arg1, 0);
12051 return ret;
12053 case TARGET_NR_mq_unlink:
12054 p = lock_user_string(arg1 - 1);
12055 if (!p) {
12056 return -TARGET_EFAULT;
12058 ret = get_errno(mq_unlink(p));
12059 unlock_user (p, arg1, 0);
12060 return ret;
12062 #ifdef TARGET_NR_mq_timedsend
12063 case TARGET_NR_mq_timedsend:
12065 struct timespec ts;
12067 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12068 if (arg5 != 0) {
12069 target_to_host_timespec(&ts, arg5);
12070 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12071 host_to_target_timespec(arg5, &ts);
12072 } else {
12073 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12075 unlock_user (p, arg2, arg3);
12077 return ret;
12078 #endif
12080 #ifdef TARGET_NR_mq_timedreceive
12081 case TARGET_NR_mq_timedreceive:
12083 struct timespec ts;
12084 unsigned int prio;
12086 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12087 if (arg5 != 0) {
12088 target_to_host_timespec(&ts, arg5);
12089 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12090 &prio, &ts));
12091 host_to_target_timespec(arg5, &ts);
12092 } else {
12093 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12094 &prio, NULL));
12096 unlock_user (p, arg2, arg3);
12097 if (arg4 != 0)
12098 put_user_u32(prio, arg4);
12100 return ret;
12101 #endif
12103 /* Not implemented for now... */
12104 /* case TARGET_NR_mq_notify: */
12105 /* break; */
12107 case TARGET_NR_mq_getsetattr:
12109 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12110 ret = 0;
12111 if (arg2 != 0) {
12112 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12113 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12114 &posix_mq_attr_out));
12115 } else if (arg3 != 0) {
12116 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12118 if (ret == 0 && arg3 != 0) {
12119 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12122 return ret;
12123 #endif
12125 #ifdef CONFIG_SPLICE
12126 #ifdef TARGET_NR_tee
12127 case TARGET_NR_tee:
12129 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12131 return ret;
12132 #endif
12133 #ifdef TARGET_NR_splice
12134 case TARGET_NR_splice:
12136 loff_t loff_in, loff_out;
12137 loff_t *ploff_in = NULL, *ploff_out = NULL;
12138 if (arg2) {
12139 if (get_user_u64(loff_in, arg2)) {
12140 return -TARGET_EFAULT;
12142 ploff_in = &loff_in;
12144 if (arg4) {
12145 if (get_user_u64(loff_out, arg4)) {
12146 return -TARGET_EFAULT;
12148 ploff_out = &loff_out;
12150 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12151 if (arg2) {
12152 if (put_user_u64(loff_in, arg2)) {
12153 return -TARGET_EFAULT;
12156 if (arg4) {
12157 if (put_user_u64(loff_out, arg4)) {
12158 return -TARGET_EFAULT;
12162 return ret;
12163 #endif
12164 #ifdef TARGET_NR_vmsplice
12165 case TARGET_NR_vmsplice:
12167 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12168 if (vec != NULL) {
12169 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12170 unlock_iovec(vec, arg2, arg3, 0);
12171 } else {
12172 ret = -host_to_target_errno(errno);
12175 return ret;
12176 #endif
12177 #endif /* CONFIG_SPLICE */
12178 #ifdef CONFIG_EVENTFD
12179 #if defined(TARGET_NR_eventfd)
12180 case TARGET_NR_eventfd:
12181 ret = get_errno(eventfd(arg1, 0));
12182 if (ret >= 0) {
12183 fd_trans_register(ret, &target_eventfd_trans);
12185 return ret;
12186 #endif
12187 #if defined(TARGET_NR_eventfd2)
12188 case TARGET_NR_eventfd2:
12190 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
12191 if (arg2 & TARGET_O_NONBLOCK) {
12192 host_flags |= O_NONBLOCK;
12194 if (arg2 & TARGET_O_CLOEXEC) {
12195 host_flags |= O_CLOEXEC;
12197 ret = get_errno(eventfd(arg1, host_flags));
12198 if (ret >= 0) {
12199 fd_trans_register(ret, &target_eventfd_trans);
12201 return ret;
12203 #endif
12204 #endif /* CONFIG_EVENTFD */
12205 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12206 case TARGET_NR_fallocate:
12207 #if TARGET_ABI_BITS == 32
12208 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12209 target_offset64(arg5, arg6)));
12210 #else
12211 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12212 #endif
12213 return ret;
12214 #endif
12215 #if defined(CONFIG_SYNC_FILE_RANGE)
12216 #if defined(TARGET_NR_sync_file_range)
12217 case TARGET_NR_sync_file_range:
12218 #if TARGET_ABI_BITS == 32
12219 #if defined(TARGET_MIPS)
12220 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12221 target_offset64(arg5, arg6), arg7));
12222 #else
12223 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12224 target_offset64(arg4, arg5), arg6));
12225 #endif /* !TARGET_MIPS */
12226 #else
12227 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12228 #endif
12229 return ret;
12230 #endif
12231 #if defined(TARGET_NR_sync_file_range2) || \
12232 defined(TARGET_NR_arm_sync_file_range)
12233 #if defined(TARGET_NR_sync_file_range2)
12234 case TARGET_NR_sync_file_range2:
12235 #endif
12236 #if defined(TARGET_NR_arm_sync_file_range)
12237 case TARGET_NR_arm_sync_file_range:
12238 #endif
12239 /* This is like sync_file_range but the arguments are reordered */
12240 #if TARGET_ABI_BITS == 32
12241 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12242 target_offset64(arg5, arg6), arg2));
12243 #else
12244 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12245 #endif
12246 return ret;
12247 #endif
12248 #endif
12249 #if defined(TARGET_NR_signalfd4)
12250 case TARGET_NR_signalfd4:
12251 return do_signalfd4(arg1, arg2, arg4);
12252 #endif
12253 #if defined(TARGET_NR_signalfd)
12254 case TARGET_NR_signalfd:
12255 return do_signalfd4(arg1, arg2, 0);
12256 #endif
12257 #if defined(CONFIG_EPOLL)
12258 #if defined(TARGET_NR_epoll_create)
12259 case TARGET_NR_epoll_create:
12260 return get_errno(epoll_create(arg1));
12261 #endif
12262 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12263 case TARGET_NR_epoll_create1:
12264 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12265 #endif
12266 #if defined(TARGET_NR_epoll_ctl)
12267 case TARGET_NR_epoll_ctl:
12269 struct epoll_event ep;
12270 struct epoll_event *epp = 0;
12271 if (arg4) {
12272 struct target_epoll_event *target_ep;
12273 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12274 return -TARGET_EFAULT;
12276 ep.events = tswap32(target_ep->events);
12277 /* The epoll_data_t union is just opaque data to the kernel,
12278 * so we transfer all 64 bits across and need not worry what
12279 * actual data type it is.
12281 ep.data.u64 = tswap64(target_ep->data.u64);
12282 unlock_user_struct(target_ep, arg4, 0);
12283 epp = &ep;
12285 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12287 #endif
12289 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12290 #if defined(TARGET_NR_epoll_wait)
12291 case TARGET_NR_epoll_wait:
12292 #endif
12293 #if defined(TARGET_NR_epoll_pwait)
12294 case TARGET_NR_epoll_pwait:
12295 #endif
12297 struct target_epoll_event *target_ep;
12298 struct epoll_event *ep;
12299 int epfd = arg1;
12300 int maxevents = arg3;
12301 int timeout = arg4;
12303 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12304 return -TARGET_EINVAL;
12307 target_ep = lock_user(VERIFY_WRITE, arg2,
12308 maxevents * sizeof(struct target_epoll_event), 1);
12309 if (!target_ep) {
12310 return -TARGET_EFAULT;
12313 ep = g_try_new(struct epoll_event, maxevents);
12314 if (!ep) {
12315 unlock_user(target_ep, arg2, 0);
12316 return -TARGET_ENOMEM;
12319 switch (num) {
12320 #if defined(TARGET_NR_epoll_pwait)
12321 case TARGET_NR_epoll_pwait:
12323 target_sigset_t *target_set;
12324 sigset_t _set, *set = &_set;
12326 if (arg5) {
12327 if (arg6 != sizeof(target_sigset_t)) {
12328 ret = -TARGET_EINVAL;
12329 break;
12332 target_set = lock_user(VERIFY_READ, arg5,
12333 sizeof(target_sigset_t), 1);
12334 if (!target_set) {
12335 ret = -TARGET_EFAULT;
12336 break;
12338 target_to_host_sigset(set, target_set);
12339 unlock_user(target_set, arg5, 0);
12340 } else {
12341 set = NULL;
12344 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12345 set, SIGSET_T_SIZE));
12346 break;
12348 #endif
12349 #if defined(TARGET_NR_epoll_wait)
12350 case TARGET_NR_epoll_wait:
12351 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12352 NULL, 0));
12353 break;
12354 #endif
12355 default:
12356 ret = -TARGET_ENOSYS;
12358 if (!is_error(ret)) {
12359 int i;
12360 for (i = 0; i < ret; i++) {
12361 target_ep[i].events = tswap32(ep[i].events);
12362 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12364 unlock_user(target_ep, arg2,
12365 ret * sizeof(struct target_epoll_event));
12366 } else {
12367 unlock_user(target_ep, arg2, 0);
12369 g_free(ep);
12370 return ret;
12372 #endif
12373 #endif
12374 #ifdef TARGET_NR_prlimit64
12375 case TARGET_NR_prlimit64:
12377 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12378 struct target_rlimit64 *target_rnew, *target_rold;
12379 struct host_rlimit64 rnew, rold, *rnewp = 0;
12380 int resource = target_to_host_resource(arg2);
12382 if (arg3 && (resource != RLIMIT_AS &&
12383 resource != RLIMIT_DATA &&
12384 resource != RLIMIT_STACK)) {
12385 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12386 return -TARGET_EFAULT;
12388 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12389 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12390 unlock_user_struct(target_rnew, arg3, 0);
12391 rnewp = &rnew;
12394 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12395 if (!is_error(ret) && arg4) {
12396 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12397 return -TARGET_EFAULT;
12399 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12400 target_rold->rlim_max = tswap64(rold.rlim_max);
12401 unlock_user_struct(target_rold, arg4, 1);
12403 return ret;
12405 #endif
12406 #ifdef TARGET_NR_gethostname
12407 case TARGET_NR_gethostname:
12409 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12410 if (name) {
12411 ret = get_errno(gethostname(name, arg2));
12412 unlock_user(name, arg1, arg2);
12413 } else {
12414 ret = -TARGET_EFAULT;
12416 return ret;
12418 #endif
12419 #ifdef TARGET_NR_atomic_cmpxchg_32
12420 case TARGET_NR_atomic_cmpxchg_32:
12422 /* should use start_exclusive from main.c */
12423 abi_ulong mem_value;
12424 if (get_user_u32(mem_value, arg6)) {
12425 target_siginfo_t info;
12426 info.si_signo = SIGSEGV;
12427 info.si_errno = 0;
12428 info.si_code = TARGET_SEGV_MAPERR;
12429 info._sifields._sigfault._addr = arg6;
12430 queue_signal((CPUArchState *)cpu_env, info.si_signo,
12431 QEMU_SI_FAULT, &info);
12432 ret = 0xdeadbeef;
12435 if (mem_value == arg2)
12436 put_user_u32(arg1, arg6);
12437 return mem_value;
12439 #endif
12440 #ifdef TARGET_NR_atomic_barrier
12441 case TARGET_NR_atomic_barrier:
12442 /* Like the kernel implementation and the
12443 qemu arm barrier, no-op this? */
12444 return 0;
12445 #endif
12447 #ifdef TARGET_NR_timer_create
12448 case TARGET_NR_timer_create:
12450 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12452 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12454 int clkid = arg1;
12455 int timer_index = next_free_host_timer();
12457 if (timer_index < 0) {
12458 ret = -TARGET_EAGAIN;
12459 } else {
12460 timer_t *phtimer = g_posix_timers + timer_index;
12462 if (arg2) {
12463 phost_sevp = &host_sevp;
12464 ret = target_to_host_sigevent(phost_sevp, arg2);
12465 if (ret != 0) {
12466 return ret;
12470 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12471 if (ret) {
12472 phtimer = NULL;
12473 } else {
12474 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12475 return -TARGET_EFAULT;
12479 return ret;
12481 #endif
12483 #ifdef TARGET_NR_timer_settime
12484 case TARGET_NR_timer_settime:
12486 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12487 * struct itimerspec * old_value */
12488 target_timer_t timerid = get_timer_id(arg1);
12490 if (timerid < 0) {
12491 ret = timerid;
12492 } else if (arg3 == 0) {
12493 ret = -TARGET_EINVAL;
12494 } else {
12495 timer_t htimer = g_posix_timers[timerid];
12496 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12498 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12499 return -TARGET_EFAULT;
12501 ret = get_errno(
12502 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12503 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12504 return -TARGET_EFAULT;
12507 return ret;
12509 #endif
12511 #ifdef TARGET_NR_timer_settime64
12512 case TARGET_NR_timer_settime64:
12514 target_timer_t timerid = get_timer_id(arg1);
12516 if (timerid < 0) {
12517 ret = timerid;
12518 } else if (arg3 == 0) {
12519 ret = -TARGET_EINVAL;
12520 } else {
12521 timer_t htimer = g_posix_timers[timerid];
12522 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12524 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12525 return -TARGET_EFAULT;
12527 ret = get_errno(
12528 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12529 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12530 return -TARGET_EFAULT;
12533 return ret;
12535 #endif
12537 #ifdef TARGET_NR_timer_gettime
12538 case TARGET_NR_timer_gettime:
12540 /* args: timer_t timerid, struct itimerspec *curr_value */
12541 target_timer_t timerid = get_timer_id(arg1);
12543 if (timerid < 0) {
12544 ret = timerid;
12545 } else if (!arg2) {
12546 ret = -TARGET_EFAULT;
12547 } else {
12548 timer_t htimer = g_posix_timers[timerid];
12549 struct itimerspec hspec;
12550 ret = get_errno(timer_gettime(htimer, &hspec));
12552 if (host_to_target_itimerspec(arg2, &hspec)) {
12553 ret = -TARGET_EFAULT;
12556 return ret;
12558 #endif
12560 #ifdef TARGET_NR_timer_gettime64
12561 case TARGET_NR_timer_gettime64:
12563 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12564 target_timer_t timerid = get_timer_id(arg1);
12566 if (timerid < 0) {
12567 ret = timerid;
12568 } else if (!arg2) {
12569 ret = -TARGET_EFAULT;
12570 } else {
12571 timer_t htimer = g_posix_timers[timerid];
12572 struct itimerspec hspec;
12573 ret = get_errno(timer_gettime(htimer, &hspec));
12575 if (host_to_target_itimerspec64(arg2, &hspec)) {
12576 ret = -TARGET_EFAULT;
12579 return ret;
12581 #endif
12583 #ifdef TARGET_NR_timer_getoverrun
12584 case TARGET_NR_timer_getoverrun:
12586 /* args: timer_t timerid */
12587 target_timer_t timerid = get_timer_id(arg1);
12589 if (timerid < 0) {
12590 ret = timerid;
12591 } else {
12592 timer_t htimer = g_posix_timers[timerid];
12593 ret = get_errno(timer_getoverrun(htimer));
12595 return ret;
12597 #endif
12599 #ifdef TARGET_NR_timer_delete
12600 case TARGET_NR_timer_delete:
12602 /* args: timer_t timerid */
12603 target_timer_t timerid = get_timer_id(arg1);
12605 if (timerid < 0) {
12606 ret = timerid;
12607 } else {
12608 timer_t htimer = g_posix_timers[timerid];
12609 ret = get_errno(timer_delete(htimer));
12610 g_posix_timers[timerid] = 0;
12612 return ret;
12614 #endif
12616 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12617 case TARGET_NR_timerfd_create:
12618 return get_errno(timerfd_create(arg1,
12619 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12620 #endif
12622 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12623 case TARGET_NR_timerfd_gettime:
12625 struct itimerspec its_curr;
12627 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12629 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12630 return -TARGET_EFAULT;
12633 return ret;
12634 #endif
12636 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12637 case TARGET_NR_timerfd_gettime64:
12639 struct itimerspec its_curr;
12641 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12643 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12644 return -TARGET_EFAULT;
12647 return ret;
12648 #endif
12650 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12651 case TARGET_NR_timerfd_settime:
12653 struct itimerspec its_new, its_old, *p_new;
12655 if (arg3) {
12656 if (target_to_host_itimerspec(&its_new, arg3)) {
12657 return -TARGET_EFAULT;
12659 p_new = &its_new;
12660 } else {
12661 p_new = NULL;
12664 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12666 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12667 return -TARGET_EFAULT;
12670 return ret;
12671 #endif
12673 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12674 case TARGET_NR_timerfd_settime64:
12676 struct itimerspec its_new, its_old, *p_new;
12678 if (arg3) {
12679 if (target_to_host_itimerspec64(&its_new, arg3)) {
12680 return -TARGET_EFAULT;
12682 p_new = &its_new;
12683 } else {
12684 p_new = NULL;
12687 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12689 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12690 return -TARGET_EFAULT;
12693 return ret;
12694 #endif
12696 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12697 case TARGET_NR_ioprio_get:
12698 return get_errno(ioprio_get(arg1, arg2));
12699 #endif
12701 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12702 case TARGET_NR_ioprio_set:
12703 return get_errno(ioprio_set(arg1, arg2, arg3));
12704 #endif
12706 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12707 case TARGET_NR_setns:
12708 return get_errno(setns(arg1, arg2));
12709 #endif
12710 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12711 case TARGET_NR_unshare:
12712 return get_errno(unshare(arg1));
12713 #endif
12714 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12715 case TARGET_NR_kcmp:
12716 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12717 #endif
12718 #ifdef TARGET_NR_swapcontext
12719 case TARGET_NR_swapcontext:
12720 /* PowerPC specific. */
12721 return do_swapcontext(cpu_env, arg1, arg2, arg3);
12722 #endif
12723 #ifdef TARGET_NR_memfd_create
12724 case TARGET_NR_memfd_create:
12725 p = lock_user_string(arg1);
12726 if (!p) {
12727 return -TARGET_EFAULT;
12729 ret = get_errno(memfd_create(p, arg2));
12730 fd_trans_unregister(ret);
12731 unlock_user(p, arg1, 0);
12732 return ret;
12733 #endif
12734 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12735 case TARGET_NR_membarrier:
12736 return get_errno(membarrier(arg1, arg2));
12737 #endif
12739 default:
12740 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12741 return -TARGET_ENOSYS;
12743 return ret;
12746 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12747 abi_long arg2, abi_long arg3, abi_long arg4,
12748 abi_long arg5, abi_long arg6, abi_long arg7,
12749 abi_long arg8)
12751 CPUState *cpu = env_cpu(cpu_env);
12752 abi_long ret;
12754 #ifdef DEBUG_ERESTARTSYS
12755 /* Debug-only code for exercising the syscall-restart code paths
12756 * in the per-architecture cpu main loops: restart every syscall
12757 * the guest makes once before letting it through.
12760 static bool flag;
12761 flag = !flag;
12762 if (flag) {
12763 return -TARGET_ERESTARTSYS;
12766 #endif
12768 record_syscall_start(cpu, num, arg1,
12769 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
12771 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12772 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
12775 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
12776 arg5, arg6, arg7, arg8);
12778 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
12779 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
12780 arg3, arg4, arg5, arg6);
12783 record_syscall_return(cpu, num, ret);
12784 return ret;