/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>

#ifdef HAVE_SYS_MOUNT_FSCONFIG
/*
 * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
 * which in turn prevents use of linux/fs.h. So we have to
 * define the constants ourselves for now.
 */
#define FS_IOC_GETFLAGS                _IOR('f', 1, long)
#define FS_IOC_SETFLAGS                _IOW('f', 2, long)
#define FS_IOC_GETVERSION              _IOR('v', 1, long)
#define FS_IOC_SETVERSION              _IOW('v', 2, long)
#define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
#define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
#define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
#define FS_IOC32_GETVERSION            _IOR('v', 1, int)
#define FS_IOC32_SETVERSION            _IOW('v', 2, int)
#else
#include <linux/fs.h>
#endif
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"

#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
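
/*
 * Editor's illustration (not part of the original source): a typical
 * glibc pthread_create() issues clone() with
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * i.e. all of CLONE_THREAD_FLAGS plus only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it passes the
 * CLONE_INVALID_THREAD_FLAGS check above.
 */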

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
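
/*
 * Editor's illustration (not part of the original source): given the
 * __NR_sys_* aliases below, the invocation
 *
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 *
 * expands to a thin wrapper around the raw host syscall:
 *
 *     static int sys_getcwd1 (char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * i.e. syscall(__NR_getcwd, buf, size), bypassing any glibc wrapper.
 */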

#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
          unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
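
/*
 * Editor's sketch (not part of the original source): passing NULL as
 * new_limit makes the raw prlimit64 syscall a pure read, e.g.
 *
 *     struct host_rlimit64 old;
 *     sys_prlimit64(0, RLIMIT_NOFILE, NULL, &old);
 *
 * which fills `old` with the 64-bit limits of the calling process
 * without going through glibc's struct rlimit conversion.
 */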

#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif
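
/*
 * Editor's illustration (not part of the original source): the
 * qatomic_xchg() above is a test-and-set, so concurrent guest threads
 * cannot claim the same timer slot. A caller would typically do:
 *
 *     int timer_index = next_free_host_timer();
 *     if (timer_index < 0) {
 *         return -TARGET_EAGAIN;   // all GUEST_TIMER_MAX slots busy
 *     }
 *     ...
 *     free_host_timer_slot(timer_index);   // on deletion or failure
 */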

static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}

static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;
    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}
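
/*
 * Editor's illustration (not part of the original source):
 * check_zeroed_user() implements the "extensible struct" convention used
 * by syscalls such as sched_setattr(2): if the guest passes a struct
 * larger than the size QEMU knows about (usize > ksize), the call can
 * still be accepted provided every excess byte is zero. For example,
 * with ksize = 48 and usize = 56, the eight trailing bytes at
 * addr+48..addr+55 are read with get_user_u8(); a single nonzero byte
 * makes the function return 0 (the caller would then fail the syscall),
 * and a return of 1 means "all clear".
 */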

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
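
/*
 * Editor's illustration (not part of the original source): the first
 * declaration below, safe_syscall3(ssize_t, read, ...), generates
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * Unlike a plain syscall(), safe_syscall() cooperates with QEMU's guest
 * signal handling so a signal arriving around the blocking call can
 * restart the syscall (QEMU_ERESTARTSYS) without a race window.
 */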

safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
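
/*
 * Editor's sketch (not part of the original source): per the comment
 * above, a record-lock query goes through the 64-bit variants, e.g.
 *
 *     struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * so file offsets keep their full 64-bit width on both 32-bit and
 * 64-bit hosts.
 */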

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
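
/*
 * Editor's illustration (not part of the original source): a host value
 * of (SOCK_DGRAM | SOCK_NONBLOCK) converts to
 * (TARGET_SOCK_DGRAM | TARGET_SOCK_NONBLOCK). Only the low nibble is the
 * socket type; the remaining flag bits may sit at different bit
 * positions on the target ABI, hence the per-flag translation.
 */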

static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
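
/*
 * Editor's illustration (not part of the original source): for
 * select(n = 70) with a 32-bit guest (TARGET_ABI_BITS = 32),
 * nw = DIV_ROUND_UP(70, 32) = 3, so three abi_ulong words are locked,
 * and fd k lives at bit (k % 32) of word (k / 32) in guest byte order.
 * The loops above rebuild the host fd_set bit by bit because host and
 * guest may disagree on both word size and endianness.
 */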

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
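
/*
 * Editor's illustration (not part of the original source): on an Alpha
 * host (HOST_HZ = 1024) running a guest with TARGET_HZ = 100, a value
 * of 2048 host ticks becomes (2048 * 100) / 1024 = 200 guest ticks; the
 * int64_t cast keeps the intermediate product from overflowing a
 * 32-bit long.
 */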

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif

#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif

static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * The Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
            (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1771 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1772 struct target_msghdr *target_msgh)
1774 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1775 abi_long msg_controllen;
1776 abi_ulong target_cmsg_addr;
1777 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1778 socklen_t space = 0;
1780 msg_controllen = tswapal(target_msgh->msg_controllen);
1781 if (msg_controllen < sizeof (struct target_cmsghdr))
1782 goto the_end;
1783 target_cmsg_addr = tswapal(target_msgh->msg_control);
1784 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1785 target_cmsg_start = target_cmsg;
1786 if (!target_cmsg)
1787 return -TARGET_EFAULT;
1789 while (cmsg && target_cmsg) {
1790 void *data = CMSG_DATA(cmsg);
1791 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1793 int len = tswapal(target_cmsg->cmsg_len)
1794 - sizeof(struct target_cmsghdr);
1796 space += CMSG_SPACE(len);
1797 if (space > msgh->msg_controllen) {
1798 space -= CMSG_SPACE(len);
1799 /* This is a QEMU bug, since we allocated the payload
1800 * area ourselves (unlike overflow in host-to-target
1801 * conversion, which is just the guest giving us a buffer
1802 * that's too small). It can't happen for the payload types
1803 * we currently support; if it becomes an issue in future
1804 * we would need to improve our allocation strategy to
1805 * something more intelligent than "twice the size of the
1806 * target buffer we're reading from".
1808 qemu_log_mask(LOG_UNIMP,
1809 ("Unsupported ancillary data %d/%d: "
1810 "unhandled msg size\n"),
1811 tswap32(target_cmsg->cmsg_level),
1812 tswap32(target_cmsg->cmsg_type));
1813 break;
1816 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1817 cmsg->cmsg_level = SOL_SOCKET;
1818 } else {
1819 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1821 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1822 cmsg->cmsg_len = CMSG_LEN(len);
1824 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1825 int *fd = (int *)data;
1826 int *target_fd = (int *)target_data;
1827 int i, numfds = len / sizeof(int);
1829 for (i = 0; i < numfds; i++) {
1830 __get_user(fd[i], target_fd + i);
1832 } else if (cmsg->cmsg_level == SOL_SOCKET
1833 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1834 struct ucred *cred = (struct ucred *)data;
1835 struct target_ucred *target_cred =
1836 (struct target_ucred *)target_data;
1838 __get_user(cred->pid, &target_cred->pid);
1839 __get_user(cred->uid, &target_cred->uid);
1840 __get_user(cred->gid, &target_cred->gid);
1841 } else {
1842 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1843 cmsg->cmsg_level, cmsg->cmsg_type);
1844 memcpy(data, target_data, len);
1847 cmsg = CMSG_NXTHDR(msgh, cmsg);
1848 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1849 target_cmsg_start);
1851 unlock_user(target_cmsg, target_cmsg_addr, 0);
1852 the_end:
1853 msgh->msg_controllen = space;
1854 return 0;
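/*
 * Editor's sketch, not part of the original file: the guest-side pattern
 * that the SCM_RIGHTS branch above has to convert. A guest builds this
 * control message with its *target* CMSG macros; the loop above re-packs
 * it in the host layout, swapping cmsg_level/cmsg_type/cmsg_len and
 * copying each fd with __get_user(). The function below is an ordinary
 * user-space example using host macros for brevity; real code would also
 * make sure msg_control is suitably aligned.
 */
static int example_send_fd(int sock, int fd_to_pass)
{
    char dummy = 0;
    char control[CMSG_SPACE(sizeof(int))];
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = control, .msg_controllen = sizeof(control),
    };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

    cmsg->cmsg_level = SOL_SOCKET;      /* the SCM_RIGHTS path above */
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

    return sendmsg(sock, &msg, 0);
}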
1857 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1858 struct msghdr *msgh)
1860 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1861 abi_long msg_controllen;
1862 abi_ulong target_cmsg_addr;
1863 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1864 socklen_t space = 0;
1866 msg_controllen = tswapal(target_msgh->msg_controllen);
1867 if (msg_controllen < sizeof (struct target_cmsghdr))
1868 goto the_end;
1869 target_cmsg_addr = tswapal(target_msgh->msg_control);
1870 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1871 target_cmsg_start = target_cmsg;
1872 if (!target_cmsg)
1873 return -TARGET_EFAULT;
1875 while (cmsg && target_cmsg) {
1876 void *data = CMSG_DATA(cmsg);
1877 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1879 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1880 int tgt_len, tgt_space;
1882 /* We never copy a half-header but may copy half-data;
1883 * this is Linux's behaviour in put_cmsg(). Note that
1884 * truncation here is a guest problem (which we report
1885 * to the guest via the CTRUNC bit), unlike truncation
1886 * in target_to_host_cmsg, which is a QEMU bug.
1887 */
1888 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1889 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1890 break;
1893 if (cmsg->cmsg_level == SOL_SOCKET) {
1894 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1895 } else {
1896 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1898 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1900 /* Payload types which need a different size of payload on
1901 * the target must adjust tgt_len here.
1902 */
1903 tgt_len = len;
1904 switch (cmsg->cmsg_level) {
1905 case SOL_SOCKET:
1906 switch (cmsg->cmsg_type) {
1907 case SO_TIMESTAMP:
1908 tgt_len = sizeof(struct target_timeval);
1909 break;
1910 default:
1911 break;
1913 break;
1914 default:
1915 break;
1918 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1919 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1920 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1923 /* We must now copy-and-convert len bytes of payload
1924 * into tgt_len bytes of destination space. Bear in mind
1925 * that in both source and destination we may be dealing
1926 * with a truncated value!
1927 */
1928 switch (cmsg->cmsg_level) {
1929 case SOL_SOCKET:
1930 switch (cmsg->cmsg_type) {
1931 case SCM_RIGHTS:
1933 int *fd = (int *)data;
1934 int *target_fd = (int *)target_data;
1935 int i, numfds = tgt_len / sizeof(int);
1937 for (i = 0; i < numfds; i++) {
1938 __put_user(fd[i], target_fd + i);
1940 break;
1942 case SO_TIMESTAMP:
1944 struct timeval *tv = (struct timeval *)data;
1945 struct target_timeval *target_tv =
1946 (struct target_timeval *)target_data;
1948 if (len != sizeof(struct timeval) ||
1949 tgt_len != sizeof(struct target_timeval)) {
1950 goto unimplemented;
1953 /* copy struct timeval to target */
1954 __put_user(tv->tv_sec, &target_tv->tv_sec);
1955 __put_user(tv->tv_usec, &target_tv->tv_usec);
1956 break;
1958 case SCM_CREDENTIALS:
1960 struct ucred *cred = (struct ucred *)data;
1961 struct target_ucred *target_cred =
1962 (struct target_ucred *)target_data;
1964 __put_user(cred->pid, &target_cred->pid);
1965 __put_user(cred->uid, &target_cred->uid);
1966 __put_user(cred->gid, &target_cred->gid);
1967 break;
1969 default:
1970 goto unimplemented;
1972 break;
1974 case SOL_IP:
1975 switch (cmsg->cmsg_type) {
1976 case IP_TTL:
1978 uint32_t *v = (uint32_t *)data;
1979 uint32_t *t_int = (uint32_t *)target_data;
1981 if (len != sizeof(uint32_t) ||
1982 tgt_len != sizeof(uint32_t)) {
1983 goto unimplemented;
1985 __put_user(*v, t_int);
1986 break;
1988 case IP_RECVERR:
1990 struct errhdr_t {
1991 struct sock_extended_err ee;
1992 struct sockaddr_in offender;
1994 struct errhdr_t *errh = (struct errhdr_t *)data;
1995 struct errhdr_t *target_errh =
1996 (struct errhdr_t *)target_data;
1998 if (len != sizeof(struct errhdr_t) ||
1999 tgt_len != sizeof(struct errhdr_t)) {
2000 goto unimplemented;
2002 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2003 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2004 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2005 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2006 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2007 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2008 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2009 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2010 (void *) &errh->offender, sizeof(errh->offender));
2011 break;
2013 default:
2014 goto unimplemented;
2016 break;
2018 case SOL_IPV6:
2019 switch (cmsg->cmsg_type) {
2020 case IPV6_HOPLIMIT:
2022 uint32_t *v = (uint32_t *)data;
2023 uint32_t *t_int = (uint32_t *)target_data;
2025 if (len != sizeof(uint32_t) ||
2026 tgt_len != sizeof(uint32_t)) {
2027 goto unimplemented;
2029 __put_user(*v, t_int);
2030 break;
2032 case IPV6_RECVERR:
2034 struct errhdr6_t {
2035 struct sock_extended_err ee;
2036 struct sockaddr_in6 offender;
2038 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2039 struct errhdr6_t *target_errh =
2040 (struct errhdr6_t *)target_data;
2042 if (len != sizeof(struct errhdr6_t) ||
2043 tgt_len != sizeof(struct errhdr6_t)) {
2044 goto unimplemented;
2046 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2047 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2048 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2049 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2050 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2051 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2052 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2053 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2054 (void *) &errh->offender, sizeof(errh->offender));
2055 break;
2057 default:
2058 goto unimplemented;
2060 break;
2062 default:
2063 unimplemented:
2064 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2065 cmsg->cmsg_level, cmsg->cmsg_type);
2066 memcpy(target_data, data, MIN(len, tgt_len));
2067 if (tgt_len > len) {
2068 memset(target_data + len, 0, tgt_len - len);
2072 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2073 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2074 if (msg_controllen < tgt_space) {
2075 tgt_space = msg_controllen;
2077 msg_controllen -= tgt_space;
2078 space += tgt_space;
2079 cmsg = CMSG_NXTHDR(msgh, cmsg);
2080 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2081 target_cmsg_start);
2083 unlock_user(target_cmsg, target_cmsg_addr, space);
2084 the_end:
2085 target_msgh->msg_controllen = tswapal(space);
2086 return 0;
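/*
 * Editor's worked example for the size bookkeeping above (assuming a
 * 64-bit host, where sizeof(struct cmsghdr) == 16 and alignment is 8):
 * for a 12-byte payload,
 *     CMSG_LEN(12)   == 16 + 12          == 28
 *     CMSG_SPACE(12) == 16 + ALIGN(12,8) == 32
 * cmsg_len records the unpadded CMSG_LEN() while successive messages
 * advance by CMSG_SPACE(); the MSG_CTRUNC path fires once the guest's
 * msg_controllen can no longer hold TARGET_CMSG_LEN(tgt_len).
 */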
2089 /* do_setsockopt() must return target values and target errnos. */
2090 static abi_long do_setsockopt(int sockfd, int level, int optname,
2091 abi_ulong optval_addr, socklen_t optlen)
2093 abi_long ret;
2094 int val;
2095 struct ip_mreqn *ip_mreq;
2096 struct ip_mreq_source *ip_mreq_source;
2098 switch(level) {
2099 case SOL_TCP:
2100 case SOL_UDP:
2101 /* TCP and UDP options all take an 'int' value. */
2102 if (optlen < sizeof(uint32_t))
2103 return -TARGET_EINVAL;
2105 if (get_user_u32(val, optval_addr))
2106 return -TARGET_EFAULT;
2107 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2108 break;
2109 case SOL_IP:
2110 switch(optname) {
2111 case IP_TOS:
2112 case IP_TTL:
2113 case IP_HDRINCL:
2114 case IP_ROUTER_ALERT:
2115 case IP_RECVOPTS:
2116 case IP_RETOPTS:
2117 case IP_PKTINFO:
2118 case IP_MTU_DISCOVER:
2119 case IP_RECVERR:
2120 case IP_RECVTTL:
2121 case IP_RECVTOS:
2122 #ifdef IP_FREEBIND
2123 case IP_FREEBIND:
2124 #endif
2125 case IP_MULTICAST_TTL:
2126 case IP_MULTICAST_LOOP:
2127 val = 0;
2128 if (optlen >= sizeof(uint32_t)) {
2129 if (get_user_u32(val, optval_addr))
2130 return -TARGET_EFAULT;
2131 } else if (optlen >= 1) {
2132 if (get_user_u8(val, optval_addr))
2133 return -TARGET_EFAULT;
2135 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2136 break;
2137 case IP_ADD_MEMBERSHIP:
2138 case IP_DROP_MEMBERSHIP:
2139 if (optlen < sizeof (struct target_ip_mreq) ||
2140 optlen > sizeof (struct target_ip_mreqn))
2141 return -TARGET_EINVAL;
2143 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2144 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2145 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2146 break;
2148 case IP_BLOCK_SOURCE:
2149 case IP_UNBLOCK_SOURCE:
2150 case IP_ADD_SOURCE_MEMBERSHIP:
2151 case IP_DROP_SOURCE_MEMBERSHIP:
2152 if (optlen != sizeof (struct target_ip_mreq_source))
2153 return -TARGET_EINVAL;
2155 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2156 if (!ip_mreq_source) {
2157 return -TARGET_EFAULT;
2159 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2160 unlock_user (ip_mreq_source, optval_addr, 0);
2161 break;
2163 default:
2164 goto unimplemented;
2166 break;
2167 case SOL_IPV6:
2168 switch (optname) {
2169 case IPV6_MTU_DISCOVER:
2170 case IPV6_MTU:
2171 case IPV6_V6ONLY:
2172 case IPV6_RECVPKTINFO:
2173 case IPV6_UNICAST_HOPS:
2174 case IPV6_MULTICAST_HOPS:
2175 case IPV6_MULTICAST_LOOP:
2176 case IPV6_RECVERR:
2177 case IPV6_RECVHOPLIMIT:
2178 case IPV6_2292HOPLIMIT:
2179 case IPV6_CHECKSUM:
2180 case IPV6_ADDRFORM:
2181 case IPV6_2292PKTINFO:
2182 case IPV6_RECVTCLASS:
2183 case IPV6_RECVRTHDR:
2184 case IPV6_2292RTHDR:
2185 case IPV6_RECVHOPOPTS:
2186 case IPV6_2292HOPOPTS:
2187 case IPV6_RECVDSTOPTS:
2188 case IPV6_2292DSTOPTS:
2189 case IPV6_TCLASS:
2190 case IPV6_ADDR_PREFERENCES:
2191 #ifdef IPV6_RECVPATHMTU
2192 case IPV6_RECVPATHMTU:
2193 #endif
2194 #ifdef IPV6_TRANSPARENT
2195 case IPV6_TRANSPARENT:
2196 #endif
2197 #ifdef IPV6_FREEBIND
2198 case IPV6_FREEBIND:
2199 #endif
2200 #ifdef IPV6_RECVORIGDSTADDR
2201 case IPV6_RECVORIGDSTADDR:
2202 #endif
2203 val = 0;
2204 if (optlen < sizeof(uint32_t)) {
2205 return -TARGET_EINVAL;
2207 if (get_user_u32(val, optval_addr)) {
2208 return -TARGET_EFAULT;
2210 ret = get_errno(setsockopt(sockfd, level, optname,
2211 &val, sizeof(val)));
2212 break;
2213 case IPV6_PKTINFO:
2215 struct in6_pktinfo pki;
2217 if (optlen < sizeof(pki)) {
2218 return -TARGET_EINVAL;
2221 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2222 return -TARGET_EFAULT;
2225 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2227 ret = get_errno(setsockopt(sockfd, level, optname,
2228 &pki, sizeof(pki)));
2229 break;
2231 case IPV6_ADD_MEMBERSHIP:
2232 case IPV6_DROP_MEMBERSHIP:
2234 struct ipv6_mreq ipv6mreq;
2236 if (optlen < sizeof(ipv6mreq)) {
2237 return -TARGET_EINVAL;
2240 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2241 return -TARGET_EFAULT;
2244 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2246 ret = get_errno(setsockopt(sockfd, level, optname,
2247 &ipv6mreq, sizeof(ipv6mreq)));
2248 break;
2250 default:
2251 goto unimplemented;
2253 break;
2254 case SOL_ICMPV6:
2255 switch (optname) {
2256 case ICMPV6_FILTER:
2258 struct icmp6_filter icmp6f;
2260 if (optlen > sizeof(icmp6f)) {
2261 optlen = sizeof(icmp6f);
2264 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2265 return -TARGET_EFAULT;
2268 for (val = 0; val < 8; val++) {
2269 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2272 ret = get_errno(setsockopt(sockfd, level, optname,
2273 &icmp6f, optlen));
2274 break;
2276 default:
2277 goto unimplemented;
2279 break;
2280 case SOL_RAW:
2281 switch (optname) {
2282 case ICMP_FILTER:
2283 case IPV6_CHECKSUM:
2284 /* These take a u32 value. */
2285 if (optlen < sizeof(uint32_t)) {
2286 return -TARGET_EINVAL;
2289 if (get_user_u32(val, optval_addr)) {
2290 return -TARGET_EFAULT;
2292 ret = get_errno(setsockopt(sockfd, level, optname,
2293 &val, sizeof(val)));
2294 break;
2296 default:
2297 goto unimplemented;
2299 break;
2300 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2301 case SOL_ALG:
2302 switch (optname) {
2303 case ALG_SET_KEY:
2305 char *alg_key = g_malloc(optlen);
2307 if (!alg_key) {
2308 return -TARGET_ENOMEM;
2310 if (copy_from_user(alg_key, optval_addr, optlen)) {
2311 g_free(alg_key);
2312 return -TARGET_EFAULT;
2314 ret = get_errno(setsockopt(sockfd, level, optname,
2315 alg_key, optlen));
2316 g_free(alg_key);
2317 break;
2319 case ALG_SET_AEAD_AUTHSIZE:
2321 ret = get_errno(setsockopt(sockfd, level, optname,
2322 NULL, optlen));
2323 break;
2325 default:
2326 goto unimplemented;
2328 break;
2329 #endif
2330 case TARGET_SOL_SOCKET:
2331 switch (optname) {
2332 case TARGET_SO_RCVTIMEO:
2334 struct timeval tv;
2336 optname = SO_RCVTIMEO;
2338 set_timeout:
2339 if (optlen != sizeof(struct target_timeval)) {
2340 return -TARGET_EINVAL;
2343 if (copy_from_user_timeval(&tv, optval_addr)) {
2344 return -TARGET_EFAULT;
2347 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2348 &tv, sizeof(tv)));
2349 return ret;
2351 case TARGET_SO_SNDTIMEO:
2352 optname = SO_SNDTIMEO;
2353 goto set_timeout;
2354 case TARGET_SO_ATTACH_FILTER:
2356 struct target_sock_fprog *tfprog;
2357 struct target_sock_filter *tfilter;
2358 struct sock_fprog fprog;
2359 struct sock_filter *filter;
2360 int i;
2362 if (optlen != sizeof(*tfprog)) {
2363 return -TARGET_EINVAL;
2365 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2366 return -TARGET_EFAULT;
2368 if (!lock_user_struct(VERIFY_READ, tfilter,
2369 tswapal(tfprog->filter), 0)) {
2370 unlock_user_struct(tfprog, optval_addr, 1);
2371 return -TARGET_EFAULT;
2374 fprog.len = tswap16(tfprog->len);
2375 filter = g_try_new(struct sock_filter, fprog.len);
2376 if (filter == NULL) {
2377 unlock_user_struct(tfilter, tfprog->filter, 1);
2378 unlock_user_struct(tfprog, optval_addr, 1);
2379 return -TARGET_ENOMEM;
2381 for (i = 0; i < fprog.len; i++) {
2382 filter[i].code = tswap16(tfilter[i].code);
2383 filter[i].jt = tfilter[i].jt;
2384 filter[i].jf = tfilter[i].jf;
2385 filter[i].k = tswap32(tfilter[i].k);
2387 fprog.filter = filter;
2389 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2390 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2391 g_free(filter);
2393 unlock_user_struct(tfilter, tfprog->filter, 1);
2394 unlock_user_struct(tfprog, optval_addr, 1);
2395 return ret;
2397 case TARGET_SO_BINDTODEVICE:
2399 char *dev_ifname, *addr_ifname;
2401 if (optlen > IFNAMSIZ - 1) {
2402 optlen = IFNAMSIZ - 1;
2404 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2405 if (!dev_ifname) {
2406 return -TARGET_EFAULT;
2408 optname = SO_BINDTODEVICE;
2409 addr_ifname = alloca(IFNAMSIZ);
2410 memcpy(addr_ifname, dev_ifname, optlen);
2411 addr_ifname[optlen] = 0;
2412 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2413 addr_ifname, optlen));
2414 unlock_user (dev_ifname, optval_addr, 0);
2415 return ret;
2417 case TARGET_SO_LINGER:
2419 struct linger lg;
2420 struct target_linger *tlg;
2422 if (optlen != sizeof(struct target_linger)) {
2423 return -TARGET_EINVAL;
2425 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2426 return -TARGET_EFAULT;
2428 __get_user(lg.l_onoff, &tlg->l_onoff);
2429 __get_user(lg.l_linger, &tlg->l_linger);
2430 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2431 &lg, sizeof(lg)));
2432 unlock_user_struct(tlg, optval_addr, 0);
2433 return ret;
2435 /* Options with 'int' argument. */
2436 case TARGET_SO_DEBUG:
2437 optname = SO_DEBUG;
2438 break;
2439 case TARGET_SO_REUSEADDR:
2440 optname = SO_REUSEADDR;
2441 break;
2442 #ifdef SO_REUSEPORT
2443 case TARGET_SO_REUSEPORT:
2444 optname = SO_REUSEPORT;
2445 break;
2446 #endif
2447 case TARGET_SO_TYPE:
2448 optname = SO_TYPE;
2449 break;
2450 case TARGET_SO_ERROR:
2451 optname = SO_ERROR;
2452 break;
2453 case TARGET_SO_DONTROUTE:
2454 optname = SO_DONTROUTE;
2455 break;
2456 case TARGET_SO_BROADCAST:
2457 optname = SO_BROADCAST;
2458 break;
2459 case TARGET_SO_SNDBUF:
2460 optname = SO_SNDBUF;
2461 break;
2462 case TARGET_SO_SNDBUFFORCE:
2463 optname = SO_SNDBUFFORCE;
2464 break;
2465 case TARGET_SO_RCVBUF:
2466 optname = SO_RCVBUF;
2467 break;
2468 case TARGET_SO_RCVBUFFORCE:
2469 optname = SO_RCVBUFFORCE;
2470 break;
2471 case TARGET_SO_KEEPALIVE:
2472 optname = SO_KEEPALIVE;
2473 break;
2474 case TARGET_SO_OOBINLINE:
2475 optname = SO_OOBINLINE;
2476 break;
2477 case TARGET_SO_NO_CHECK:
2478 optname = SO_NO_CHECK;
2479 break;
2480 case TARGET_SO_PRIORITY:
2481 optname = SO_PRIORITY;
2482 break;
2483 #ifdef SO_BSDCOMPAT
2484 case TARGET_SO_BSDCOMPAT:
2485 optname = SO_BSDCOMPAT;
2486 break;
2487 #endif
2488 case TARGET_SO_PASSCRED:
2489 optname = SO_PASSCRED;
2490 break;
2491 case TARGET_SO_PASSSEC:
2492 optname = SO_PASSSEC;
2493 break;
2494 case TARGET_SO_TIMESTAMP:
2495 optname = SO_TIMESTAMP;
2496 break;
2497 case TARGET_SO_RCVLOWAT:
2498 optname = SO_RCVLOWAT;
2499 break;
2500 default:
2501 goto unimplemented;
2503 if (optlen < sizeof(uint32_t))
2504 return -TARGET_EINVAL;
2506 if (get_user_u32(val, optval_addr))
2507 return -TARGET_EFAULT;
2508 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2509 break;
2510 #ifdef SOL_NETLINK
2511 case SOL_NETLINK:
2512 switch (optname) {
2513 case NETLINK_PKTINFO:
2514 case NETLINK_ADD_MEMBERSHIP:
2515 case NETLINK_DROP_MEMBERSHIP:
2516 case NETLINK_BROADCAST_ERROR:
2517 case NETLINK_NO_ENOBUFS:
2518 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2519 case NETLINK_LISTEN_ALL_NSID:
2520 case NETLINK_CAP_ACK:
2521 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2522 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2523 case NETLINK_EXT_ACK:
2524 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2525 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2526 case NETLINK_GET_STRICT_CHK:
2527 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2528 break;
2529 default:
2530 goto unimplemented;
2532 val = 0;
2533 if (optlen < sizeof(uint32_t)) {
2534 return -TARGET_EINVAL;
2536 if (get_user_u32(val, optval_addr)) {
2537 return -TARGET_EFAULT;
2539 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2540 sizeof(val)));
2541 break;
2542 #endif /* SOL_NETLINK */
2543 default:
2544 unimplemented:
2545 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2546 level, optname);
2547 ret = -TARGET_ENOPROTOOPT;
2549 return ret;
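/*
 * Editor's sketch, not part of the original file: what the
 * TARGET_SO_RCVTIMEO "set_timeout" path above boils down to. The guest
 * supplies a struct target_timeval in its own ABI; copy_from_user_timeval()
 * byte-swaps tv_sec/tv_usec into a host struct timeval, which is then
 * handed to the host setsockopt() unchanged. The helper name is
 * hypothetical.
 */
static int example_set_rcvtimeo(int fd, time_t sec, suseconds_t usec)
{
    struct timeval tv = { .tv_sec = sec, .tv_usec = usec };

    /* host-side equivalent of the converted guest request */
    return setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}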
2552 /* do_getsockopt() must return target values and target errnos. */
2553 static abi_long do_getsockopt(int sockfd, int level, int optname,
2554 abi_ulong optval_addr, abi_ulong optlen)
2556 abi_long ret;
2557 int len, val;
2558 socklen_t lv;
2560 switch(level) {
2561 case TARGET_SOL_SOCKET:
2562 level = SOL_SOCKET;
2563 switch (optname) {
2564 /* These don't just return a single integer */
2565 case TARGET_SO_PEERNAME:
2566 goto unimplemented;
2567 case TARGET_SO_RCVTIMEO: {
2568 struct timeval tv;
2569 socklen_t tvlen;
2571 optname = SO_RCVTIMEO;
2573 get_timeout:
2574 if (get_user_u32(len, optlen)) {
2575 return -TARGET_EFAULT;
2577 if (len < 0) {
2578 return -TARGET_EINVAL;
2581 tvlen = sizeof(tv);
2582 ret = get_errno(getsockopt(sockfd, level, optname,
2583 &tv, &tvlen));
2584 if (ret < 0) {
2585 return ret;
2587 if (len > sizeof(struct target_timeval)) {
2588 len = sizeof(struct target_timeval);
2590 if (copy_to_user_timeval(optval_addr, &tv)) {
2591 return -TARGET_EFAULT;
2593 if (put_user_u32(len, optlen)) {
2594 return -TARGET_EFAULT;
2596 break;
2598 case TARGET_SO_SNDTIMEO:
2599 optname = SO_SNDTIMEO;
2600 goto get_timeout;
2601 case TARGET_SO_PEERCRED: {
2602 struct ucred cr;
2603 socklen_t crlen;
2604 struct target_ucred *tcr;
2606 if (get_user_u32(len, optlen)) {
2607 return -TARGET_EFAULT;
2609 if (len < 0) {
2610 return -TARGET_EINVAL;
2613 crlen = sizeof(cr);
2614 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2615 &cr, &crlen));
2616 if (ret < 0) {
2617 return ret;
2619 if (len > crlen) {
2620 len = crlen;
2622 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2623 return -TARGET_EFAULT;
2625 __put_user(cr.pid, &tcr->pid);
2626 __put_user(cr.uid, &tcr->uid);
2627 __put_user(cr.gid, &tcr->gid);
2628 unlock_user_struct(tcr, optval_addr, 1);
2629 if (put_user_u32(len, optlen)) {
2630 return -TARGET_EFAULT;
2632 break;
2634 case TARGET_SO_PEERSEC: {
2635 char *name;
2637 if (get_user_u32(len, optlen)) {
2638 return -TARGET_EFAULT;
2640 if (len < 0) {
2641 return -TARGET_EINVAL;
2643 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2644 if (!name) {
2645 return -TARGET_EFAULT;
2647 lv = len;
2648 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2649 name, &lv));
2650 if (put_user_u32(lv, optlen)) {
2651 ret = -TARGET_EFAULT;
2653 unlock_user(name, optval_addr, lv);
2654 break;
2656 case TARGET_SO_LINGER:
2658 struct linger lg;
2659 socklen_t lglen;
2660 struct target_linger *tlg;
2662 if (get_user_u32(len, optlen)) {
2663 return -TARGET_EFAULT;
2665 if (len < 0) {
2666 return -TARGET_EINVAL;
2669 lglen = sizeof(lg);
2670 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2671 &lg, &lglen));
2672 if (ret < 0) {
2673 return ret;
2675 if (len > lglen) {
2676 len = lglen;
2678 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2679 return -TARGET_EFAULT;
2681 __put_user(lg.l_onoff, &tlg->l_onoff);
2682 __put_user(lg.l_linger, &tlg->l_linger);
2683 unlock_user_struct(tlg, optval_addr, 1);
2684 if (put_user_u32(len, optlen)) {
2685 return -TARGET_EFAULT;
2687 break;
2689 /* Options with 'int' argument. */
2690 case TARGET_SO_DEBUG:
2691 optname = SO_DEBUG;
2692 goto int_case;
2693 case TARGET_SO_REUSEADDR:
2694 optname = SO_REUSEADDR;
2695 goto int_case;
2696 #ifdef SO_REUSEPORT
2697 case TARGET_SO_REUSEPORT:
2698 optname = SO_REUSEPORT;
2699 goto int_case;
2700 #endif
2701 case TARGET_SO_TYPE:
2702 optname = SO_TYPE;
2703 goto int_case;
2704 case TARGET_SO_ERROR:
2705 optname = SO_ERROR;
2706 goto int_case;
2707 case TARGET_SO_DONTROUTE:
2708 optname = SO_DONTROUTE;
2709 goto int_case;
2710 case TARGET_SO_BROADCAST:
2711 optname = SO_BROADCAST;
2712 goto int_case;
2713 case TARGET_SO_SNDBUF:
2714 optname = SO_SNDBUF;
2715 goto int_case;
2716 case TARGET_SO_RCVBUF:
2717 optname = SO_RCVBUF;
2718 goto int_case;
2719 case TARGET_SO_KEEPALIVE:
2720 optname = SO_KEEPALIVE;
2721 goto int_case;
2722 case TARGET_SO_OOBINLINE:
2723 optname = SO_OOBINLINE;
2724 goto int_case;
2725 case TARGET_SO_NO_CHECK:
2726 optname = SO_NO_CHECK;
2727 goto int_case;
2728 case TARGET_SO_PRIORITY:
2729 optname = SO_PRIORITY;
2730 goto int_case;
2731 #ifdef SO_BSDCOMPAT
2732 case TARGET_SO_BSDCOMPAT:
2733 optname = SO_BSDCOMPAT;
2734 goto int_case;
2735 #endif
2736 case TARGET_SO_PASSCRED:
2737 optname = SO_PASSCRED;
2738 goto int_case;
2739 case TARGET_SO_TIMESTAMP:
2740 optname = SO_TIMESTAMP;
2741 goto int_case;
2742 case TARGET_SO_RCVLOWAT:
2743 optname = SO_RCVLOWAT;
2744 goto int_case;
2745 case TARGET_SO_ACCEPTCONN:
2746 optname = SO_ACCEPTCONN;
2747 goto int_case;
2748 case TARGET_SO_PROTOCOL:
2749 optname = SO_PROTOCOL;
2750 goto int_case;
2751 case TARGET_SO_DOMAIN:
2752 optname = SO_DOMAIN;
2753 goto int_case;
2754 default:
2755 goto int_case;
2757 break;
2758 case SOL_TCP:
2759 case SOL_UDP:
2760 /* TCP and UDP options all take an 'int' value. */
2761 int_case:
2762 if (get_user_u32(len, optlen))
2763 return -TARGET_EFAULT;
2764 if (len < 0)
2765 return -TARGET_EINVAL;
2766 lv = sizeof(lv);
2767 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2768 if (ret < 0)
2769 return ret;
2770 if (optname == SO_TYPE) {
2771 val = host_to_target_sock_type(val);
2773 if (len > lv)
2774 len = lv;
2775 if (len == 4) {
2776 if (put_user_u32(val, optval_addr))
2777 return -TARGET_EFAULT;
2778 } else {
2779 if (put_user_u8(val, optval_addr))
2780 return -TARGET_EFAULT;
2782 if (put_user_u32(len, optlen))
2783 return -TARGET_EFAULT;
2784 break;
2785 case SOL_IP:
2786 switch(optname) {
2787 case IP_TOS:
2788 case IP_TTL:
2789 case IP_HDRINCL:
2790 case IP_ROUTER_ALERT:
2791 case IP_RECVOPTS:
2792 case IP_RETOPTS:
2793 case IP_PKTINFO:
2794 case IP_MTU_DISCOVER:
2795 case IP_RECVERR:
2796 case IP_RECVTOS:
2797 #ifdef IP_FREEBIND
2798 case IP_FREEBIND:
2799 #endif
2800 case IP_MULTICAST_TTL:
2801 case IP_MULTICAST_LOOP:
2802 if (get_user_u32(len, optlen))
2803 return -TARGET_EFAULT;
2804 if (len < 0)
2805 return -TARGET_EINVAL;
2806 lv = sizeof(lv);
2807 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2808 if (ret < 0)
2809 return ret;
2810 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2811 len = 1;
2812 if (put_user_u32(len, optlen)
2813 || put_user_u8(val, optval_addr))
2814 return -TARGET_EFAULT;
2815 } else {
2816 if (len > sizeof(int))
2817 len = sizeof(int);
2818 if (put_user_u32(len, optlen)
2819 || put_user_u32(val, optval_addr))
2820 return -TARGET_EFAULT;
2822 break;
2823 default:
2824 ret = -TARGET_ENOPROTOOPT;
2825 break;
2827 break;
2828 case SOL_IPV6:
2829 switch (optname) {
2830 case IPV6_MTU_DISCOVER:
2831 case IPV6_MTU:
2832 case IPV6_V6ONLY:
2833 case IPV6_RECVPKTINFO:
2834 case IPV6_UNICAST_HOPS:
2835 case IPV6_MULTICAST_HOPS:
2836 case IPV6_MULTICAST_LOOP:
2837 case IPV6_RECVERR:
2838 case IPV6_RECVHOPLIMIT:
2839 case IPV6_2292HOPLIMIT:
2840 case IPV6_CHECKSUM:
2841 case IPV6_ADDRFORM:
2842 case IPV6_2292PKTINFO:
2843 case IPV6_RECVTCLASS:
2844 case IPV6_RECVRTHDR:
2845 case IPV6_2292RTHDR:
2846 case IPV6_RECVHOPOPTS:
2847 case IPV6_2292HOPOPTS:
2848 case IPV6_RECVDSTOPTS:
2849 case IPV6_2292DSTOPTS:
2850 case IPV6_TCLASS:
2851 case IPV6_ADDR_PREFERENCES:
2852 #ifdef IPV6_RECVPATHMTU
2853 case IPV6_RECVPATHMTU:
2854 #endif
2855 #ifdef IPV6_TRANSPARENT
2856 case IPV6_TRANSPARENT:
2857 #endif
2858 #ifdef IPV6_FREEBIND
2859 case IPV6_FREEBIND:
2860 #endif
2861 #ifdef IPV6_RECVORIGDSTADDR
2862 case IPV6_RECVORIGDSTADDR:
2863 #endif
2864 if (get_user_u32(len, optlen))
2865 return -TARGET_EFAULT;
2866 if (len < 0)
2867 return -TARGET_EINVAL;
2868 lv = sizeof(lv);
2869 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2870 if (ret < 0)
2871 return ret;
2872 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2873 len = 1;
2874 if (put_user_u32(len, optlen)
2875 || put_user_u8(val, optval_addr))
2876 return -TARGET_EFAULT;
2877 } else {
2878 if (len > sizeof(int))
2879 len = sizeof(int);
2880 if (put_user_u32(len, optlen)
2881 || put_user_u32(val, optval_addr))
2882 return -TARGET_EFAULT;
2884 break;
2885 default:
2886 ret = -TARGET_ENOPROTOOPT;
2887 break;
2889 break;
2890 #ifdef SOL_NETLINK
2891 case SOL_NETLINK:
2892 switch (optname) {
2893 case NETLINK_PKTINFO:
2894 case NETLINK_BROADCAST_ERROR:
2895 case NETLINK_NO_ENOBUFS:
2896 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2897 case NETLINK_LISTEN_ALL_NSID:
2898 case NETLINK_CAP_ACK:
2899 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2901 case NETLINK_EXT_ACK:
2902 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2904 case NETLINK_GET_STRICT_CHK:
2905 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2906 if (get_user_u32(len, optlen)) {
2907 return -TARGET_EFAULT;
2909 if (len != sizeof(val)) {
2910 return -TARGET_EINVAL;
2912 lv = len;
2913 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2914 if (ret < 0) {
2915 return ret;
2917 if (put_user_u32(lv, optlen)
2918 || put_user_u32(val, optval_addr)) {
2919 return -TARGET_EFAULT;
2921 break;
2922 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2923 case NETLINK_LIST_MEMBERSHIPS:
2925 uint32_t *results;
2926 int i;
2927 if (get_user_u32(len, optlen)) {
2928 return -TARGET_EFAULT;
2930 if (len < 0) {
2931 return -TARGET_EINVAL;
2933 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2934 if (!results && len > 0) {
2935 return -TARGET_EFAULT;
2937 lv = len;
2938 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2939 if (ret < 0) {
2940 unlock_user(results, optval_addr, 0);
2941 return ret;
2943 /* Swap host endianness to target endianness. */
2944 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2945 results[i] = tswap32(results[i]);
2947 if (put_user_u32(lv, optlen)) {
2948 return -TARGET_EFAULT;
2950 unlock_user(results, optval_addr, 0);
2951 break;
2953 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2954 default:
2955 goto unimplemented;
2957 break;
2958 #endif /* SOL_NETLINK */
2959 default:
2960 unimplemented:
2961 qemu_log_mask(LOG_UNIMP,
2962 "getsockopt level=%d optname=%d not yet supported\n",
2963 level, optname);
2964 ret = -TARGET_EOPNOTSUPP;
2965 break;
2967 return ret;
2970 /* Convert target low/high pair representing file offset into the host
2971 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2972 * as the kernel doesn't handle them either.
2973 */
2974 static void target_to_host_low_high(abi_ulong tlow,
2975 abi_ulong thigh,
2976 unsigned long *hlow,
2977 unsigned long *hhigh)
2979 uint64_t off = tlow |
2980 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2981 TARGET_LONG_BITS / 2;
2983 *hlow = off;
2984 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
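/*
 * Editor's worked example, assuming a 32-bit target on a 64-bit host
 * (TARGET_LONG_BITS == 32, HOST_LONG_BITS == 64): a guest offset of
 * 0x0123456789abcdef arrives split as
 *     tlow  = 0x89abcdef, thigh = 0x01234567
 * The two shifts by TARGET_LONG_BITS / 2 (16 + 16 == 32) rebuild
 *     off = 0x89abcdef | (0x01234567ULL << 32) == 0x0123456789abcdef
 * and on the 64-bit host *hlow == off while *hhigh == 0. The shift is
 * split in two because a single shift by TARGET_LONG_BITS (or
 * HOST_LONG_BITS) would be undefined behaviour when the count equals
 * the operand width.
 */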
2987 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2988 abi_ulong count, int copy)
2990 struct target_iovec *target_vec;
2991 struct iovec *vec;
2992 abi_ulong total_len, max_len;
2993 int i;
2994 int err = 0;
2995 bool bad_address = false;
2997 if (count == 0) {
2998 errno = 0;
2999 return NULL;
3001 if (count > IOV_MAX) {
3002 errno = EINVAL;
3003 return NULL;
3006 vec = g_try_new0(struct iovec, count);
3007 if (vec == NULL) {
3008 errno = ENOMEM;
3009 return NULL;
3012 target_vec = lock_user(VERIFY_READ, target_addr,
3013 count * sizeof(struct target_iovec), 1);
3014 if (target_vec == NULL) {
3015 err = EFAULT;
3016 goto fail2;
3019 /* ??? If host page size > target page size, this will result in a
3020 value larger than what we can actually support. */
3021 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3022 total_len = 0;
3024 for (i = 0; i < count; i++) {
3025 abi_ulong base = tswapal(target_vec[i].iov_base);
3026 abi_long len = tswapal(target_vec[i].iov_len);
3028 if (len < 0) {
3029 err = EINVAL;
3030 goto fail;
3031 } else if (len == 0) {
3032 /* Zero length pointer is ignored. */
3033 vec[i].iov_base = 0;
3034 } else {
3035 vec[i].iov_base = lock_user(type, base, len, copy);
3036 /* If the first buffer pointer is bad, this is a fault. But
3037 * subsequent bad buffers will result in a partial write; this
3038 * is realized by filling the vector with null pointers and
3039 * zero lengths. */
3040 if (!vec[i].iov_base) {
3041 if (i == 0) {
3042 err = EFAULT;
3043 goto fail;
3044 } else {
3045 bad_address = true;
3048 if (bad_address) {
3049 len = 0;
3051 if (len > max_len - total_len) {
3052 len = max_len - total_len;
3055 vec[i].iov_len = len;
3056 total_len += len;
3059 unlock_user(target_vec, target_addr, 0);
3060 return vec;
3062 fail:
3063 while (--i >= 0) {
3064 if (tswapal(target_vec[i].iov_len) > 0) {
3065 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3068 unlock_user(target_vec, target_addr, 0);
3069 fail2:
3070 g_free(vec);
3071 errno = err;
3072 return NULL;
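/*
 * Editor's worked example of the partial-write rule above: for a guest
 * writev() with two iovecs where the second points at an unmapped page,
 * lock_iovec() yields
 *     vec[0] = { host_ptr, len0 },   vec[1] = { NULL, 0 }
 * so the host syscall performs a short write covering only the first
 * buffer, matching native Linux behaviour; only a bad *first* buffer
 * fails the whole call with EFAULT.
 */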
3075 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3076 abi_ulong count, int copy)
3078 struct target_iovec *target_vec;
3079 int i;
3081 target_vec = lock_user(VERIFY_READ, target_addr,
3082 count * sizeof(struct target_iovec), 1);
3083 if (target_vec) {
3084 for (i = 0; i < count; i++) {
3085 abi_ulong base = tswapal(target_vec[i].iov_base);
3086 abi_long len = tswapal(target_vec[i].iov_len);
3087 if (len < 0) {
3088 break;
3090 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3092 unlock_user(target_vec, target_addr, 0);
3095 g_free(vec);
3098 static inline int target_to_host_sock_type(int *type)
3100 int host_type = 0;
3101 int target_type = *type;
3103 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3104 case TARGET_SOCK_DGRAM:
3105 host_type = SOCK_DGRAM;
3106 break;
3107 case TARGET_SOCK_STREAM:
3108 host_type = SOCK_STREAM;
3109 break;
3110 default:
3111 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3112 break;
3114 if (target_type & TARGET_SOCK_CLOEXEC) {
3115 #if defined(SOCK_CLOEXEC)
3116 host_type |= SOCK_CLOEXEC;
3117 #else
3118 return -TARGET_EINVAL;
3119 #endif
3121 if (target_type & TARGET_SOCK_NONBLOCK) {
3122 #if defined(SOCK_NONBLOCK)
3123 host_type |= SOCK_NONBLOCK;
3124 #elif !defined(O_NONBLOCK)
3125 return -TARGET_EINVAL;
3126 #endif
3128 *type = host_type;
3129 return 0;
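/*
 * Editor's sketch, not part of the original file: translating the type
 * argument of a typical guest socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0)
 * call with the helper above. The base type goes through the
 * TARGET_SOCK_TYPE_MASK switch; each flag is either mapped directly or,
 * when the host lacks SOCK_NONBLOCK, left for sock_flags_fixup() below
 * to emulate via fcntl(). The function name is hypothetical.
 */
static int example_translate_sock_type(void)
{
    int type = TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK;

    if (target_to_host_sock_type(&type) < 0) {
        return -1;   /* host cannot express the requested flags */
    }
    return type;     /* SOCK_STREAM | SOCK_NONBLOCK on a Linux host */
}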
3132 /* Try to emulate socket type flags after socket creation. */
3133 static int sock_flags_fixup(int fd, int target_type)
3135 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3136 if (target_type & TARGET_SOCK_NONBLOCK) {
3137 int flags = fcntl(fd, F_GETFL);
3138 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3139 close(fd);
3140 return -TARGET_EINVAL;
3143 #endif
3144 return fd;
3147 /* do_socket() must return target values and target errnos. */
3148 static abi_long do_socket(int domain, int type, int protocol)
3150 int target_type = type;
3151 int ret;
3153 ret = target_to_host_sock_type(&type);
3154 if (ret) {
3155 return ret;
3158 if (domain == PF_NETLINK && !(
3159 #ifdef CONFIG_RTNETLINK
3160 protocol == NETLINK_ROUTE ||
3161 #endif
3162 protocol == NETLINK_KOBJECT_UEVENT ||
3163 protocol == NETLINK_AUDIT)) {
3164 return -TARGET_EPROTONOSUPPORT;
3167 if (domain == AF_PACKET ||
3168 (domain == AF_INET && type == SOCK_PACKET)) {
3169 protocol = tswap16(protocol);
3172 ret = get_errno(socket(domain, type, protocol));
3173 if (ret >= 0) {
3174 ret = sock_flags_fixup(ret, target_type);
3175 if (type == SOCK_PACKET) {
3176 /* Handle an obsolete case:
3177 * if the socket type is SOCK_PACKET, bind by name.
3178 */
3179 fd_trans_register(ret, &target_packet_trans);
3180 } else if (domain == PF_NETLINK) {
3181 switch (protocol) {
3182 #ifdef CONFIG_RTNETLINK
3183 case NETLINK_ROUTE:
3184 fd_trans_register(ret, &target_netlink_route_trans);
3185 break;
3186 #endif
3187 case NETLINK_KOBJECT_UEVENT:
3188 /* nothing to do: messages are strings */
3189 break;
3190 case NETLINK_AUDIT:
3191 fd_trans_register(ret, &target_netlink_audit_trans);
3192 break;
3193 default:
3194 g_assert_not_reached();
3198 return ret;
3201 /* do_bind() must return target values and target errnos. */
3202 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3203 socklen_t addrlen)
3205 void *addr;
3206 abi_long ret;
3208 if ((int)addrlen < 0) {
3209 return -TARGET_EINVAL;
3212 addr = alloca(addrlen+1);
3214 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3215 if (ret)
3216 return ret;
3218 return get_errno(bind(sockfd, addr, addrlen));
3221 /* do_connect() must return target values and target errnos. */
3222 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3223 socklen_t addrlen)
3225 void *addr;
3226 abi_long ret;
3228 if ((int)addrlen < 0) {
3229 return -TARGET_EINVAL;
3232 addr = alloca(addrlen+1);
3234 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3235 if (ret)
3236 return ret;
3238 return get_errno(safe_connect(sockfd, addr, addrlen));
3241 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3242 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3243 int flags, int send)
3245 abi_long ret, len;
3246 struct msghdr msg;
3247 abi_ulong count;
3248 struct iovec *vec;
3249 abi_ulong target_vec;
3251 if (msgp->msg_name) {
3252 msg.msg_namelen = tswap32(msgp->msg_namelen);
3253 msg.msg_name = alloca(msg.msg_namelen+1);
3254 ret = target_to_host_sockaddr(fd, msg.msg_name,
3255 tswapal(msgp->msg_name),
3256 msg.msg_namelen);
3257 if (ret == -TARGET_EFAULT) {
3258 /* For connected sockets msg_name and msg_namelen must
3259 * be ignored, so returning EFAULT immediately is wrong.
3260 * Instead, pass a bad msg_name to the host kernel, and
3261 * let it decide whether to return EFAULT or not.
3262 */
3263 msg.msg_name = (void *)-1;
3264 } else if (ret) {
3265 goto out2;
3267 } else {
3268 msg.msg_name = NULL;
3269 msg.msg_namelen = 0;
3271 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3272 msg.msg_control = alloca(msg.msg_controllen);
3273 memset(msg.msg_control, 0, msg.msg_controllen);
3275 msg.msg_flags = tswap32(msgp->msg_flags);
3277 count = tswapal(msgp->msg_iovlen);
3278 target_vec = tswapal(msgp->msg_iov);
3280 if (count > IOV_MAX) {
3281 /* sendmsg/recvmsg return a different errno for this condition than
3282 * readv/writev, so we must catch it here before lock_iovec() does.
3283 */
3284 ret = -TARGET_EMSGSIZE;
3285 goto out2;
3288 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3289 target_vec, count, send);
3290 if (vec == NULL) {
3291 ret = -host_to_target_errno(errno);
3292 goto out2;
3294 msg.msg_iovlen = count;
3295 msg.msg_iov = vec;
3297 if (send) {
3298 if (fd_trans_target_to_host_data(fd)) {
3299 void *host_msg;
3301 host_msg = g_malloc(msg.msg_iov->iov_len);
3302 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3303 ret = fd_trans_target_to_host_data(fd)(host_msg,
3304 msg.msg_iov->iov_len);
3305 if (ret >= 0) {
3306 msg.msg_iov->iov_base = host_msg;
3307 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3309 g_free(host_msg);
3310 } else {
3311 ret = target_to_host_cmsg(&msg, msgp);
3312 if (ret == 0) {
3313 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3316 } else {
3317 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3318 if (!is_error(ret)) {
3319 len = ret;
3320 if (fd_trans_host_to_target_data(fd)) {
3321 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3322 MIN(msg.msg_iov->iov_len, len));
3323 } else {
3324 ret = host_to_target_cmsg(msgp, &msg);
3326 if (!is_error(ret)) {
3327 msgp->msg_namelen = tswap32(msg.msg_namelen);
3328 msgp->msg_flags = tswap32(msg.msg_flags);
3329 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3330 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3331 msg.msg_name, msg.msg_namelen);
3332 if (ret) {
3333 goto out;
3337 ret = len;
3342 out:
3343 unlock_iovec(vec, target_vec, count, !send);
3344 out2:
3345 return ret;
3348 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3349 int flags, int send)
3351 abi_long ret;
3352 struct target_msghdr *msgp;
3354 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3355 msgp,
3356 target_msg,
3357 send ? 1 : 0)) {
3358 return -TARGET_EFAULT;
3360 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3361 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3362 return ret;
3365 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3366 * so it might not have this *mmsg-specific flag either.
3367 */
3368 #ifndef MSG_WAITFORONE
3369 #define MSG_WAITFORONE 0x10000
3370 #endif
3372 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3373 unsigned int vlen, unsigned int flags,
3374 int send)
3376 struct target_mmsghdr *mmsgp;
3377 abi_long ret = 0;
3378 int i;
3380 if (vlen > UIO_MAXIOV) {
3381 vlen = UIO_MAXIOV;
3384 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3385 if (!mmsgp) {
3386 return -TARGET_EFAULT;
3389 for (i = 0; i < vlen; i++) {
3390 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3391 if (is_error(ret)) {
3392 break;
3394 mmsgp[i].msg_len = tswap32(ret);
3395 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3396 if (flags & MSG_WAITFORONE) {
3397 flags |= MSG_DONTWAIT;
3401 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3403 /* Return number of datagrams sent if we sent any at all;
3404 * otherwise return the error.
3405 */
3406 if (i) {
3407 return i;
3409 return ret;
3412 /* do_accept4() must return target values and target errnos. */
3413 static abi_long do_accept4(int fd, abi_ulong target_addr,
3414 abi_ulong target_addrlen_addr, int flags)
3416 socklen_t addrlen, ret_addrlen;
3417 void *addr;
3418 abi_long ret;
3419 int host_flags;
3421 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3423 if (target_addr == 0) {
3424 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3427 /* Linux returns EFAULT if the addrlen pointer is invalid */
3428 if (get_user_u32(addrlen, target_addrlen_addr))
3429 return -TARGET_EFAULT;
3431 if ((int)addrlen < 0) {
3432 return -TARGET_EINVAL;
3435 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3436 return -TARGET_EFAULT;
3439 addr = alloca(addrlen);
3441 ret_addrlen = addrlen;
3442 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3443 if (!is_error(ret)) {
3444 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3445 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3446 ret = -TARGET_EFAULT;
3449 return ret;
3452 /* do_getpeername() must return target values and target errnos. */
3453 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3454 abi_ulong target_addrlen_addr)
3456 socklen_t addrlen, ret_addrlen;
3457 void *addr;
3458 abi_long ret;
3460 if (get_user_u32(addrlen, target_addrlen_addr))
3461 return -TARGET_EFAULT;
3463 if ((int)addrlen < 0) {
3464 return -TARGET_EINVAL;
3467 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3468 return -TARGET_EFAULT;
3471 addr = alloca(addrlen);
3473 ret_addrlen = addrlen;
3474 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3475 if (!is_error(ret)) {
3476 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3477 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3478 ret = -TARGET_EFAULT;
3481 return ret;
3484 /* do_getsockname() must return target values and target errnos. */
3485 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3486 abi_ulong target_addrlen_addr)
3488 socklen_t addrlen, ret_addrlen;
3489 void *addr;
3490 abi_long ret;
3492 if (get_user_u32(addrlen, target_addrlen_addr))
3493 return -TARGET_EFAULT;
3495 if ((int)addrlen < 0) {
3496 return -TARGET_EINVAL;
3499 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3500 return -TARGET_EFAULT;
3503 addr = alloca(addrlen);
3505 ret_addrlen = addrlen;
3506 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3507 if (!is_error(ret)) {
3508 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3509 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3510 ret = -TARGET_EFAULT;
3513 return ret;
3516 /* do_socketpair() must return target values and target errnos. */
3517 static abi_long do_socketpair(int domain, int type, int protocol,
3518 abi_ulong target_tab_addr)
3520 int tab[2];
3521 abi_long ret;
3523 target_to_host_sock_type(&type);
3525 ret = get_errno(socketpair(domain, type, protocol, tab));
3526 if (!is_error(ret)) {
3527 if (put_user_s32(tab[0], target_tab_addr)
3528 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3529 ret = -TARGET_EFAULT;
3531 return ret;
3534 /* do_sendto() must return target values and target errnos. */
3535 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3536 abi_ulong target_addr, socklen_t addrlen)
3538 void *addr;
3539 void *host_msg;
3540 void *copy_msg = NULL;
3541 abi_long ret;
3543 if ((int)addrlen < 0) {
3544 return -TARGET_EINVAL;
3547 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3548 if (!host_msg)
3549 return -TARGET_EFAULT;
3550 if (fd_trans_target_to_host_data(fd)) {
3551 copy_msg = host_msg;
3552 host_msg = g_malloc(len);
3553 memcpy(host_msg, copy_msg, len);
3554 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3555 if (ret < 0) {
3556 goto fail;
3559 if (target_addr) {
3560 addr = alloca(addrlen+1);
3561 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3562 if (ret) {
3563 goto fail;
3565 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3566 } else {
3567 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3569 fail:
3570 if (copy_msg) {
3571 g_free(host_msg);
3572 host_msg = copy_msg;
3574 unlock_user(host_msg, msg, 0);
3575 return ret;
3578 /* do_recvfrom() must return target values and target errnos. */
3579 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3580 abi_ulong target_addr,
3581 abi_ulong target_addrlen)
3583 socklen_t addrlen, ret_addrlen;
3584 void *addr;
3585 void *host_msg;
3586 abi_long ret;
3588 if (!msg) {
3589 host_msg = NULL;
3590 } else {
3591 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3592 if (!host_msg) {
3593 return -TARGET_EFAULT;
3596 if (target_addr) {
3597 if (get_user_u32(addrlen, target_addrlen)) {
3598 ret = -TARGET_EFAULT;
3599 goto fail;
3601 if ((int)addrlen < 0) {
3602 ret = -TARGET_EINVAL;
3603 goto fail;
3605 addr = alloca(addrlen);
3606 ret_addrlen = addrlen;
3607 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3608 addr, &ret_addrlen));
3609 } else {
3610 addr = NULL; /* To keep compiler quiet. */
3611 addrlen = 0; /* To keep compiler quiet. */
3612 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3614 if (!is_error(ret)) {
3615 if (fd_trans_host_to_target_data(fd)) {
3616 abi_long trans;
3617 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3618 if (is_error(trans)) {
3619 ret = trans;
3620 goto fail;
3623 if (target_addr) {
3624 host_to_target_sockaddr(target_addr, addr,
3625 MIN(addrlen, ret_addrlen));
3626 if (put_user_u32(ret_addrlen, target_addrlen)) {
3627 ret = -TARGET_EFAULT;
3628 goto fail;
3631 unlock_user(host_msg, msg, len);
3632 } else {
3633 fail:
3634 unlock_user(host_msg, msg, 0);
3636 return ret;
3639 #ifdef TARGET_NR_socketcall
3640 /* do_socketcall() must return target values and target errnos. */
3641 static abi_long do_socketcall(int num, abi_ulong vptr)
3643 static const unsigned nargs[] = { /* number of arguments per operation */
3644 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3645 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3646 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3647 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3648 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3649 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3650 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3651 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3652 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3653 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3654 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3655 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3656 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3657 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3658 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3659 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3660 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3661 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3662 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3663 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3665 abi_long a[6]; /* max 6 args */
3666 unsigned i;
3668 /* check the range of the first argument num */
3669 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3670 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3671 return -TARGET_EINVAL;
3673 /* ensure we have space for args */
3674 if (nargs[num] > ARRAY_SIZE(a)) {
3675 return -TARGET_EINVAL;
3677 /* collect the arguments in a[] according to nargs[] */
3678 for (i = 0; i < nargs[num]; ++i) {
3679 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3680 return -TARGET_EFAULT;
3683 /* now when we have the args, invoke the appropriate underlying function */
3684 switch (num) {
3685 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3686 return do_socket(a[0], a[1], a[2]);
3687 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3688 return do_bind(a[0], a[1], a[2]);
3689 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3690 return do_connect(a[0], a[1], a[2]);
3691 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3692 return get_errno(listen(a[0], a[1]));
3693 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3694 return do_accept4(a[0], a[1], a[2], 0);
3695 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3696 return do_getsockname(a[0], a[1], a[2]);
3697 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3698 return do_getpeername(a[0], a[1], a[2]);
3699 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3700 return do_socketpair(a[0], a[1], a[2], a[3]);
3701 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3702 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3703 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3704 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3705 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3706 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3707 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3708 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3709 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3710 return get_errno(shutdown(a[0], a[1]));
3711 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3712 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3713 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3714 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3715 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3716 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3717 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3718 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3719 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3720 return do_accept4(a[0], a[1], a[2], a[3]);
3721 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3722 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3723 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3724 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3725 default:
3726 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3727 return -TARGET_EINVAL;
3730 #endif
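/*
 * Editor's illustration, not part of the original file: on targets such
 * as 32-bit x86, the guest libc funnels every socket operation through a
 * single syscall, socketcall(num, vptr), where vptr points at a guest
 * array of abi_longs, e.g. for num == SYS_SOCKET:
 *     a[0] = domain, a[1] = type, a[2] = protocol
 * do_socketcall() above validates num against TARGET_SYS_SENDMMSG,
 * fetches nargs[num] words with get_user_ual(), and dispatches, here to
 * do_socket(a[0], a[1], a[2]).
 */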
3732 #define N_SHM_REGIONS 32
3734 static struct shm_region {
3735 abi_ulong start;
3736 abi_ulong size;
3737 bool in_use;
3738 } shm_regions[N_SHM_REGIONS];
3740 #ifndef TARGET_SEMID64_DS
3741 /* asm-generic version of this struct */
3742 struct target_semid64_ds
3744 struct target_ipc_perm sem_perm;
3745 abi_ulong sem_otime;
3746 #if TARGET_ABI_BITS == 32
3747 abi_ulong __unused1;
3748 #endif
3749 abi_ulong sem_ctime;
3750 #if TARGET_ABI_BITS == 32
3751 abi_ulong __unused2;
3752 #endif
3753 abi_ulong sem_nsems;
3754 abi_ulong __unused3;
3755 abi_ulong __unused4;
3757 #endif
3759 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3760 abi_ulong target_addr)
3762 struct target_ipc_perm *target_ip;
3763 struct target_semid64_ds *target_sd;
3765 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3766 return -TARGET_EFAULT;
3767 target_ip = &(target_sd->sem_perm);
3768 host_ip->__key = tswap32(target_ip->__key);
3769 host_ip->uid = tswap32(target_ip->uid);
3770 host_ip->gid = tswap32(target_ip->gid);
3771 host_ip->cuid = tswap32(target_ip->cuid);
3772 host_ip->cgid = tswap32(target_ip->cgid);
3773 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3774 host_ip->mode = tswap32(target_ip->mode);
3775 #else
3776 host_ip->mode = tswap16(target_ip->mode);
3777 #endif
3778 #if defined(TARGET_PPC)
3779 host_ip->__seq = tswap32(target_ip->__seq);
3780 #else
3781 host_ip->__seq = tswap16(target_ip->__seq);
3782 #endif
3783 unlock_user_struct(target_sd, target_addr, 0);
3784 return 0;
3787 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3788 struct ipc_perm *host_ip)
3790 struct target_ipc_perm *target_ip;
3791 struct target_semid64_ds *target_sd;
3793 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3794 return -TARGET_EFAULT;
3795 target_ip = &(target_sd->sem_perm);
3796 target_ip->__key = tswap32(host_ip->__key);
3797 target_ip->uid = tswap32(host_ip->uid);
3798 target_ip->gid = tswap32(host_ip->gid);
3799 target_ip->cuid = tswap32(host_ip->cuid);
3800 target_ip->cgid = tswap32(host_ip->cgid);
3801 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3802 target_ip->mode = tswap32(host_ip->mode);
3803 #else
3804 target_ip->mode = tswap16(host_ip->mode);
3805 #endif
3806 #if defined(TARGET_PPC)
3807 target_ip->__seq = tswap32(host_ip->__seq);
3808 #else
3809 target_ip->__seq = tswap16(host_ip->__seq);
3810 #endif
3811 unlock_user_struct(target_sd, target_addr, 1);
3812 return 0;
3815 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3816 abi_ulong target_addr)
3818 struct target_semid64_ds *target_sd;
3820 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3821 return -TARGET_EFAULT;
3822 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3823 return -TARGET_EFAULT;
3824 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3825 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3826 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3827 unlock_user_struct(target_sd, target_addr, 0);
3828 return 0;
3831 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3832 struct semid_ds *host_sd)
3834 struct target_semid64_ds *target_sd;
3836 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3837 return -TARGET_EFAULT;
3838 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3839 return -TARGET_EFAULT;
3840 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3841 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3842 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3843 unlock_user_struct(target_sd, target_addr, 1);
3844 return 0;
3847 struct target_seminfo {
3848 int semmap;
3849 int semmni;
3850 int semmns;
3851 int semmnu;
3852 int semmsl;
3853 int semopm;
3854 int semume;
3855 int semusz;
3856 int semvmx;
3857 int semaem;
3860 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3861 struct seminfo *host_seminfo)
3863 struct target_seminfo *target_seminfo;
3864 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3865 return -TARGET_EFAULT;
3866 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3867 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3868 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3869 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3870 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3871 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3872 __put_user(host_seminfo->semume, &target_seminfo->semume);
3873 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3874 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3875 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3876 unlock_user_struct(target_seminfo, target_addr, 1);
3877 return 0;
3880 union semun {
3881 int val;
3882 struct semid_ds *buf;
3883 unsigned short *array;
3884 struct seminfo *__buf;
3887 union target_semun {
3888 int val;
3889 abi_ulong buf;
3890 abi_ulong array;
3891 abi_ulong __buf;
3894 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3895 abi_ulong target_addr)
3897 int nsems;
3898 unsigned short *array;
3899 union semun semun;
3900 struct semid_ds semid_ds;
3901 int i, ret;
3903 semun.buf = &semid_ds;
3905 ret = semctl(semid, 0, IPC_STAT, semun);
3906 if (ret == -1)
3907 return get_errno(ret);
3909 nsems = semid_ds.sem_nsems;
3911 *host_array = g_try_new(unsigned short, nsems);
3912 if (!*host_array) {
3913 return -TARGET_ENOMEM;
3915 array = lock_user(VERIFY_READ, target_addr,
3916 nsems*sizeof(unsigned short), 1);
3917 if (!array) {
3918 g_free(*host_array);
3919 return -TARGET_EFAULT;
3922 for(i=0; i<nsems; i++) {
3923 __get_user((*host_array)[i], &array[i]);
3925 unlock_user(array, target_addr, 0);
3927 return 0;
3930 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3931 unsigned short **host_array)
3933 int nsems;
3934 unsigned short *array;
3935 union semun semun;
3936 struct semid_ds semid_ds;
3937 int i, ret;
3939 semun.buf = &semid_ds;
3941 ret = semctl(semid, 0, IPC_STAT, semun);
3942 if (ret == -1)
3943 return get_errno(ret);
3945 nsems = semid_ds.sem_nsems;
3947 array = lock_user(VERIFY_WRITE, target_addr,
3948 nsems*sizeof(unsigned short), 0);
3949 if (!array)
3950 return -TARGET_EFAULT;
3952 for (i = 0; i < nsems; i++) {
3953 __put_user((*host_array)[i], &array[i]);
3955 g_free(*host_array);
3956 unlock_user(array, target_addr, 1);
3958 return 0;
3961 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3962 abi_ulong target_arg)
3964 union target_semun target_su = { .buf = target_arg };
3965 union semun arg;
3966 struct semid_ds dsarg;
3967 unsigned short *array = NULL;
3968 struct seminfo seminfo;
3969 abi_long ret = -TARGET_EINVAL;
3970 abi_long err;
3971 cmd &= 0xff;
3973 switch (cmd) {
3974 case GETVAL:
3975 case SETVAL:
3976 /* In 64-bit cross-endian situations, we will erroneously pick up
3977 * the wrong half of the union for the "val" element. To rectify
3978 * this, the entire 8-byte structure is byteswapped, followed by
3979 * a swap of the 4-byte val field. In other cases, the data is
3980 * already in proper host byte order. */
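/*
 * Illustrative example (big-endian 64-bit guest on a little-endian
 * host, with a made-up value): the guest stores val = 0x11223344 in
 * the first four bytes of the 8-byte union, so after the usual
 * argument swap the union sits in host memory as
 * XX XX XX XX 44 33 22 11 and target_su.val would read the
 * uninitialized half. tswapal() on buf restores the guest layout
 * 11 22 33 44 XX XX XX XX, and tswap32() then yields 0x11223344.
 */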
3981 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3982 target_su.buf = tswapal(target_su.buf);
3983 arg.val = tswap32(target_su.val);
3984 } else {
3985 arg.val = target_su.val;
3987 ret = get_errno(semctl(semid, semnum, cmd, arg));
3988 break;
3989 case GETALL:
3990 case SETALL:
3991 err = target_to_host_semarray(semid, &array, target_su.array);
3992 if (err)
3993 return err;
3994 arg.array = array;
3995 ret = get_errno(semctl(semid, semnum, cmd, arg));
3996 err = host_to_target_semarray(semid, target_su.array, &array);
3997 if (err)
3998 return err;
3999 break;
4000 case IPC_STAT:
4001 case IPC_SET:
4002 case SEM_STAT:
4003 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4004 if (err)
4005 return err;
4006 arg.buf = &dsarg;
4007 ret = get_errno(semctl(semid, semnum, cmd, arg));
4008 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4009 if (err)
4010 return err;
4011 break;
4012 case IPC_INFO:
4013 case SEM_INFO:
4014 arg.__buf = &seminfo;
4015 ret = get_errno(semctl(semid, semnum, cmd, arg));
4016 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4017 if (err)
4018 return err;
4019 break;
4020 case IPC_RMID:
4021 case GETPID:
4022 case GETNCNT:
4023 case GETZCNT:
4024 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4025 break;
4028 return ret;
4031 struct target_sembuf {
4032 unsigned short sem_num;
4033 short sem_op;
4034 short sem_flg;
4037 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4038 abi_ulong target_addr,
4039 unsigned nsops)
4041 struct target_sembuf *target_sembuf;
4042 int i;
4044 target_sembuf = lock_user(VERIFY_READ, target_addr,
4045 nsops*sizeof(struct target_sembuf), 1);
4046 if (!target_sembuf)
4047 return -TARGET_EFAULT;
4049 for (i = 0; i < nsops; i++) {
4050 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4051 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4052 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4055 unlock_user(target_sembuf, target_addr, 0);
4057 return 0;
4060 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4061 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4064 * This macro is required to handle the s390 variants, which pass the
4065 * arguments in a different order than the default.
4067 #ifdef __s390x__
4068 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4069 (__nsops), (__timeout), (__sops)
4070 #else
4071 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4072 (__nsops), 0, (__sops), (__timeout)
4073 #endif
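/*
 * For illustration, with the generic (non-s390x) definition the call
 *     safe_ipc(IPCOP_semtimedop, semid,
 *              SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts));
 * below expands to
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts);
 * while on s390x it becomes the five-argument form
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops);
 */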
4075 static inline abi_long do_semtimedop(int semid,
4076 abi_long ptr,
4077 unsigned nsops,
4078 abi_long timeout, bool time64)
4080 struct sembuf *sops;
4081 struct timespec ts, *pts = NULL;
4082 abi_long ret;
4084 if (timeout) {
4085 pts = &ts;
4086 if (time64) {
4087 if (target_to_host_timespec64(pts, timeout)) {
4088 return -TARGET_EFAULT;
4090 } else {
4091 if (target_to_host_timespec(pts, timeout)) {
4092 return -TARGET_EFAULT;
4097 if (nsops > TARGET_SEMOPM) {
4098 return -TARGET_E2BIG;
4101 sops = g_new(struct sembuf, nsops);
4103 if (target_to_host_sembuf(sops, ptr, nsops)) {
4104 g_free(sops);
4105 return -TARGET_EFAULT;
4108 ret = -TARGET_ENOSYS;
4109 #ifdef __NR_semtimedop
4110 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4111 #endif
4112 #ifdef __NR_ipc
4113 if (ret == -TARGET_ENOSYS) {
4114 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4115 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4117 #endif
4118 g_free(sops);
4119 return ret;
4121 #endif
4123 struct target_msqid_ds
4125 struct target_ipc_perm msg_perm;
4126 abi_ulong msg_stime;
4127 #if TARGET_ABI_BITS == 32
4128 abi_ulong __unused1;
4129 #endif
4130 abi_ulong msg_rtime;
4131 #if TARGET_ABI_BITS == 32
4132 abi_ulong __unused2;
4133 #endif
4134 abi_ulong msg_ctime;
4135 #if TARGET_ABI_BITS == 32
4136 abi_ulong __unused3;
4137 #endif
4138 abi_ulong __msg_cbytes;
4139 abi_ulong msg_qnum;
4140 abi_ulong msg_qbytes;
4141 abi_ulong msg_lspid;
4142 abi_ulong msg_lrpid;
4143 abi_ulong __unused4;
4144 abi_ulong __unused5;
4147 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4148 abi_ulong target_addr)
4150 struct target_msqid_ds *target_md;
4152 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4153 return -TARGET_EFAULT;
4154 if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
4155 return -TARGET_EFAULT;
4156 host_md->msg_stime = tswapal(target_md->msg_stime);
4157 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4158 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4159 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4160 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4161 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4162 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4163 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4164 unlock_user_struct(target_md, target_addr, 0);
4165 return 0;
4168 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4169 struct msqid_ds *host_md)
4171 struct target_msqid_ds *target_md;
4173 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4174 return -TARGET_EFAULT;
4175 if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
4176 return -TARGET_EFAULT;
4177 target_md->msg_stime = tswapal(host_md->msg_stime);
4178 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4179 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4180 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4181 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4182 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4183 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4184 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4185 unlock_user_struct(target_md, target_addr, 1);
4186 return 0;
4189 struct target_msginfo {
4190 int msgpool;
4191 int msgmap;
4192 int msgmax;
4193 int msgmnb;
4194 int msgmni;
4195 int msgssz;
4196 int msgtql;
4197 unsigned short int msgseg;
4200 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4201 struct msginfo *host_msginfo)
4203 struct target_msginfo *target_msginfo;
4204 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4205 return -TARGET_EFAULT;
4206 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4207 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4208 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4209 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4210 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4211 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4212 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4213 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4214 unlock_user_struct(target_msginfo, target_addr, 1);
4215 return 0;
4218 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4220 struct msqid_ds dsarg;
4221 struct msginfo msginfo;
4222 abi_long ret = -TARGET_EINVAL;
4224 cmd &= 0xff;
4226 switch (cmd) {
4227 case IPC_STAT:
4228 case IPC_SET:
4229 case MSG_STAT:
4230 if (target_to_host_msqid_ds(&dsarg, ptr))
4231 return -TARGET_EFAULT;
4232 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4233 if (host_to_target_msqid_ds(ptr, &dsarg))
4234 return -TARGET_EFAULT;
4235 break;
4236 case IPC_RMID:
4237 ret = get_errno(msgctl(msgid, cmd, NULL));
4238 break;
4239 case IPC_INFO:
4240 case MSG_INFO:
4241 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4242 if (host_to_target_msginfo(ptr, &msginfo))
4243 return -TARGET_EFAULT;
4244 break;
4247 return ret;
4250 struct target_msgbuf {
4251 abi_long mtype;
4252 char mtext[1];
4255 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4256 ssize_t msgsz, int msgflg)
4258 struct target_msgbuf *target_mb;
4259 struct msgbuf *host_mb;
4260 abi_long ret = 0;
4262 if (msgsz < 0) {
4263 return -TARGET_EINVAL;
4266 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4267 return -TARGET_EFAULT;
4268 host_mb = g_try_malloc(msgsz + sizeof(long));
4269 if (!host_mb) {
4270 unlock_user_struct(target_mb, msgp, 0);
4271 return -TARGET_ENOMEM;
4273 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4274 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4275 ret = -TARGET_ENOSYS;
4276 #ifdef __NR_msgsnd
4277 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4278 #endif
4279 #ifdef __NR_ipc
4280 if (ret == -TARGET_ENOSYS) {
4281 #ifdef __s390x__
4282 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4283 host_mb));
4284 #else
4285 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4286 host_mb, 0));
4287 #endif
4289 #endif
4290 g_free(host_mb);
4291 unlock_user_struct(target_mb, msgp, 0);
4293 return ret;
4296 #ifdef __NR_ipc
4297 #if defined(__sparc__)
4298 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
4299 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4300 #elif defined(__s390x__)
4301 /* The s390 sys_ipc variant has only five parameters. */
4302 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4303 ((long int[]){(long int)__msgp, __msgtyp})
4304 #else
4305 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4306 ((long int[]){(long int)__msgp, __msgtyp}), 0
4307 #endif
4308 #endif
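/*
 * Illustrative expansion: on most hosts
 *     safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *              MSGRCV_ARGS(host_mb, msgtyp));
 * becomes
 *     safe_ipc(..., msqid, msgsz, msgflg,
 *              ((long int[]){(long int)host_mb, msgtyp}), 0);
 * i.e. the msgp/msgtyp pair is smuggled through a compound-literal
 * array standing in for the kernel's ipc_kludge structure, while SPARC
 * passes the two values directly and s390x drops the trailing argument.
 */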
4310 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4311 ssize_t msgsz, abi_long msgtyp,
4312 int msgflg)
4314 struct target_msgbuf *target_mb;
4315 char *target_mtext;
4316 struct msgbuf *host_mb;
4317 abi_long ret = 0;
4319 if (msgsz < 0) {
4320 return -TARGET_EINVAL;
4323 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4324 return -TARGET_EFAULT;
4326 host_mb = g_try_malloc(msgsz + sizeof(long));
4327 if (!host_mb) {
4328 ret = -TARGET_ENOMEM;
4329 goto end;
4331 ret = -TARGET_ENOSYS;
4332 #ifdef __NR_msgrcv
4333 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4334 #endif
4335 #ifdef __NR_ipc
4336 if (ret == -TARGET_ENOSYS) {
4337 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4338 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4340 #endif
4342 if (ret > 0) {
4343 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4344 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4345 if (!target_mtext) {
4346 ret = -TARGET_EFAULT;
4347 goto end;
4349 memcpy(target_mb->mtext, host_mb->mtext, ret);
4350 unlock_user(target_mtext, target_mtext_addr, ret);
4353 target_mb->mtype = tswapal(host_mb->mtype);
4355 end:
4356 if (target_mb)
4357 unlock_user_struct(target_mb, msgp, 1);
4358 g_free(host_mb);
4359 return ret;
4362 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4363 abi_ulong target_addr)
4365 struct target_shmid_ds *target_sd;
4367 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4368 return -TARGET_EFAULT;
4369 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4370 return -TARGET_EFAULT;
4371 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4372 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4373 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4374 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4375 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4376 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4377 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4378 unlock_user_struct(target_sd, target_addr, 0);
4379 return 0;
4382 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4383 struct shmid_ds *host_sd)
4385 struct target_shmid_ds *target_sd;
4387 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4388 return -TARGET_EFAULT;
4389 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4390 return -TARGET_EFAULT;
4391 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4392 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4393 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4394 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4395 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4396 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4397 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4398 unlock_user_struct(target_sd, target_addr, 1);
4399 return 0;
4402 struct target_shminfo {
4403 abi_ulong shmmax;
4404 abi_ulong shmmin;
4405 abi_ulong shmmni;
4406 abi_ulong shmseg;
4407 abi_ulong shmall;
4410 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4411 struct shminfo *host_shminfo)
4413 struct target_shminfo *target_shminfo;
4414 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4415 return -TARGET_EFAULT;
4416 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4417 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4418 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4419 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4420 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4421 unlock_user_struct(target_shminfo, target_addr, 1);
4422 return 0;
4425 struct target_shm_info {
4426 int used_ids;
4427 abi_ulong shm_tot;
4428 abi_ulong shm_rss;
4429 abi_ulong shm_swp;
4430 abi_ulong swap_attempts;
4431 abi_ulong swap_successes;
4434 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4435 struct shm_info *host_shm_info)
4437 struct target_shm_info *target_shm_info;
4438 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4439 return -TARGET_EFAULT;
4440 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4441 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4442 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4443 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4444 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4445 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4446 unlock_user_struct(target_shm_info, target_addr, 1);
4447 return 0;
4450 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4452 struct shmid_ds dsarg;
4453 struct shminfo shminfo;
4454 struct shm_info shm_info;
4455 abi_long ret = -TARGET_EINVAL;
4457 cmd &= 0xff;
4459 switch (cmd) {
4460 case IPC_STAT:
4461 case IPC_SET:
4462 case SHM_STAT:
4463 if (target_to_host_shmid_ds(&dsarg, buf))
4464 return -TARGET_EFAULT;
4465 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4466 if (host_to_target_shmid_ds(buf, &dsarg))
4467 return -TARGET_EFAULT;
4468 break;
4469 case IPC_INFO:
4470 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4471 if (host_to_target_shminfo(buf, &shminfo))
4472 return -TARGET_EFAULT;
4473 break;
4474 case SHM_INFO:
4475 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4476 if (host_to_target_shm_info(buf, &shm_info))
4477 return -TARGET_EFAULT;
4478 break;
4479 case IPC_RMID:
4480 case SHM_LOCK:
4481 case SHM_UNLOCK:
4482 ret = get_errno(shmctl(shmid, cmd, NULL));
4483 break;
4486 return ret;
4489 #ifndef TARGET_FORCE_SHMLBA
4490 /* For most architectures, SHMLBA is the same as the page size;
4491 * some architectures have larger values, in which case they should
4492 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4493 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4494 * and defining its own value for SHMLBA.
4496 * The kernel also permits SHMLBA to be set by the architecture to a
4497 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4498 * this means that addresses are rounded to the large size if
4499 * SHM_RND is set but addresses not aligned to that size are not rejected
4500 * as long as they are at least page-aligned. Since the only architecture
4501 * which uses this is ia64, this code doesn't provide for that oddity.
4503 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4505 return TARGET_PAGE_SIZE;
4507 #endif
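/*
 * Worked example of the alignment handling in do_shmat() below,
 * assuming a 4 KiB shmlba: an shmaddr of 0x12345 is not aligned, so
 * with SHM_RND it is rounded down via shmaddr &= ~(shmlba - 1) to
 * 0x12000, and without SHM_RND the call fails with -TARGET_EINVAL.
 */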
4509 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4510 int shmid, abi_ulong shmaddr, int shmflg)
4512 CPUState *cpu = env_cpu(cpu_env);
4513 abi_long raddr;
4514 void *host_raddr;
4515 struct shmid_ds shm_info;
4516 int i, ret;
4517 abi_ulong shmlba;
4519 /* shmat pointers are always untagged */
4521 /* find out the length of the shared memory segment */
4522 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4523 if (is_error(ret)) {
4524 /* can't get length, bail out */
4525 return ret;
4528 shmlba = target_shmlba(cpu_env);
4530 if (shmaddr & (shmlba - 1)) {
4531 if (shmflg & SHM_RND) {
4532 shmaddr &= ~(shmlba - 1);
4533 } else {
4534 return -TARGET_EINVAL;
4537 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4538 return -TARGET_EINVAL;
4541 mmap_lock();
4544 * We're mapping shared memory, so ensure we generate code for parallel
4545 * execution and flush old translations. This will work up to the level
4546 * supported by the host -- anything that requires EXCP_ATOMIC will not
4547 * be atomic with respect to an external process.
4549 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4550 cpu->tcg_cflags |= CF_PARALLEL;
4551 tb_flush(cpu);
4554 if (shmaddr)
4555 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4556 else {
4557 abi_ulong mmap_start;
4559 /* In order to use the host shmat, we need to honor host SHMLBA. */
4560 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4562 if (mmap_start == -1) {
4563 errno = ENOMEM;
4564 host_raddr = (void *)-1;
4565 } else
4566 host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4567 shmflg | SHM_REMAP);
4570 if (host_raddr == (void *)-1) {
4571 mmap_unlock();
4572 return get_errno((long)host_raddr);
4574 raddr = h2g((unsigned long)host_raddr);
4576 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4577 PAGE_VALID | PAGE_RESET | PAGE_READ |
4578 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4580 for (i = 0; i < N_SHM_REGIONS; i++) {
4581 if (!shm_regions[i].in_use) {
4582 shm_regions[i].in_use = true;
4583 shm_regions[i].start = raddr;
4584 shm_regions[i].size = shm_info.shm_segsz;
4585 break;
4589 mmap_unlock();
4590 return raddr;
4594 static inline abi_long do_shmdt(abi_ulong shmaddr)
4596 int i;
4597 abi_long rv;
4599 /* shmdt pointers are always untagged */
4601 mmap_lock();
4603 for (i = 0; i < N_SHM_REGIONS; ++i) {
4604 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4605 shm_regions[i].in_use = false;
4606 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4607 break;
4610 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4612 mmap_unlock();
4614 return rv;
4617 #ifdef TARGET_NR_ipc
4618 /* ??? This only works with linear mappings. */
4619 /* do_ipc() must return target values and target errnos. */
4620 static abi_long do_ipc(CPUArchState *cpu_env,
4621 unsigned int call, abi_long first,
4622 abi_long second, abi_long third,
4623 abi_long ptr, abi_long fifth)
4625 int version;
4626 abi_long ret = 0;
4628 version = call >> 16;
4629 call &= 0xffff;
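/*
 * The guest encodes an optional interface version in the upper half of
 * the call number; for instance a version-1 msgrcv arrives as
 * call = IPCOP_msgrcv | (1 << 16), which splits into version == 1 and
 * call == IPCOP_msgrcv here.
 */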
4631 switch (call) {
4632 case IPCOP_semop:
4633 ret = do_semtimedop(first, ptr, second, 0, false);
4634 break;
4635 case IPCOP_semtimedop:
4637 * The s390 sys_ipc variant has only five parameters instead of six
4638 * (as in the default variant); the only difference is the handling of
4639 * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4640 * to a struct timespec while the generic variant uses the fifth parameter.
4642 #if defined(TARGET_S390X)
4643 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4644 #else
4645 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4646 #endif
4647 break;
4649 case IPCOP_semget:
4650 ret = get_errno(semget(first, second, third));
4651 break;
4653 case IPCOP_semctl: {
4654 /* The semun argument to semctl is passed by value, so dereference the
4655 * ptr argument. */
4656 abi_ulong atptr;
4657 get_user_ual(atptr, ptr);
4658 ret = do_semctl(first, second, third, atptr);
4659 break;
4662 case IPCOP_msgget:
4663 ret = get_errno(msgget(first, second));
4664 break;
4666 case IPCOP_msgsnd:
4667 ret = do_msgsnd(first, ptr, second, third);
4668 break;
4670 case IPCOP_msgctl:
4671 ret = do_msgctl(first, second, ptr);
4672 break;
4674 case IPCOP_msgrcv:
4675 switch (version) {
4676 case 0:
4678 struct target_ipc_kludge {
4679 abi_long msgp;
4680 abi_long msgtyp;
4681 } *tmp;
4683 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4684 ret = -TARGET_EFAULT;
4685 break;
4688 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4690 unlock_user_struct(tmp, ptr, 0);
4691 break;
4693 default:
4694 ret = do_msgrcv(first, ptr, second, fifth, third);
4696 break;
4698 case IPCOP_shmat:
4699 switch (version) {
4700 default:
4702 abi_ulong raddr;
4703 raddr = do_shmat(cpu_env, first, ptr, second);
4704 if (is_error(raddr))
4705 return get_errno(raddr);
4706 if (put_user_ual(raddr, third))
4707 return -TARGET_EFAULT;
4708 break;
4710 case 1:
4711 ret = -TARGET_EINVAL;
4712 break;
4714 break;
4715 case IPCOP_shmdt:
4716 ret = do_shmdt(ptr);
4717 break;
4719 case IPCOP_shmget:
4720 /* IPC_* flag values are the same on all linux platforms */
4721 ret = get_errno(shmget(first, second, third));
4722 break;
4724 /* IPC_* and SHM_* command values are the same on all linux platforms */
4725 case IPCOP_shmctl:
4726 ret = do_shmctl(first, second, ptr);
4727 break;
4728 default:
4729 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4730 call, version);
4731 ret = -TARGET_ENOSYS;
4732 break;
4734 return ret;
4736 #endif
4738 /* kernel structure type definitions */
4740 #define STRUCT(name, ...) STRUCT_ ## name,
4741 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4742 enum {
4743 #include "syscall_types.h"
4744 STRUCT_MAX
4746 #undef STRUCT
4747 #undef STRUCT_SPECIAL
4749 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4750 #define STRUCT_SPECIAL(name)
4751 #include "syscall_types.h"
4752 #undef STRUCT
4753 #undef STRUCT_SPECIAL
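/*
 * Sketch of the double expansion above, for a hypothetical
 * STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 * entry in syscall_types.h: the first inclusion contributes the
 * enumerator STRUCT_winsize, and the second defines
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 */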
4755 #define MAX_STRUCT_SIZE 4096
4757 #ifdef CONFIG_FIEMAP
4758 /* So fiemap access checks don't overflow on 32-bit systems.
4759 * This is very slightly smaller than the limit imposed by
4760 * the underlying kernel.
4762 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4763 / sizeof(struct fiemap_extent))
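/*
 * Back-of-the-envelope check (sizes quoted from the Linux UAPI headers
 * for illustration): with a 32-byte struct fiemap and a 56-byte
 * struct fiemap_extent, this caps fm_extent_count at roughly 76
 * million, which guarantees that the outbufsz computation below stays
 * within UINT_MAX rather than wrapping around on 32-bit hosts.
 */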
4765 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4766 int fd, int cmd, abi_long arg)
4768 /* The parameter for this ioctl is a struct fiemap followed
4769 * by an array of struct fiemap_extent whose size is set
4770 * in fiemap->fm_extent_count. The array is filled in by the
4771 * ioctl.
4773 int target_size_in, target_size_out;
4774 struct fiemap *fm;
4775 const argtype *arg_type = ie->arg_type;
4776 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4777 void *argptr, *p;
4778 abi_long ret;
4779 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4780 uint32_t outbufsz;
4781 int free_fm = 0;
4783 assert(arg_type[0] == TYPE_PTR);
4784 assert(ie->access == IOC_RW);
4785 arg_type++;
4786 target_size_in = thunk_type_size(arg_type, 0);
4787 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4788 if (!argptr) {
4789 return -TARGET_EFAULT;
4791 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4792 unlock_user(argptr, arg, 0);
4793 fm = (struct fiemap *)buf_temp;
4794 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4795 return -TARGET_EINVAL;
4798 outbufsz = sizeof (*fm) +
4799 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4801 if (outbufsz > MAX_STRUCT_SIZE) {
4802 /* We can't fit all the extents into the fixed-size buffer.
4803 * Allocate one that is large enough and use it instead.
4805 fm = g_try_malloc(outbufsz);
4806 if (!fm) {
4807 return -TARGET_ENOMEM;
4809 memcpy(fm, buf_temp, sizeof(struct fiemap));
4810 free_fm = 1;
4812 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4813 if (!is_error(ret)) {
4814 target_size_out = target_size_in;
4815 /* An extent_count of 0 means we were only counting the extents
4816 * so there are no structs to copy
4818 if (fm->fm_extent_count != 0) {
4819 target_size_out += fm->fm_mapped_extents * extent_size;
4821 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4822 if (!argptr) {
4823 ret = -TARGET_EFAULT;
4824 } else {
4825 /* Convert the struct fiemap */
4826 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4827 if (fm->fm_extent_count != 0) {
4828 p = argptr + target_size_in;
4829 /* ...and then all the struct fiemap_extents */
4830 for (i = 0; i < fm->fm_mapped_extents; i++) {
4831 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4832 THUNK_TARGET);
4833 p += extent_size;
4836 unlock_user(argptr, arg, target_size_out);
4839 if (free_fm) {
4840 g_free(fm);
4842 return ret;
4844 #endif
4846 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4847 int fd, int cmd, abi_long arg)
4849 const argtype *arg_type = ie->arg_type;
4850 int target_size;
4851 void *argptr;
4852 int ret;
4853 struct ifconf *host_ifconf;
4854 uint32_t outbufsz;
4855 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4856 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4857 int target_ifreq_size;
4858 int nb_ifreq;
4859 int free_buf = 0;
4860 int i;
4861 int target_ifc_len;
4862 abi_long target_ifc_buf;
4863 int host_ifc_len;
4864 char *host_ifc_buf;
4866 assert(arg_type[0] == TYPE_PTR);
4867 assert(ie->access == IOC_RW);
4869 arg_type++;
4870 target_size = thunk_type_size(arg_type, 0);
4872 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4873 if (!argptr)
4874 return -TARGET_EFAULT;
4875 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4876 unlock_user(argptr, arg, 0);
4878 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4879 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4880 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4882 if (target_ifc_buf != 0) {
4883 target_ifc_len = host_ifconf->ifc_len;
4884 nb_ifreq = target_ifc_len / target_ifreq_size;
4885 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4887 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4888 if (outbufsz > MAX_STRUCT_SIZE) {
4890 * We can't fit all the ifreq entries into the fixed-size buffer.
4891 * Allocate one that is large enough and use it instead.
4893 host_ifconf = g_try_malloc(outbufsz);
4894 if (!host_ifconf) {
4895 return -TARGET_ENOMEM;
4897 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4898 free_buf = 1;
4900 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4902 host_ifconf->ifc_len = host_ifc_len;
4903 } else {
4904 host_ifc_buf = NULL;
4906 host_ifconf->ifc_buf = host_ifc_buf;
4908 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4909 if (!is_error(ret)) {
4910 /* convert host ifc_len to target ifc_len */
4912 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4913 target_ifc_len = nb_ifreq * target_ifreq_size;
4914 host_ifconf->ifc_len = target_ifc_len;
4916 /* restore target ifc_buf */
4918 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4920 /* copy struct ifconf to target user */
4922 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4923 if (!argptr)
4924 return -TARGET_EFAULT;
4925 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4926 unlock_user(argptr, arg, target_size);
4928 if (target_ifc_buf != 0) {
4929 /* copy ifreq[] to target user */
4930 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4931 for (i = 0; i < nb_ifreq ; i++) {
4932 thunk_convert(argptr + i * target_ifreq_size,
4933 host_ifc_buf + i * sizeof(struct ifreq),
4934 ifreq_arg_type, THUNK_TARGET);
4936 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4940 if (free_buf) {
4941 g_free(host_ifconf);
4944 return ret;
4947 #if defined(CONFIG_USBFS)
4948 #if HOST_LONG_BITS > 64
4949 #error USBDEVFS thunks do not support >64 bit hosts yet.
4950 #endif
4951 struct live_urb {
4952 uint64_t target_urb_adr;
4953 uint64_t target_buf_adr;
4954 char *target_buf_ptr;
4955 struct usbdevfs_urb host_urb;
4958 static GHashTable *usbdevfs_urb_hashtable(void)
4960 static GHashTable *urb_hashtable;
4962 if (!urb_hashtable) {
4963 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4965 return urb_hashtable;
4968 static void urb_hashtable_insert(struct live_urb *urb)
4970 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4971 g_hash_table_insert(urb_hashtable, urb, urb);
4974 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4976 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4977 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4980 static void urb_hashtable_remove(struct live_urb *urb)
4982 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4983 g_hash_table_remove(urb_hashtable, urb);
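/*
 * Note on the key scheme: target_urb_adr is the first member of
 * struct live_urb, so a struct live_urb * used as a key hashes and
 * compares identically (via g_int64_hash/g_int64_equal) to the bare
 * uint64_t pointer that urb_hashtable_lookup() passes in.
 */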
4986 static abi_long
4987 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4988 int fd, int cmd, abi_long arg)
4990 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4991 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4992 struct live_urb *lurb;
4993 void *argptr;
4994 uint64_t hurb;
4995 int target_size;
4996 uintptr_t target_urb_adr;
4997 abi_long ret;
4999 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5001 memset(buf_temp, 0, sizeof(uint64_t));
5002 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5003 if (is_error(ret)) {
5004 return ret;
5007 memcpy(&hurb, buf_temp, sizeof(uint64_t));
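/*
 * The kernel hands back the usbdevfs_urb pointer that was submitted;
 * recover the enclosing struct live_urb, container_of style, by
 * subtracting the offset of the host_urb member.
 */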
5008 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5009 if (!lurb->target_urb_adr) {
5010 return -TARGET_EFAULT;
5012 urb_hashtable_remove(lurb);
5013 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5014 lurb->host_urb.buffer_length);
5015 lurb->target_buf_ptr = NULL;
5017 /* restore the guest buffer pointer */
5018 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5020 /* update the guest urb struct */
5021 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5022 if (!argptr) {
5023 g_free(lurb);
5024 return -TARGET_EFAULT;
5026 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5027 unlock_user(argptr, lurb->target_urb_adr, target_size);
5029 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5030 /* write back the urb handle */
5031 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5032 if (!argptr) {
5033 g_free(lurb);
5034 return -TARGET_EFAULT;
5037 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5038 target_urb_adr = lurb->target_urb_adr;
5039 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5040 unlock_user(argptr, arg, target_size);
5042 g_free(lurb);
5043 return ret;
5046 static abi_long
5047 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5048 uint8_t *buf_temp __attribute__((unused)),
5049 int fd, int cmd, abi_long arg)
5051 struct live_urb *lurb;
5053 /* map target address back to host URB with metadata. */
5054 lurb = urb_hashtable_lookup(arg);
5055 if (!lurb) {
5056 return -TARGET_EFAULT;
5058 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5061 static abi_long
5062 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5063 int fd, int cmd, abi_long arg)
5065 const argtype *arg_type = ie->arg_type;
5066 int target_size;
5067 abi_long ret;
5068 void *argptr;
5069 int rw_dir;
5070 struct live_urb *lurb;
5073 * each submitted URB needs to map to a unique ID for the
5074 * kernel, and that unique ID needs to be a pointer to
5075 * host memory. Hence, we need to malloc for each URB.
5076 * Isochronous transfers have a variable-length struct.
5078 arg_type++;
5079 target_size = thunk_type_size(arg_type, THUNK_TARGET);
5081 /* construct host copy of urb and metadata */
5082 lurb = g_try_new0(struct live_urb, 1);
5083 if (!lurb) {
5084 return -TARGET_ENOMEM;
5087 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5088 if (!argptr) {
5089 g_free(lurb);
5090 return -TARGET_EFAULT;
5092 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5093 unlock_user(argptr, arg, 0);
5095 lurb->target_urb_adr = arg;
5096 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5098 /* buffer space used depends on endpoint type so lock the entire buffer */
5099 /* control type urbs should check the buffer contents for true direction */
5100 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5101 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5102 lurb->host_urb.buffer_length, 1);
5103 if (lurb->target_buf_ptr == NULL) {
5104 g_free(lurb);
5105 return -TARGET_EFAULT;
5108 /* update buffer pointer in host copy */
5109 lurb->host_urb.buffer = lurb->target_buf_ptr;
5111 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5112 if (is_error(ret)) {
5113 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5114 g_free(lurb);
5115 } else {
5116 urb_hashtable_insert(lurb);
5119 return ret;
5121 #endif /* CONFIG_USBFS */
5123 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5124 int cmd, abi_long arg)
5126 void *argptr;
5127 struct dm_ioctl *host_dm;
5128 abi_long guest_data;
5129 uint32_t guest_data_size;
5130 int target_size;
5131 const argtype *arg_type = ie->arg_type;
5132 abi_long ret;
5133 void *big_buf = NULL;
5134 char *host_data;
5136 arg_type++;
5137 target_size = thunk_type_size(arg_type, 0);
5138 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5139 if (!argptr) {
5140 ret = -TARGET_EFAULT;
5141 goto out;
5143 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5144 unlock_user(argptr, arg, 0);
5146 /* buf_temp is too small, so fetch things into a bigger buffer */
5147 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5148 memcpy(big_buf, buf_temp, target_size);
5149 buf_temp = big_buf;
5150 host_dm = big_buf;
5152 guest_data = arg + host_dm->data_start;
5153 if ((guest_data - arg) < 0) {
5154 ret = -TARGET_EINVAL;
5155 goto out;
5157 guest_data_size = host_dm->data_size - host_dm->data_start;
5158 host_data = (char*)host_dm + host_dm->data_start;
5160 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5161 if (!argptr) {
5162 ret = -TARGET_EFAULT;
5163 goto out;
5166 switch (ie->host_cmd) {
5167 case DM_REMOVE_ALL:
5168 case DM_LIST_DEVICES:
5169 case DM_DEV_CREATE:
5170 case DM_DEV_REMOVE:
5171 case DM_DEV_SUSPEND:
5172 case DM_DEV_STATUS:
5173 case DM_DEV_WAIT:
5174 case DM_TABLE_STATUS:
5175 case DM_TABLE_CLEAR:
5176 case DM_TABLE_DEPS:
5177 case DM_LIST_VERSIONS:
5178 /* no input data */
5179 break;
5180 case DM_DEV_RENAME:
5181 case DM_DEV_SET_GEOMETRY:
5182 /* data contains only strings */
5183 memcpy(host_data, argptr, guest_data_size);
5184 break;
5185 case DM_TARGET_MSG:
5186 memcpy(host_data, argptr, guest_data_size);
5187 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5188 break;
5189 case DM_TABLE_LOAD:
5191 void *gspec = argptr;
5192 void *cur_data = host_data;
5193 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5194 int spec_size = thunk_type_size(arg_type, 0);
5195 int i;
5197 for (i = 0; i < host_dm->target_count; i++) {
5198 struct dm_target_spec *spec = cur_data;
5199 uint32_t next;
5200 int slen;
5202 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5203 slen = strlen((char*)gspec + spec_size) + 1;
5204 next = spec->next;
5205 spec->next = sizeof(*spec) + slen;
5206 strcpy((char*)&spec[1], gspec + spec_size);
5207 gspec += next;
5208 cur_data += spec->next;
5210 break;
5212 default:
5213 ret = -TARGET_EINVAL;
5214 unlock_user(argptr, guest_data, 0);
5215 goto out;
5217 unlock_user(argptr, guest_data, 0);
5219 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5220 if (!is_error(ret)) {
5221 guest_data = arg + host_dm->data_start;
5222 guest_data_size = host_dm->data_size - host_dm->data_start;
5223 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5224 switch (ie->host_cmd) {
5225 case DM_REMOVE_ALL:
5226 case DM_DEV_CREATE:
5227 case DM_DEV_REMOVE:
5228 case DM_DEV_RENAME:
5229 case DM_DEV_SUSPEND:
5230 case DM_DEV_STATUS:
5231 case DM_TABLE_LOAD:
5232 case DM_TABLE_CLEAR:
5233 case DM_TARGET_MSG:
5234 case DM_DEV_SET_GEOMETRY:
5235 /* no return data */
5236 break;
5237 case DM_LIST_DEVICES:
5239 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5240 uint32_t remaining_data = guest_data_size;
5241 void *cur_data = argptr;
5242 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5243 int nl_size = 12; /* can't use thunk_size due to alignment */
5245 while (1) {
5246 uint32_t next = nl->next;
5247 if (next) {
5248 nl->next = nl_size + (strlen(nl->name) + 1);
5250 if (remaining_data < nl->next) {
5251 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5252 break;
5254 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5255 strcpy(cur_data + nl_size, nl->name);
5256 cur_data += nl->next;
5257 remaining_data -= nl->next;
5258 if (!next) {
5259 break;
5261 nl = (void*)nl + next;
5263 break;
5265 case DM_DEV_WAIT:
5266 case DM_TABLE_STATUS:
5268 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5269 void *cur_data = argptr;
5270 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5271 int spec_size = thunk_type_size(arg_type, 0);
5272 int i;
5274 for (i = 0; i < host_dm->target_count; i++) {
5275 uint32_t next = spec->next;
5276 int slen = strlen((char*)&spec[1]) + 1;
5277 spec->next = (cur_data - argptr) + spec_size + slen;
5278 if (guest_data_size < spec->next) {
5279 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5280 break;
5282 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5283 strcpy(cur_data + spec_size, (char*)&spec[1]);
5284 cur_data = argptr + spec->next;
5285 spec = (void*)host_dm + host_dm->data_start + next;
5287 break;
5289 case DM_TABLE_DEPS:
5291 void *hdata = (void*)host_dm + host_dm->data_start;
5292 int count = *(uint32_t*)hdata;
5293 uint64_t *hdev = hdata + 8;
5294 uint64_t *gdev = argptr + 8;
5295 int i;
5297 *(uint32_t*)argptr = tswap32(count);
5298 for (i = 0; i < count; i++) {
5299 *gdev = tswap64(*hdev);
5300 gdev++;
5301 hdev++;
5303 break;
5305 case DM_LIST_VERSIONS:
5307 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5308 uint32_t remaining_data = guest_data_size;
5309 void *cur_data = argptr;
5310 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5311 int vers_size = thunk_type_size(arg_type, 0);
5313 while (1) {
5314 uint32_t next = vers->next;
5315 if (next) {
5316 vers->next = vers_size + (strlen(vers->name) + 1);
5318 if (remaining_data < vers->next) {
5319 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5320 break;
5322 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5323 strcpy(cur_data + vers_size, vers->name);
5324 cur_data += vers->next;
5325 remaining_data -= vers->next;
5326 if (!next) {
5327 break;
5329 vers = (void*)vers + next;
5331 break;
5333 default:
5334 unlock_user(argptr, guest_data, 0);
5335 ret = -TARGET_EINVAL;
5336 goto out;
5338 unlock_user(argptr, guest_data, guest_data_size);
5340 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5341 if (!argptr) {
5342 ret = -TARGET_EFAULT;
5343 goto out;
5345 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5346 unlock_user(argptr, arg, target_size);
5348 out:
5349 g_free(big_buf);
5350 return ret;
5353 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5354 int cmd, abi_long arg)
5356 void *argptr;
5357 int target_size;
5358 const argtype *arg_type = ie->arg_type;
5359 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5360 abi_long ret;
5362 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5363 struct blkpg_partition host_part;
5365 /* Read and convert blkpg */
5366 arg_type++;
5367 target_size = thunk_type_size(arg_type, 0);
5368 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5369 if (!argptr) {
5370 ret = -TARGET_EFAULT;
5371 goto out;
5373 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5374 unlock_user(argptr, arg, 0);
5376 switch (host_blkpg->op) {
5377 case BLKPG_ADD_PARTITION:
5378 case BLKPG_DEL_PARTITION:
5379 /* payload is struct blkpg_partition */
5380 break;
5381 default:
5382 /* Unknown opcode */
5383 ret = -TARGET_EINVAL;
5384 goto out;
5387 /* Read and convert blkpg->data */
5388 arg = (abi_long)(uintptr_t)host_blkpg->data;
5389 target_size = thunk_type_size(part_arg_type, 0);
5390 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5391 if (!argptr) {
5392 ret = -TARGET_EFAULT;
5393 goto out;
5395 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5396 unlock_user(argptr, arg, 0);
5398 /* Swizzle the data pointer to our local copy and call! */
5399 host_blkpg->data = &host_part;
5400 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5402 out:
5403 return ret;
5406 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5407 int fd, int cmd, abi_long arg)
5409 const argtype *arg_type = ie->arg_type;
5410 const StructEntry *se;
5411 const argtype *field_types;
5412 const int *dst_offsets, *src_offsets;
5413 int target_size;
5414 void *argptr;
5415 abi_ulong *target_rt_dev_ptr = NULL;
5416 unsigned long *host_rt_dev_ptr = NULL;
5417 abi_long ret;
5418 int i;
5420 assert(ie->access == IOC_W);
5421 assert(*arg_type == TYPE_PTR);
5422 arg_type++;
5423 assert(*arg_type == TYPE_STRUCT);
5424 target_size = thunk_type_size(arg_type, 0);
5425 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5426 if (!argptr) {
5427 return -TARGET_EFAULT;
5429 arg_type++;
5430 assert(*arg_type == (int)STRUCT_rtentry);
5431 se = struct_entries + *arg_type++;
5432 assert(se->convert[0] == NULL);
5433 /* convert struct here to be able to catch rt_dev string */
5434 field_types = se->field_types;
5435 dst_offsets = se->field_offsets[THUNK_HOST];
5436 src_offsets = se->field_offsets[THUNK_TARGET];
5437 for (i = 0; i < se->nb_fields; i++) {
5438 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5439 assert(*field_types == TYPE_PTRVOID);
5440 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5441 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5442 if (*target_rt_dev_ptr != 0) {
5443 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5444 tswapal(*target_rt_dev_ptr));
5445 if (!*host_rt_dev_ptr) {
5446 unlock_user(argptr, arg, 0);
5447 return -TARGET_EFAULT;
5449 } else {
5450 *host_rt_dev_ptr = 0;
5452 field_types++;
5453 continue;
5455 field_types = thunk_convert(buf_temp + dst_offsets[i],
5456 argptr + src_offsets[i],
5457 field_types, THUNK_HOST);
5459 unlock_user(argptr, arg, 0);
5461 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5463 assert(host_rt_dev_ptr != NULL);
5464 assert(target_rt_dev_ptr != NULL);
5465 if (*host_rt_dev_ptr != 0) {
5466 unlock_user((void *)*host_rt_dev_ptr,
5467 *target_rt_dev_ptr, 0);
5469 return ret;
5472 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5473 int fd, int cmd, abi_long arg)
5475 int sig = target_to_host_signal(arg);
5476 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5479 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5480 int fd, int cmd, abi_long arg)
5482 struct timeval tv;
5483 abi_long ret;
5485 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5486 if (is_error(ret)) {
5487 return ret;
5490 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5491 if (copy_to_user_timeval(arg, &tv)) {
5492 return -TARGET_EFAULT;
5494 } else {
5495 if (copy_to_user_timeval64(arg, &tv)) {
5496 return -TARGET_EFAULT;
5500 return ret;
5503 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5504 int fd, int cmd, abi_long arg)
5506 struct timespec ts;
5507 abi_long ret;
5509 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5510 if (is_error(ret)) {
5511 return ret;
5514 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5515 if (host_to_target_timespec(arg, &ts)) {
5516 return -TARGET_EFAULT;
5518 } else {
5519 if (host_to_target_timespec64(arg, &ts)) {
5520 return -TARGET_EFAULT;
5524 return ret;
5527 #ifdef TIOCGPTPEER
5528 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5529 int fd, int cmd, abi_long arg)
5531 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5532 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5534 #endif
5536 #ifdef HAVE_DRM_H
5538 static void unlock_drm_version(struct drm_version *host_ver,
5539 struct target_drm_version *target_ver,
5540 bool copy)
5542 unlock_user(host_ver->name, target_ver->name,
5543 copy ? host_ver->name_len : 0);
5544 unlock_user(host_ver->date, target_ver->date,
5545 copy ? host_ver->date_len : 0);
5546 unlock_user(host_ver->desc, target_ver->desc,
5547 copy ? host_ver->desc_len : 0);
5550 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5551 struct target_drm_version *target_ver)
5553 memset(host_ver, 0, sizeof(*host_ver));
5555 __get_user(host_ver->name_len, &target_ver->name_len);
5556 if (host_ver->name_len) {
5557 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5558 target_ver->name_len, 0);
5559 if (!host_ver->name) {
5560 return -TARGET_EFAULT;
5564 __get_user(host_ver->date_len, &target_ver->date_len);
5565 if (host_ver->date_len) {
5566 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5567 target_ver->date_len, 0);
5568 if (!host_ver->date) {
5569 goto err;
5573 __get_user(host_ver->desc_len, &target_ver->desc_len);
5574 if (host_ver->desc_len) {
5575 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5576 target_ver->desc_len, 0);
5577 if (!host_ver->desc) {
5578 goto err;
5582 return 0;
5583 err:
5584 unlock_drm_version(host_ver, target_ver, false);
5585 return -TARGET_EFAULT;
5588 static inline void host_to_target_drmversion(
5589 struct target_drm_version *target_ver,
5590 struct drm_version *host_ver)
5592 __put_user(host_ver->version_major, &target_ver->version_major);
5593 __put_user(host_ver->version_minor, &target_ver->version_minor);
5594 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5595 __put_user(host_ver->name_len, &target_ver->name_len);
5596 __put_user(host_ver->date_len, &target_ver->date_len);
5597 __put_user(host_ver->desc_len, &target_ver->desc_len);
5598 unlock_drm_version(host_ver, target_ver, true);
5601 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5602 int fd, int cmd, abi_long arg)
5604 struct drm_version *ver;
5605 struct target_drm_version *target_ver;
5606 abi_long ret;
5608 switch (ie->host_cmd) {
5609 case DRM_IOCTL_VERSION:
5610 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5611 return -TARGET_EFAULT;
5613 ver = (struct drm_version *)buf_temp;
5614 ret = target_to_host_drmversion(ver, target_ver);
5615 if (!is_error(ret)) {
5616 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5617 if (is_error(ret)) {
5618 unlock_drm_version(ver, target_ver, false);
5619 } else {
5620 host_to_target_drmversion(target_ver, ver);
5623 unlock_user_struct(target_ver, arg, 0);
5624 return ret;
5626 return -TARGET_ENOSYS;
5629 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5630 struct drm_i915_getparam *gparam,
5631 int fd, abi_long arg)
5633 abi_long ret;
5634 int value;
5635 struct target_drm_i915_getparam *target_gparam;
5637 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5638 return -TARGET_EFAULT;
5641 __get_user(gparam->param, &target_gparam->param);
5642 gparam->value = &value;
5643 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5644 put_user_s32(value, target_gparam->value);
5646 unlock_user_struct(target_gparam, arg, 0);
5647 return ret;
5650 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5651 int fd, int cmd, abi_long arg)
5653 switch (ie->host_cmd) {
5654 case DRM_IOCTL_I915_GETPARAM:
5655 return do_ioctl_drm_i915_getparam(ie,
5656 (struct drm_i915_getparam *)buf_temp,
5657 fd, arg);
5658 default:
5659 return -TARGET_ENOSYS;
5663 #endif
5665 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5666 int fd, int cmd, abi_long arg)
5668 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5669 struct tun_filter *target_filter;
5670 char *target_addr;
5672 assert(ie->access == IOC_W);
5674 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5675 if (!target_filter) {
5676 return -TARGET_EFAULT;
5678 filter->flags = tswap16(target_filter->flags);
5679 filter->count = tswap16(target_filter->count);
5680 unlock_user(target_filter, arg, 0);
5682 if (filter->count) {
5683 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5684 MAX_STRUCT_SIZE) {
5685 return -TARGET_EFAULT;
5688 target_addr = lock_user(VERIFY_READ,
5689 arg + offsetof(struct tun_filter, addr),
5690 filter->count * ETH_ALEN, 1);
5691 if (!target_addr) {
5692 return -TARGET_EFAULT;
5694 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5695 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5698 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5701 IOCTLEntry ioctl_entries[] = {
5702 #define IOCTL(cmd, access, ...) \
5703 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5704 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5705 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5706 #define IOCTL_IGNORE(cmd) \
5707 { TARGET_ ## cmd, 0, #cmd },
5708 #include "ioctls.h"
5709 { 0, 0, },
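/*
 * As an illustration of the expansion (the entry shown is hypothetical
 * and depends on what ioctls.h actually lists):
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * becomes the table row
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 * i.e. target command, host command, name for logging, access mode,
 * no custom do_ioctl handler, and the argument type description.
 */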
5712 /* ??? Implement proper locking for ioctls. */
5713 /* do_ioctl() must return target values and target errnos. */
5714 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5716 const IOCTLEntry *ie;
5717 const argtype *arg_type;
5718 abi_long ret;
5719 uint8_t buf_temp[MAX_STRUCT_SIZE];
5720 int target_size;
5721 void *argptr;
5723 ie = ioctl_entries;
5724 for (;;) {
5725 if (ie->target_cmd == 0) {
5726 qemu_log_mask(
5727 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5728 return -TARGET_ENOSYS;
5730 if (ie->target_cmd == cmd)
5731 break;
5732 ie++;
5734 arg_type = ie->arg_type;
5735 if (ie->do_ioctl) {
5736 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5737 } else if (!ie->host_cmd) {
5738 /* Some architectures define BSD ioctls in their headers
5739 that are not implemented in Linux. */
5740 return -TARGET_ENOSYS;
5743 switch (arg_type[0]) {
5744 case TYPE_NULL:
5745 /* no argument */
5746 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5747 break;
5748 case TYPE_PTRVOID:
5749 case TYPE_INT:
5750 case TYPE_LONG:
5751 case TYPE_ULONG:
5752 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5753 break;
5754 case TYPE_PTR:
5755 arg_type++;
5756 target_size = thunk_type_size(arg_type, 0);
5757 switch (ie->access) {
5758 case IOC_R:
5759 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5760 if (!is_error(ret)) {
5761 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5762 if (!argptr)
5763 return -TARGET_EFAULT;
5764 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5765 unlock_user(argptr, arg, target_size);
5767 break;
5768 case IOC_W:
5769 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5770 if (!argptr)
5771 return -TARGET_EFAULT;
5772 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5773 unlock_user(argptr, arg, 0);
5774 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5775 break;
5776 default:
5777 case IOC_RW:
5778 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5779 if (!argptr)
5780 return -TARGET_EFAULT;
5781 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5782 unlock_user(argptr, arg, 0);
5783 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5784 if (!is_error(ret)) {
5785 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5786 if (!argptr)
5787 return -TARGET_EFAULT;
5788 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5789 unlock_user(argptr, arg, target_size);
5791 break;
5793 break;
5794 default:
5795 qemu_log_mask(LOG_UNIMP,
5796 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5797 (long)cmd, arg_type[0]);
5798 ret = -TARGET_ENOSYS;
5799 break;
5801 return ret;
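/*
 * Each bitmask_transtbl row below reads, in order: target mask, target
 * bits, host mask, host bits. For the simple flags the mask and bits
 * are identical; for multi-bit fields the mask selects the field and
 * the bits select one value within it, e.g. { TARGET_CSIZE, TARGET_CS7,
 * CSIZE, CS7 } maps a 7-bit character size in either direction.
 */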
5804 static const bitmask_transtbl iflag_tbl[] = {
5805 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5806 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5807 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5808 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5809 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5810 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5811 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5812 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5813 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5814 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5815 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5816 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5817 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5818 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5819 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5820 { 0, 0, 0, 0 }
5823 static const bitmask_transtbl oflag_tbl[] = {
5824 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5825 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5826 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5827 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5828 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5829 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5830 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5831 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5832 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5833 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5834 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5835 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5836 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5837 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5838 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5839 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5840 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5841 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5842 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5843 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5844 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5845 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5846 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5847 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5848 { 0, 0, 0, 0 }
5851 static const bitmask_transtbl cflag_tbl[] = {
5852 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5853 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5854 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5855 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5856 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5857 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5858 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5859 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5860 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5861 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5862 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5863 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5864 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5865 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5866 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5867 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5868 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5869 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5870 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5871 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5872 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5873 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5874 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5875 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5876 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5877 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5878 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5879 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5880 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5881 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5882 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5883 { 0, 0, 0, 0 }
5886 static const bitmask_transtbl lflag_tbl[] = {
5887 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5888 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5889 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5890 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5891 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5892 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5893 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5894 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5895 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5896 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5897 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5898 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5899 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5900 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5901 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5902 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5903 { 0, 0, 0, 0 }
5906 static void target_to_host_termios (void *dst, const void *src)
5908 struct host_termios *host = dst;
5909 const struct target_termios *target = src;
5911 host->c_iflag =
5912 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5913 host->c_oflag =
5914 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5915 host->c_cflag =
5916 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5917 host->c_lflag =
5918 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5919 host->c_line = target->c_line;
5921 memset(host->c_cc, 0, sizeof(host->c_cc));
5922 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5923 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5924 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5925 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5926 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5927 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5928 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5929 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5930 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5931 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5932 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5933 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5934 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5935 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5936 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5937 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5938 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5941 static void host_to_target_termios (void *dst, const void *src)
5943 struct target_termios *target = dst;
5944 const struct host_termios *host = src;
5946 target->c_iflag =
5947 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5948 target->c_oflag =
5949 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5950 target->c_cflag =
5951 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5952 target->c_lflag =
5953 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5954 target->c_line = host->c_line;
5956 memset(target->c_cc, 0, sizeof(target->c_cc));
5957 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5958 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5959 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5960 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5961 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5962 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5963 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5964 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5965 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5966 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5967 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5968 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5969 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5970 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5971 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5972 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5973 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5976 static const StructEntry struct_termios_def = {
5977 .convert = { host_to_target_termios, target_to_host_termios },
5978 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5979 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5980 .print = print_termios,
5983 static const bitmask_transtbl mmap_flags_tbl[] = {
5984 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5985 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5986 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5987 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5988 MAP_ANONYMOUS, MAP_ANONYMOUS },
5989 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5990 MAP_GROWSDOWN, MAP_GROWSDOWN },
5991 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5992 MAP_DENYWRITE, MAP_DENYWRITE },
5993 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5994 MAP_EXECUTABLE, MAP_EXECUTABLE },
5995 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5996 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5997 MAP_NORESERVE, MAP_NORESERVE },
5998 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5999 /* MAP_STACK has been ignored by the kernel for quite some time.
6000 Recognize it for the target insofar as we do not want to pass
6001 it through to the host. */
6002 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6003 { 0, 0, 0, 0 }
6007 /* NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6008 * TARGET_I386 is also defined when TARGET_X86_64 is defined */
6010 #if defined(TARGET_I386)
6012 /* NOTE: there is really only one LDT, shared by all threads */
6013 static uint8_t *ldt_table;
6015 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6017 int size;
6018 void *p;
6020 if (!ldt_table)
6021 return 0;
6022 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6023 if (size > bytecount)
6024 size = bytecount;
6025 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6026 if (!p)
6027 return -TARGET_EFAULT;
6028 /* ??? Should this be byteswapped? */
6029 memcpy(p, ldt_table, size);
6030 unlock_user(p, ptr, size);
6031 return size;
6034 /* XXX: add locking support */
6035 static abi_long write_ldt(CPUX86State *env,
6036 abi_ulong ptr, unsigned long bytecount, int oldmode)
6038 struct target_modify_ldt_ldt_s ldt_info;
6039 struct target_modify_ldt_ldt_s *target_ldt_info;
6040 int seg_32bit, contents, read_exec_only, limit_in_pages;
6041 int seg_not_present, useable, lm;
6042 uint32_t *lp, entry_1, entry_2;
6044 if (bytecount != sizeof(ldt_info))
6045 return -TARGET_EINVAL;
6046 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6047 return -TARGET_EFAULT;
6048 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6049 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6050 ldt_info.limit = tswap32(target_ldt_info->limit);
6051 ldt_info.flags = tswap32(target_ldt_info->flags);
6052 unlock_user_struct(target_ldt_info, ptr, 0);
6054 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6055 return -TARGET_EINVAL;
6056 seg_32bit = ldt_info.flags & 1;
6057 contents = (ldt_info.flags >> 1) & 3;
6058 read_exec_only = (ldt_info.flags >> 3) & 1;
6059 limit_in_pages = (ldt_info.flags >> 4) & 1;
6060 seg_not_present = (ldt_info.flags >> 5) & 1;
6061 useable = (ldt_info.flags >> 6) & 1;
6062 #ifdef TARGET_ABI32
6063 lm = 0;
6064 #else
6065 lm = (ldt_info.flags >> 7) & 1;
6066 #endif
6067 if (contents == 3) {
6068 if (oldmode)
6069 return -TARGET_EINVAL;
6070 if (seg_not_present == 0)
6071 return -TARGET_EINVAL;
6073 /* allocate the LDT */
6074 if (!ldt_table) {
6075 env->ldt.base = target_mmap(0,
6076 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6077 PROT_READ|PROT_WRITE,
6078 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6079 if (env->ldt.base == -1)
6080 return -TARGET_ENOMEM;
6081 memset(g2h_untagged(env->ldt.base), 0,
6082 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6083 env->ldt.limit = 0xffff;
6084 ldt_table = g2h_untagged(env->ldt.base);
6087 /* NOTE: same code as Linux kernel */
6088 /* Allow LDTs to be cleared by the user. */
6089 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6090 if (oldmode ||
6091 (contents == 0 &&
6092 read_exec_only == 1 &&
6093 seg_32bit == 0 &&
6094 limit_in_pages == 0 &&
6095 seg_not_present == 1 &&
6096 useable == 0 )) {
6097 entry_1 = 0;
6098 entry_2 = 0;
6099 goto install;
6103 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6104 (ldt_info.limit & 0x0ffff);
6105 entry_2 = (ldt_info.base_addr & 0xff000000) |
6106 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6107 (ldt_info.limit & 0xf0000) |
6108 ((read_exec_only ^ 1) << 9) |
6109 (contents << 10) |
6110 ((seg_not_present ^ 1) << 15) |
6111 (seg_32bit << 22) |
6112 (limit_in_pages << 23) |
6113 (lm << 21) |
6114 0x7000;
6115 if (!oldmode)
6116 entry_2 |= (useable << 20);
6118 /* Install the new entry ... */
6119 install:
6120 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6121 lp[0] = tswap32(entry_1);
6122 lp[1] = tswap32(entry_2);
6123 return 0;
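/*
 * A worked example of the descriptor encoding above, with a
 * hypothetical base_addr of 0x12345678 and limit of 0xabcde:
 *
 *     entry_1 == 0x5678bcde   (base[15:0] << 16 | limit[15:0])
 *     entry_2 == 0x120a0034   (base[31:24] | limit[19:16] | base[23:16])
 *                             plus the flag bits ORed in above
 *
 * The 0x7000 constant sets S=1 (code/data descriptor) and DPL=3; the
 * present bit is bit 15, derived from seg_not_present.
 */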
6126 /* specific and weird i386 syscalls */
6127 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6128 unsigned long bytecount)
6130 abi_long ret;
6132 switch (func) {
6133 case 0:
6134 ret = read_ldt(ptr, bytecount);
6135 break;
6136 case 1:
6137 ret = write_ldt(env, ptr, bytecount, 1);
6138 break;
6139 case 0x11:
6140 ret = write_ldt(env, ptr, bytecount, 0);
6141 break;
6142 default:
6143 ret = -TARGET_ENOSYS;
6144 break;
6146 return ret;
6149 #if defined(TARGET_ABI32)
6150 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6152 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6153 struct target_modify_ldt_ldt_s ldt_info;
6154 struct target_modify_ldt_ldt_s *target_ldt_info;
6155 int seg_32bit, contents, read_exec_only, limit_in_pages;
6156 int seg_not_present, useable, lm;
6157 uint32_t *lp, entry_1, entry_2;
6158 int i;
6160 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6161 if (!target_ldt_info)
6162 return -TARGET_EFAULT;
6163 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6164 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6165 ldt_info.limit = tswap32(target_ldt_info->limit);
6166 ldt_info.flags = tswap32(target_ldt_info->flags);
6167 if (ldt_info.entry_number == -1) {
6168 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6169 if (gdt_table[i] == 0) {
6170 ldt_info.entry_number = i;
6171 target_ldt_info->entry_number = tswap32(i);
6172 break;
6176 unlock_user_struct(target_ldt_info, ptr, 1);
6178 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6179 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6180 return -TARGET_EINVAL;
6181 seg_32bit = ldt_info.flags & 1;
6182 contents = (ldt_info.flags >> 1) & 3;
6183 read_exec_only = (ldt_info.flags >> 3) & 1;
6184 limit_in_pages = (ldt_info.flags >> 4) & 1;
6185 seg_not_present = (ldt_info.flags >> 5) & 1;
6186 useable = (ldt_info.flags >> 6) & 1;
6187 #ifdef TARGET_ABI32
6188 lm = 0;
6189 #else
6190 lm = (ldt_info.flags >> 7) & 1;
6191 #endif
6193 if (contents == 3) {
6194 if (seg_not_present == 0)
6195 return -TARGET_EINVAL;
6198 /* NOTE: same code as Linux kernel */
6199 /* Allow LDTs to be cleared by the user. */
6200 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6201 if ((contents == 0 &&
6202 read_exec_only == 1 &&
6203 seg_32bit == 0 &&
6204 limit_in_pages == 0 &&
6205 seg_not_present == 1 &&
6206 useable == 0 )) {
6207 entry_1 = 0;
6208 entry_2 = 0;
6209 goto install;
6213 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6214 (ldt_info.limit & 0x0ffff);
6215 entry_2 = (ldt_info.base_addr & 0xff000000) |
6216 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6217 (ldt_info.limit & 0xf0000) |
6218 ((read_exec_only ^ 1) << 9) |
6219 (contents << 10) |
6220 ((seg_not_present ^ 1) << 15) |
6221 (seg_32bit << 22) |
6222 (limit_in_pages << 23) |
6223 (useable << 20) |
6224 (lm << 21) |
6225 0x7000;
6227 /* Install the new entry ... */
6228 install:
6229 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6230 lp[0] = tswap32(entry_1);
6231 lp[1] = tswap32(entry_2);
6232 return 0;
6235 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6237 struct target_modify_ldt_ldt_s *target_ldt_info;
6238 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6239 uint32_t base_addr, limit, flags;
6240 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6241 int seg_not_present, useable, lm;
6242 uint32_t *lp, entry_1, entry_2;
6244 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6245 if (!target_ldt_info)
6246 return -TARGET_EFAULT;
6247 idx = tswap32(target_ldt_info->entry_number);
6248 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6249 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6250 unlock_user_struct(target_ldt_info, ptr, 1);
6251 return -TARGET_EINVAL;
6253 lp = (uint32_t *)(gdt_table + idx);
6254 entry_1 = tswap32(lp[0]);
6255 entry_2 = tswap32(lp[1]);
6257 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6258 contents = (entry_2 >> 10) & 3;
6259 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6260 seg_32bit = (entry_2 >> 22) & 1;
6261 limit_in_pages = (entry_2 >> 23) & 1;
6262 useable = (entry_2 >> 20) & 1;
6263 #ifdef TARGET_ABI32
6264 lm = 0;
6265 #else
6266 lm = (entry_2 >> 21) & 1;
6267 #endif
6268 flags = (seg_32bit << 0) | (contents << 1) |
6269 (read_exec_only << 3) | (limit_in_pages << 4) |
6270 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6271 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6272 base_addr = (entry_1 >> 16) |
6273 (entry_2 & 0xff000000) |
6274 ((entry_2 & 0xff) << 16);
6275 target_ldt_info->base_addr = tswapal(base_addr);
6276 target_ldt_info->limit = tswap32(limit);
6277 target_ldt_info->flags = tswap32(flags);
6278 unlock_user_struct(target_ldt_info, ptr, 1);
6279 return 0;
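/*
 * Note that the flags word rebuilt here uses the same bit layout that
 * write_ldt() and do_set_thread_area() parse out of ldt_info.flags
 * (bit 0 seg_32bit, bits 1-2 contents, bit 3 read_exec_only, bit 4
 * limit_in_pages, bit 5 seg_not_present, bit 6 useable, bit 7 lm),
 * so a set_thread_area()/get_thread_area() pair round-trips cleanly.
 */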
6282 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6284 return -TARGET_ENOSYS;
6286 #else
6287 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6289 abi_long ret = 0;
6290 abi_ulong val;
6291 int idx;
6293 switch(code) {
6294 case TARGET_ARCH_SET_GS:
6295 case TARGET_ARCH_SET_FS:
6296 if (code == TARGET_ARCH_SET_GS)
6297 idx = R_GS;
6298 else
6299 idx = R_FS;
6300 cpu_x86_load_seg(env, idx, 0);
6301 env->segs[idx].base = addr;
6302 break;
6303 case TARGET_ARCH_GET_GS:
6304 case TARGET_ARCH_GET_FS:
6305 if (code == TARGET_ARCH_GET_GS)
6306 idx = R_GS;
6307 else
6308 idx = R_FS;
6309 val = env->segs[idx].base;
6310 if (put_user(val, addr, abi_ulong))
6311 ret = -TARGET_EFAULT;
6312 break;
6313 default:
6314 ret = -TARGET_EINVAL;
6315 break;
6317 return ret;
6319 #endif /* defined(TARGET_ABI32) */
6320 #endif /* defined(TARGET_I386) */
6323 /* These constants are generic. Supply any that are missing from the host. */
6325 #ifndef PR_SET_NAME
6326 # define PR_SET_NAME 15
6327 # define PR_GET_NAME 16
6328 #endif
6329 #ifndef PR_SET_FP_MODE
6330 # define PR_SET_FP_MODE 45
6331 # define PR_GET_FP_MODE 46
6332 # define PR_FP_MODE_FR (1 << 0)
6333 # define PR_FP_MODE_FRE (1 << 1)
6334 #endif
6335 #ifndef PR_SVE_SET_VL
6336 # define PR_SVE_SET_VL 50
6337 # define PR_SVE_GET_VL 51
6338 # define PR_SVE_VL_LEN_MASK 0xffff
6339 # define PR_SVE_VL_INHERIT (1 << 17)
6340 #endif
6341 #ifndef PR_PAC_RESET_KEYS
6342 # define PR_PAC_RESET_KEYS 54
6343 # define PR_PAC_APIAKEY (1 << 0)
6344 # define PR_PAC_APIBKEY (1 << 1)
6345 # define PR_PAC_APDAKEY (1 << 2)
6346 # define PR_PAC_APDBKEY (1 << 3)
6347 # define PR_PAC_APGAKEY (1 << 4)
6348 #endif
6349 #ifndef PR_SET_TAGGED_ADDR_CTRL
6350 # define PR_SET_TAGGED_ADDR_CTRL 55
6351 # define PR_GET_TAGGED_ADDR_CTRL 56
6352 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6353 #endif
6354 #ifndef PR_MTE_TCF_SHIFT
6355 # define PR_MTE_TCF_SHIFT 1
6356 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6357 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6358 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6359 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6360 # define PR_MTE_TAG_SHIFT 3
6361 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6362 #endif
6363 #ifndef PR_SET_IO_FLUSHER
6364 # define PR_SET_IO_FLUSHER 57
6365 # define PR_GET_IO_FLUSHER 58
6366 #endif
6367 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6368 # define PR_SET_SYSCALL_USER_DISPATCH 59
6369 #endif
6370 #ifndef PR_SME_SET_VL
6371 # define PR_SME_SET_VL 63
6372 # define PR_SME_GET_VL 64
6373 # define PR_SME_VL_LEN_MASK 0xffff
6374 # define PR_SME_VL_INHERIT (1 << 17)
6375 #endif
6377 #include "target_prctl.h"
6379 static abi_long do_prctl_inval0(CPUArchState *env)
6381 return -TARGET_EINVAL;
6384 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6386 return -TARGET_EINVAL;
6389 #ifndef do_prctl_get_fp_mode
6390 #define do_prctl_get_fp_mode do_prctl_inval0
6391 #endif
6392 #ifndef do_prctl_set_fp_mode
6393 #define do_prctl_set_fp_mode do_prctl_inval1
6394 #endif
6395 #ifndef do_prctl_sve_get_vl
6396 #define do_prctl_sve_get_vl do_prctl_inval0
6397 #endif
6398 #ifndef do_prctl_sve_set_vl
6399 #define do_prctl_sve_set_vl do_prctl_inval1
6400 #endif
6401 #ifndef do_prctl_reset_keys
6402 #define do_prctl_reset_keys do_prctl_inval1
6403 #endif
6404 #ifndef do_prctl_set_tagged_addr_ctrl
6405 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6406 #endif
6407 #ifndef do_prctl_get_tagged_addr_ctrl
6408 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6409 #endif
6410 #ifndef do_prctl_get_unalign
6411 #define do_prctl_get_unalign do_prctl_inval1
6412 #endif
6413 #ifndef do_prctl_set_unalign
6414 #define do_prctl_set_unalign do_prctl_inval1
6415 #endif
6416 #ifndef do_prctl_sme_get_vl
6417 #define do_prctl_sme_get_vl do_prctl_inval0
6418 #endif
6419 #ifndef do_prctl_sme_set_vl
6420 #define do_prctl_sme_set_vl do_prctl_inval1
6421 #endif
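/*
 * The pattern above lets each target's target_prctl.h provide real
 * implementations for whichever prctl hooks it supports; anything it
 * leaves undefined falls back to an EINVAL stub of matching arity
 * (do_prctl_inval0 for getters taking no argument, do_prctl_inval1
 * for setters taking one).
 */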
6423 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6424 abi_long arg3, abi_long arg4, abi_long arg5)
6426 abi_long ret;
6428 switch (option) {
6429 case PR_GET_PDEATHSIG:
6431 int deathsig;
6432 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6433 arg3, arg4, arg5));
6434 if (!is_error(ret) &&
6435 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6436 return -TARGET_EFAULT;
6438 return ret;
6440 case PR_SET_PDEATHSIG:
6441 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6442 arg3, arg4, arg5));
6443 case PR_GET_NAME:
6445 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6446 if (!name) {
6447 return -TARGET_EFAULT;
6449 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6450 arg3, arg4, arg5));
6451 unlock_user(name, arg2, 16);
6452 return ret;
6454 case PR_SET_NAME:
6456 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6457 if (!name) {
6458 return -TARGET_EFAULT;
6460 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6461 arg3, arg4, arg5));
6462 unlock_user(name, arg2, 0);
6463 return ret;
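/*
 * The fixed 16-byte buffer used by PR_GET_NAME/PR_SET_NAME above
 * matches the kernel's TASK_COMM_LEN: a thread name is at most 15
 * bytes plus a NUL terminator.
 */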
6465 case PR_GET_FP_MODE:
6466 return do_prctl_get_fp_mode(env);
6467 case PR_SET_FP_MODE:
6468 return do_prctl_set_fp_mode(env, arg2);
6469 case PR_SVE_GET_VL:
6470 return do_prctl_sve_get_vl(env);
6471 case PR_SVE_SET_VL:
6472 return do_prctl_sve_set_vl(env, arg2);
6473 case PR_SME_GET_VL:
6474 return do_prctl_sme_get_vl(env);
6475 case PR_SME_SET_VL:
6476 return do_prctl_sme_set_vl(env, arg2);
6477 case PR_PAC_RESET_KEYS:
6478 if (arg3 || arg4 || arg5) {
6479 return -TARGET_EINVAL;
6481 return do_prctl_reset_keys(env, arg2);
6482 case PR_SET_TAGGED_ADDR_CTRL:
6483 if (arg3 || arg4 || arg5) {
6484 return -TARGET_EINVAL;
6486 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6487 case PR_GET_TAGGED_ADDR_CTRL:
6488 if (arg2 || arg3 || arg4 || arg5) {
6489 return -TARGET_EINVAL;
6491 return do_prctl_get_tagged_addr_ctrl(env);
6493 case PR_GET_UNALIGN:
6494 return do_prctl_get_unalign(env, arg2);
6495 case PR_SET_UNALIGN:
6496 return do_prctl_set_unalign(env, arg2);
6498 case PR_CAP_AMBIENT:
6499 case PR_CAPBSET_READ:
6500 case PR_CAPBSET_DROP:
6501 case PR_GET_DUMPABLE:
6502 case PR_SET_DUMPABLE:
6503 case PR_GET_KEEPCAPS:
6504 case PR_SET_KEEPCAPS:
6505 case PR_GET_SECUREBITS:
6506 case PR_SET_SECUREBITS:
6507 case PR_GET_TIMING:
6508 case PR_SET_TIMING:
6509 case PR_GET_TIMERSLACK:
6510 case PR_SET_TIMERSLACK:
6511 case PR_MCE_KILL:
6512 case PR_MCE_KILL_GET:
6513 case PR_GET_NO_NEW_PRIVS:
6514 case PR_SET_NO_NEW_PRIVS:
6515 case PR_GET_IO_FLUSHER:
6516 case PR_SET_IO_FLUSHER:
6517 /* Some prctl options have no pointer arguments and we can pass them on. */
6518 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6520 case PR_GET_CHILD_SUBREAPER:
6521 case PR_SET_CHILD_SUBREAPER:
6522 case PR_GET_SPECULATION_CTRL:
6523 case PR_SET_SPECULATION_CTRL:
6524 case PR_GET_TID_ADDRESS:
6525 /* TODO */
6526 return -TARGET_EINVAL;
6528 case PR_GET_FPEXC:
6529 case PR_SET_FPEXC:
6530 /* Was used for SPE on PowerPC. */
6531 return -TARGET_EINVAL;
6533 case PR_GET_ENDIAN:
6534 case PR_SET_ENDIAN:
6535 case PR_GET_FPEMU:
6536 case PR_SET_FPEMU:
6537 case PR_SET_MM:
6538 case PR_GET_SECCOMP:
6539 case PR_SET_SECCOMP:
6540 case PR_SET_SYSCALL_USER_DISPATCH:
6541 case PR_GET_THP_DISABLE:
6542 case PR_SET_THP_DISABLE:
6543 case PR_GET_TSC:
6544 case PR_SET_TSC:
6545 /* Disable these to prevent the target from disabling functionality we need. */
6546 return -TARGET_EINVAL;
6548 default:
6549 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6550 option);
6551 return -TARGET_EINVAL;
6555 #define NEW_STACK_SIZE 0x40000
6558 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6559 typedef struct {
6560 CPUArchState *env;
6561 pthread_mutex_t mutex;
6562 pthread_cond_t cond;
6563 pthread_t thread;
6564 uint32_t tid;
6565 abi_ulong child_tidptr;
6566 abi_ulong parent_tidptr;
6567 sigset_t sigmask;
6568 } new_thread_info;
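/*
 * Thread creation handshake, in outline: the parent takes clone_lock,
 * sets up the new CPU state, and starts clone_func() with all signals
 * blocked.  The child publishes its TID, signals info->cond so the
 * parent can return the TID to the guest, then blocks on clone_lock
 * until the parent has finished initializing the TLS state.
 */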
6570 static void *clone_func(void *arg)
6572 new_thread_info *info = arg;
6573 CPUArchState *env;
6574 CPUState *cpu;
6575 TaskState *ts;
6577 rcu_register_thread();
6578 tcg_register_thread();
6579 env = info->env;
6580 cpu = env_cpu(env);
6581 thread_cpu = cpu;
6582 ts = (TaskState *)cpu->opaque;
6583 info->tid = sys_gettid();
6584 task_settid(ts);
6585 if (info->child_tidptr)
6586 put_user_u32(info->tid, info->child_tidptr);
6587 if (info->parent_tidptr)
6588 put_user_u32(info->tid, info->parent_tidptr);
6589 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6590 /* Enable signals. */
6591 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6592 /* Signal to the parent that we're ready. */
6593 pthread_mutex_lock(&info->mutex);
6594 pthread_cond_broadcast(&info->cond);
6595 pthread_mutex_unlock(&info->mutex);
6596 /* Wait until the parent has finished initializing the tls state. */
6597 pthread_mutex_lock(&clone_lock);
6598 pthread_mutex_unlock(&clone_lock);
6599 cpu_loop(env);
6600 /* never exits */
6601 return NULL;
6604 /* do_fork() must return host values and target errnos (unlike most
6605 do_*() functions). */
6606 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6607 abi_ulong parent_tidptr, target_ulong newtls,
6608 abi_ulong child_tidptr)
6610 CPUState *cpu = env_cpu(env);
6611 int ret;
6612 TaskState *ts;
6613 CPUState *new_cpu;
6614 CPUArchState *new_env;
6615 sigset_t sigmask;
6617 flags &= ~CLONE_IGNORED_FLAGS;
6619 /* Emulate vfork() with fork() */
6620 if (flags & CLONE_VFORK)
6621 flags &= ~(CLONE_VFORK | CLONE_VM);
6623 if (flags & CLONE_VM) {
6624 TaskState *parent_ts = (TaskState *)cpu->opaque;
6625 new_thread_info info;
6626 pthread_attr_t attr;
6628 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6629 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6630 return -TARGET_EINVAL;
6633 ts = g_new0(TaskState, 1);
6634 init_task_state(ts);
6636 /* Grab a mutex so that thread setup appears atomic. */
6637 pthread_mutex_lock(&clone_lock);
6640 /* If this is our first additional thread, we need to ensure we
6641 * generate code for parallel execution and flush old translations.
6642 * Do this now so that the copy gets CF_PARALLEL too. */
6644 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6645 cpu->tcg_cflags |= CF_PARALLEL;
6646 tb_flush(cpu);
6649 /* we create a new CPU instance. */
6650 new_env = cpu_copy(env);
6651 /* Init regs that differ from the parent. */
6652 cpu_clone_regs_child(new_env, newsp, flags);
6653 cpu_clone_regs_parent(env, flags);
6654 new_cpu = env_cpu(new_env);
6655 new_cpu->opaque = ts;
6656 ts->bprm = parent_ts->bprm;
6657 ts->info = parent_ts->info;
6658 ts->signal_mask = parent_ts->signal_mask;
6660 if (flags & CLONE_CHILD_CLEARTID) {
6661 ts->child_tidptr = child_tidptr;
6664 if (flags & CLONE_SETTLS) {
6665 cpu_set_tls (new_env, newtls);
6668 memset(&info, 0, sizeof(info));
6669 pthread_mutex_init(&info.mutex, NULL);
6670 pthread_mutex_lock(&info.mutex);
6671 pthread_cond_init(&info.cond, NULL);
6672 info.env = new_env;
6673 if (flags & CLONE_CHILD_SETTID) {
6674 info.child_tidptr = child_tidptr;
6676 if (flags & CLONE_PARENT_SETTID) {
6677 info.parent_tidptr = parent_tidptr;
6680 ret = pthread_attr_init(&attr);
6681 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6682 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6683 /* It is not safe to deliver signals until the child has finished
6684 initializing, so temporarily block all signals. */
6685 sigfillset(&sigmask);
6686 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6687 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6689 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6690 /* TODO: Free new CPU state if thread creation failed. */
6692 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6693 pthread_attr_destroy(&attr);
6694 if (ret == 0) {
6695 /* Wait for the child to initialize. */
6696 pthread_cond_wait(&info.cond, &info.mutex);
6697 ret = info.tid;
6698 } else {
6699 ret = -1;
6701 pthread_mutex_unlock(&info.mutex);
6702 pthread_cond_destroy(&info.cond);
6703 pthread_mutex_destroy(&info.mutex);
6704 pthread_mutex_unlock(&clone_lock);
6705 } else {
6706 /* if there is no CLONE_VM, we consider it a fork */
6707 if (flags & CLONE_INVALID_FORK_FLAGS) {
6708 return -TARGET_EINVAL;
6711 /* We can't support custom termination signals */
6712 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6713 return -TARGET_EINVAL;
6716 if (block_signals()) {
6717 return -QEMU_ERESTARTSYS;
6720 fork_start();
6721 ret = fork();
6722 if (ret == 0) {
6723 /* Child Process. */
6724 cpu_clone_regs_child(env, newsp, flags);
6725 fork_end(1);
6726 /* There is a race condition here. The parent process could
6727 theoretically read the TID in the child process before the child
6728 TID is set. For that to happen, the guest would need either ptrace
6729 (not implemented) or a *_tidptr that points at a shared memory
6730 mapping. We can't repeat the spinlock hack used above because
6731 the child process gets its own copy of the lock. */
6732 if (flags & CLONE_CHILD_SETTID)
6733 put_user_u32(sys_gettid(), child_tidptr);
6734 if (flags & CLONE_PARENT_SETTID)
6735 put_user_u32(sys_gettid(), parent_tidptr);
6736 ts = (TaskState *)cpu->opaque;
6737 if (flags & CLONE_SETTLS)
6738 cpu_set_tls (env, newtls);
6739 if (flags & CLONE_CHILD_CLEARTID)
6740 ts->child_tidptr = child_tidptr;
6741 } else {
6742 cpu_clone_regs_parent(env, flags);
6743 fork_end(0);
6746 return ret;
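/*
 * To summarize the two paths above: clones with CLONE_VM become host
 * pthreads sharing this process's address space, while fork-style
 * clones (including the emulated vfork) become a real host fork();
 * that is also why only the default SIGCHLD termination signal can be
 * honoured on the fork path.
 */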
6749 /* warning: does not handle Linux-specific flags... */
6750 static int target_to_host_fcntl_cmd(int cmd)
6752 int ret;
6754 switch(cmd) {
6755 case TARGET_F_DUPFD:
6756 case TARGET_F_GETFD:
6757 case TARGET_F_SETFD:
6758 case TARGET_F_GETFL:
6759 case TARGET_F_SETFL:
6760 case TARGET_F_OFD_GETLK:
6761 case TARGET_F_OFD_SETLK:
6762 case TARGET_F_OFD_SETLKW:
6763 ret = cmd;
6764 break;
6765 case TARGET_F_GETLK:
6766 ret = F_GETLK64;
6767 break;
6768 case TARGET_F_SETLK:
6769 ret = F_SETLK64;
6770 break;
6771 case TARGET_F_SETLKW:
6772 ret = F_SETLKW64;
6773 break;
6774 case TARGET_F_GETOWN:
6775 ret = F_GETOWN;
6776 break;
6777 case TARGET_F_SETOWN:
6778 ret = F_SETOWN;
6779 break;
6780 case TARGET_F_GETSIG:
6781 ret = F_GETSIG;
6782 break;
6783 case TARGET_F_SETSIG:
6784 ret = F_SETSIG;
6785 break;
6786 #if TARGET_ABI_BITS == 32
6787 case TARGET_F_GETLK64:
6788 ret = F_GETLK64;
6789 break;
6790 case TARGET_F_SETLK64:
6791 ret = F_SETLK64;
6792 break;
6793 case TARGET_F_SETLKW64:
6794 ret = F_SETLKW64;
6795 break;
6796 #endif
6797 case TARGET_F_SETLEASE:
6798 ret = F_SETLEASE;
6799 break;
6800 case TARGET_F_GETLEASE:
6801 ret = F_GETLEASE;
6802 break;
6803 #ifdef F_DUPFD_CLOEXEC
6804 case TARGET_F_DUPFD_CLOEXEC:
6805 ret = F_DUPFD_CLOEXEC;
6806 break;
6807 #endif
6808 case TARGET_F_NOTIFY:
6809 ret = F_NOTIFY;
6810 break;
6811 #ifdef F_GETOWN_EX
6812 case TARGET_F_GETOWN_EX:
6813 ret = F_GETOWN_EX;
6814 break;
6815 #endif
6816 #ifdef F_SETOWN_EX
6817 case TARGET_F_SETOWN_EX:
6818 ret = F_SETOWN_EX;
6819 break;
6820 #endif
6821 #ifdef F_SETPIPE_SZ
6822 case TARGET_F_SETPIPE_SZ:
6823 ret = F_SETPIPE_SZ;
6824 break;
6825 case TARGET_F_GETPIPE_SZ:
6826 ret = F_GETPIPE_SZ;
6827 break;
6828 #endif
6829 #ifdef F_ADD_SEALS
6830 case TARGET_F_ADD_SEALS:
6831 ret = F_ADD_SEALS;
6832 break;
6833 case TARGET_F_GET_SEALS:
6834 ret = F_GET_SEALS;
6835 break;
6836 #endif
6837 default:
6838 ret = -TARGET_EINVAL;
6839 break;
6842 #if defined(__powerpc64__)
6843 /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13
6844 * and 14, values which the kernel does not support. The glibc fcntl
6845 * wrapper adjusts them to 5, 6 and 7 before making the syscall().
6846 * Since we make the syscall directly, adjust to what the kernel expects. */
6848 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6849 ret -= F_GETLK64 - 5;
6851 #endif
6853 return ret;
6856 #define FLOCK_TRANSTBL \
6857 switch (type) { \
6858 TRANSTBL_CONVERT(F_RDLCK); \
6859 TRANSTBL_CONVERT(F_WRLCK); \
6860 TRANSTBL_CONVERT(F_UNLCK); \
6863 static int target_to_host_flock(int type)
6865 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6866 FLOCK_TRANSTBL
6867 #undef TRANSTBL_CONVERT
6868 return -TARGET_EINVAL;
6871 static int host_to_target_flock(int type)
6873 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6874 FLOCK_TRANSTBL
6875 #undef TRANSTBL_CONVERT
6876 /* If we don't know how to convert the value coming
6877 * from the host, copy it to the target field as-is. */
6879 return type;
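/*
 * FLOCK_TRANSTBL is a small X-macro: each direction defines
 * TRANSTBL_CONVERT to emit the case labels it needs and then expands
 * the shared list.  In target_to_host_flock(), for instance, the
 * expansion is equivalent to:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 */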
6882 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6883 abi_ulong target_flock_addr)
6885 struct target_flock *target_fl;
6886 int l_type;
6888 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6889 return -TARGET_EFAULT;
6892 __get_user(l_type, &target_fl->l_type);
6893 l_type = target_to_host_flock(l_type);
6894 if (l_type < 0) {
6895 return l_type;
6897 fl->l_type = l_type;
6898 __get_user(fl->l_whence, &target_fl->l_whence);
6899 __get_user(fl->l_start, &target_fl->l_start);
6900 __get_user(fl->l_len, &target_fl->l_len);
6901 __get_user(fl->l_pid, &target_fl->l_pid);
6902 unlock_user_struct(target_fl, target_flock_addr, 0);
6903 return 0;
6906 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6907 const struct flock64 *fl)
6909 struct target_flock *target_fl;
6910 short l_type;
6912 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6913 return -TARGET_EFAULT;
6916 l_type = host_to_target_flock(fl->l_type);
6917 __put_user(l_type, &target_fl->l_type);
6918 __put_user(fl->l_whence, &target_fl->l_whence);
6919 __put_user(fl->l_start, &target_fl->l_start);
6920 __put_user(fl->l_len, &target_fl->l_len);
6921 __put_user(fl->l_pid, &target_fl->l_pid);
6922 unlock_user_struct(target_fl, target_flock_addr, 1);
6923 return 0;
6926 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6927 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6929 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6930 struct target_oabi_flock64 {
6931 abi_short l_type;
6932 abi_short l_whence;
6933 abi_llong l_start;
6934 abi_llong l_len;
6935 abi_int l_pid;
6936 } QEMU_PACKED;
6938 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6939 abi_ulong target_flock_addr)
6941 struct target_oabi_flock64 *target_fl;
6942 int l_type;
6944 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6945 return -TARGET_EFAULT;
6948 __get_user(l_type, &target_fl->l_type);
6949 l_type = target_to_host_flock(l_type);
6950 if (l_type < 0) {
6951 return l_type;
6953 fl->l_type = l_type;
6954 __get_user(fl->l_whence, &target_fl->l_whence);
6955 __get_user(fl->l_start, &target_fl->l_start);
6956 __get_user(fl->l_len, &target_fl->l_len);
6957 __get_user(fl->l_pid, &target_fl->l_pid);
6958 unlock_user_struct(target_fl, target_flock_addr, 0);
6959 return 0;
6962 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6963 const struct flock64 *fl)
6965 struct target_oabi_flock64 *target_fl;
6966 short l_type;
6968 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6969 return -TARGET_EFAULT;
6972 l_type = host_to_target_flock(fl->l_type);
6973 __put_user(l_type, &target_fl->l_type);
6974 __put_user(fl->l_whence, &target_fl->l_whence);
6975 __put_user(fl->l_start, &target_fl->l_start);
6976 __put_user(fl->l_len, &target_fl->l_len);
6977 __put_user(fl->l_pid, &target_fl->l_pid);
6978 unlock_user_struct(target_fl, target_flock_addr, 1);
6979 return 0;
6981 #endif
6983 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6984 abi_ulong target_flock_addr)
6986 struct target_flock64 *target_fl;
6987 int l_type;
6989 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6990 return -TARGET_EFAULT;
6993 __get_user(l_type, &target_fl->l_type);
6994 l_type = target_to_host_flock(l_type);
6995 if (l_type < 0) {
6996 return l_type;
6998 fl->l_type = l_type;
6999 __get_user(fl->l_whence, &target_fl->l_whence);
7000 __get_user(fl->l_start, &target_fl->l_start);
7001 __get_user(fl->l_len, &target_fl->l_len);
7002 __get_user(fl->l_pid, &target_fl->l_pid);
7003 unlock_user_struct(target_fl, target_flock_addr, 0);
7004 return 0;
7007 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7008 const struct flock64 *fl)
7010 struct target_flock64 *target_fl;
7011 short l_type;
7013 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7014 return -TARGET_EFAULT;
7017 l_type = host_to_target_flock(fl->l_type);
7018 __put_user(l_type, &target_fl->l_type);
7019 __put_user(fl->l_whence, &target_fl->l_whence);
7020 __put_user(fl->l_start, &target_fl->l_start);
7021 __put_user(fl->l_len, &target_fl->l_len);
7022 __put_user(fl->l_pid, &target_fl->l_pid);
7023 unlock_user_struct(target_fl, target_flock_addr, 1);
7024 return 0;
7027 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7029 struct flock64 fl64;
7030 #ifdef F_GETOWN_EX
7031 struct f_owner_ex fox;
7032 struct target_f_owner_ex *target_fox;
7033 #endif
7034 abi_long ret;
7035 int host_cmd = target_to_host_fcntl_cmd(cmd);
7037 if (host_cmd == -TARGET_EINVAL)
7038 return host_cmd;
7040 switch(cmd) {
7041 case TARGET_F_GETLK:
7042 ret = copy_from_user_flock(&fl64, arg);
7043 if (ret) {
7044 return ret;
7046 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7047 if (ret == 0) {
7048 ret = copy_to_user_flock(arg, &fl64);
7050 break;
7052 case TARGET_F_SETLK:
7053 case TARGET_F_SETLKW:
7054 ret = copy_from_user_flock(&fl64, arg);
7055 if (ret) {
7056 return ret;
7058 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7059 break;
7061 case TARGET_F_GETLK64:
7062 case TARGET_F_OFD_GETLK:
7063 ret = copy_from_user_flock64(&fl64, arg);
7064 if (ret) {
7065 return ret;
7067 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7068 if (ret == 0) {
7069 ret = copy_to_user_flock64(arg, &fl64);
7071 break;
7072 case TARGET_F_SETLK64:
7073 case TARGET_F_SETLKW64:
7074 case TARGET_F_OFD_SETLK:
7075 case TARGET_F_OFD_SETLKW:
7076 ret = copy_from_user_flock64(&fl64, arg);
7077 if (ret) {
7078 return ret;
7080 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7081 break;
7083 case TARGET_F_GETFL:
7084 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7085 if (ret >= 0) {
7086 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7088 break;
7090 case TARGET_F_SETFL:
7091 ret = get_errno(safe_fcntl(fd, host_cmd,
7092 target_to_host_bitmask(arg,
7093 fcntl_flags_tbl)));
7094 break;
7096 #ifdef F_GETOWN_EX
7097 case TARGET_F_GETOWN_EX:
7098 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7099 if (ret >= 0) {
7100 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7101 return -TARGET_EFAULT;
7102 target_fox->type = tswap32(fox.type);
7103 target_fox->pid = tswap32(fox.pid);
7104 unlock_user_struct(target_fox, arg, 1);
7106 break;
7107 #endif
7109 #ifdef F_SETOWN_EX
7110 case TARGET_F_SETOWN_EX:
7111 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7112 return -TARGET_EFAULT;
7113 fox.type = tswap32(target_fox->type);
7114 fox.pid = tswap32(target_fox->pid);
7115 unlock_user_struct(target_fox, arg, 0);
7116 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7117 break;
7118 #endif
7120 case TARGET_F_SETSIG:
7121 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7122 break;
7124 case TARGET_F_GETSIG:
7125 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7126 break;
7128 case TARGET_F_SETOWN:
7129 case TARGET_F_GETOWN:
7130 case TARGET_F_SETLEASE:
7131 case TARGET_F_GETLEASE:
7132 case TARGET_F_SETPIPE_SZ:
7133 case TARGET_F_GETPIPE_SZ:
7134 case TARGET_F_ADD_SEALS:
7135 case TARGET_F_GET_SEALS:
7136 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7137 break;
7139 default:
7140 ret = get_errno(safe_fcntl(fd, cmd, arg));
7141 break;
7143 return ret;
7146 #ifdef USE_UID16
7148 static inline int high2lowuid(int uid)
7150 if (uid > 65535)
7151 return 65534;
7152 else
7153 return uid;
7156 static inline int high2lowgid(int gid)
7158 if (gid > 65535)
7159 return 65534;
7160 else
7161 return gid;
7164 static inline int low2highuid(int uid)
7166 if ((int16_t)uid == -1)
7167 return -1;
7168 else
7169 return uid;
7172 static inline int low2highgid(int gid)
7174 if ((int16_t)gid == -1)
7175 return -1;
7176 else
7177 return gid;
7179 static inline int tswapid(int id)
7181 return tswap16(id);
7184 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7186 #else /* !USE_UID16 */
7187 static inline int high2lowuid(int uid)
7189 return uid;
7191 static inline int high2lowgid(int gid)
7193 return gid;
7195 static inline int low2highuid(int uid)
7197 return uid;
7199 static inline int low2highgid(int gid)
7201 return gid;
7203 static inline int tswapid(int id)
7205 return tswap32(id);
7208 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7210 #endif /* USE_UID16 */
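/*
 * For example, with USE_UID16 a host uid of 100000 does not fit in the
 * 16-bit ABI, so high2lowuid() clamps it to 65534 (the traditional
 * overflow uid).  In the other direction, the 16-bit value 65535 is the
 * ABI encoding of -1, so low2highuid() widens it back to -1 and a
 * "no change" request keeps its meaning.
 */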
7212 /* We must do direct syscalls for setting UID/GID, because we want to
7213 * implement the Linux system call semantics of "change only for this thread",
7214 * not the libc/POSIX semantics of "change for all threads in process".
7215 * (See http://ewontfix.com/17/ for more details.)
7216 * We use the 32-bit version of the syscalls if present; if it is not
7217 * then either the host architecture supports 32-bit UIDs natively with
7218 the standard syscall, or the 16-bit UID is the best we can do. */
7220 #ifdef __NR_setuid32
7221 #define __NR_sys_setuid __NR_setuid32
7222 #else
7223 #define __NR_sys_setuid __NR_setuid
7224 #endif
7225 #ifdef __NR_setgid32
7226 #define __NR_sys_setgid __NR_setgid32
7227 #else
7228 #define __NR_sys_setgid __NR_setgid
7229 #endif
7230 #ifdef __NR_setresuid32
7231 #define __NR_sys_setresuid __NR_setresuid32
7232 #else
7233 #define __NR_sys_setresuid __NR_setresuid
7234 #endif
7235 #ifdef __NR_setresgid32
7236 #define __NR_sys_setresgid __NR_setresgid32
7237 #else
7238 #define __NR_sys_setresgid __NR_setresgid
7239 #endif
7241 _syscall1(int, sys_setuid, uid_t, uid)
7242 _syscall1(int, sys_setgid, gid_t, gid)
7243 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7244 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7246 void syscall_init(void)
7248 IOCTLEntry *ie;
7249 const argtype *arg_type;
7250 int size;
7252 thunk_init(STRUCT_MAX);
7254 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7255 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7256 #include "syscall_types.h"
7257 #undef STRUCT
7258 #undef STRUCT_SPECIAL
7260 /* We patch the ioctl size if necessary, relying on the fact that
7261 no ioctl has all the bits set to '1' in the size field */
7262 ie = ioctl_entries;
7263 while (ie->target_cmd != 0) {
7264 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7265 TARGET_IOC_SIZEMASK) {
7266 arg_type = ie->arg_type;
7267 if (arg_type[0] != TYPE_PTR) {
7268 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7269 ie->target_cmd);
7270 exit(1);
7272 arg_type++;
7273 size = thunk_type_size(arg_type, 0);
7274 ie->target_cmd = (ie->target_cmd &
7275 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7276 (size << TARGET_IOC_SIZESHIFT);
7279 /* automatic consistency check if same arch */
7280 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7281 (defined(__x86_64__) && defined(TARGET_X86_64))
7282 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7283 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7284 ie->name, ie->target_cmd, ie->host_cmd);
7286 #endif
7287 ie++;
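/*
 * As an example of the size patching above: an IOCTLEntry whose
 * target_cmd carries TARGET_IOC_SIZEMASK as a placeholder must take a
 * TYPE_PTR argument, and the placeholder is replaced with
 * thunk_type_size() of the pointed-to type, so the final command
 * number matches what the guest's kernel headers would have produced
 * for that target ABI.
 */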
7291 #ifdef TARGET_NR_truncate64
7292 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7293 abi_long arg2,
7294 abi_long arg3,
7295 abi_long arg4)
7297 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7298 arg2 = arg3;
7299 arg3 = arg4;
7301 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7303 #endif
7305 #ifdef TARGET_NR_ftruncate64
7306 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7307 abi_long arg2,
7308 abi_long arg3,
7309 abi_long arg4)
7311 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7312 arg2 = arg3;
7313 arg3 = arg4;
7315 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7317 #endif
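/*
 * regpairs_aligned() is true for ABIs that pass 64-bit syscall
 * arguments in aligned register pairs (32-bit ARM EABI is the classic
 * example).  On such targets the pair holding the offset starts at an
 * even register, leaving a padding slot before it; shifting arg2/arg3
 * up by one skips that padding before target_offset64() recombines
 * the two halves.
 */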
7319 #if defined(TARGET_NR_timer_settime) || \
7320 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7321 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7322 abi_ulong target_addr)
7324 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7325 offsetof(struct target_itimerspec,
7326 it_interval)) ||
7327 target_to_host_timespec(&host_its->it_value, target_addr +
7328 offsetof(struct target_itimerspec,
7329 it_value))) {
7330 return -TARGET_EFAULT;
7333 return 0;
7335 #endif
7337 #if defined(TARGET_NR_timer_settime64) || \
7338 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7339 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7340 abi_ulong target_addr)
7342 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7343 offsetof(struct target__kernel_itimerspec,
7344 it_interval)) ||
7345 target_to_host_timespec64(&host_its->it_value, target_addr +
7346 offsetof(struct target__kernel_itimerspec,
7347 it_value))) {
7348 return -TARGET_EFAULT;
7351 return 0;
7353 #endif
7355 #if ((defined(TARGET_NR_timerfd_gettime) || \
7356 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7357 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7358 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7359 struct itimerspec *host_its)
7361 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7362 it_interval),
7363 &host_its->it_interval) ||
7364 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7365 it_value),
7366 &host_its->it_value)) {
7367 return -TARGET_EFAULT;
7369 return 0;
7371 #endif
7373 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7374 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7375 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7376 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7377 struct itimerspec *host_its)
7379 if (host_to_target_timespec64(target_addr +
7380 offsetof(struct target__kernel_itimerspec,
7381 it_interval),
7382 &host_its->it_interval) ||
7383 host_to_target_timespec64(target_addr +
7384 offsetof(struct target__kernel_itimerspec,
7385 it_value),
7386 &host_its->it_value)) {
7387 return -TARGET_EFAULT;
7389 return 0;
7391 #endif
7393 #if defined(TARGET_NR_adjtimex) || \
7394 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7395 static inline abi_long target_to_host_timex(struct timex *host_tx,
7396 abi_long target_addr)
7398 struct target_timex *target_tx;
7400 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7401 return -TARGET_EFAULT;
7404 __get_user(host_tx->modes, &target_tx->modes);
7405 __get_user(host_tx->offset, &target_tx->offset);
7406 __get_user(host_tx->freq, &target_tx->freq);
7407 __get_user(host_tx->maxerror, &target_tx->maxerror);
7408 __get_user(host_tx->esterror, &target_tx->esterror);
7409 __get_user(host_tx->status, &target_tx->status);
7410 __get_user(host_tx->constant, &target_tx->constant);
7411 __get_user(host_tx->precision, &target_tx->precision);
7412 __get_user(host_tx->tolerance, &target_tx->tolerance);
7413 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7414 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7415 __get_user(host_tx->tick, &target_tx->tick);
7416 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7417 __get_user(host_tx->jitter, &target_tx->jitter);
7418 __get_user(host_tx->shift, &target_tx->shift);
7419 __get_user(host_tx->stabil, &target_tx->stabil);
7420 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7421 __get_user(host_tx->calcnt, &target_tx->calcnt);
7422 __get_user(host_tx->errcnt, &target_tx->errcnt);
7423 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7424 __get_user(host_tx->tai, &target_tx->tai);
7426 unlock_user_struct(target_tx, target_addr, 0);
7427 return 0;
7430 static inline abi_long host_to_target_timex(abi_long target_addr,
7431 struct timex *host_tx)
7433 struct target_timex *target_tx;
7435 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7436 return -TARGET_EFAULT;
7439 __put_user(host_tx->modes, &target_tx->modes);
7440 __put_user(host_tx->offset, &target_tx->offset);
7441 __put_user(host_tx->freq, &target_tx->freq);
7442 __put_user(host_tx->maxerror, &target_tx->maxerror);
7443 __put_user(host_tx->esterror, &target_tx->esterror);
7444 __put_user(host_tx->status, &target_tx->status);
7445 __put_user(host_tx->constant, &target_tx->constant);
7446 __put_user(host_tx->precision, &target_tx->precision);
7447 __put_user(host_tx->tolerance, &target_tx->tolerance);
7448 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7449 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7450 __put_user(host_tx->tick, &target_tx->tick);
7451 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7452 __put_user(host_tx->jitter, &target_tx->jitter);
7453 __put_user(host_tx->shift, &target_tx->shift);
7454 __put_user(host_tx->stabil, &target_tx->stabil);
7455 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7456 __put_user(host_tx->calcnt, &target_tx->calcnt);
7457 __put_user(host_tx->errcnt, &target_tx->errcnt);
7458 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7459 __put_user(host_tx->tai, &target_tx->tai);
7461 unlock_user_struct(target_tx, target_addr, 1);
7462 return 0;
7464 #endif
7467 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7468 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7469 abi_long target_addr)
7471 struct target__kernel_timex *target_tx;
7473 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7474 offsetof(struct target__kernel_timex,
7475 time))) {
7476 return -TARGET_EFAULT;
7479 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7480 return -TARGET_EFAULT;
7483 __get_user(host_tx->modes, &target_tx->modes);
7484 __get_user(host_tx->offset, &target_tx->offset);
7485 __get_user(host_tx->freq, &target_tx->freq);
7486 __get_user(host_tx->maxerror, &target_tx->maxerror);
7487 __get_user(host_tx->esterror, &target_tx->esterror);
7488 __get_user(host_tx->status, &target_tx->status);
7489 __get_user(host_tx->constant, &target_tx->constant);
7490 __get_user(host_tx->precision, &target_tx->precision);
7491 __get_user(host_tx->tolerance, &target_tx->tolerance);
7492 __get_user(host_tx->tick, &target_tx->tick);
7493 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7494 __get_user(host_tx->jitter, &target_tx->jitter);
7495 __get_user(host_tx->shift, &target_tx->shift);
7496 __get_user(host_tx->stabil, &target_tx->stabil);
7497 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7498 __get_user(host_tx->calcnt, &target_tx->calcnt);
7499 __get_user(host_tx->errcnt, &target_tx->errcnt);
7500 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7501 __get_user(host_tx->tai, &target_tx->tai);
7503 unlock_user_struct(target_tx, target_addr, 0);
7504 return 0;
7507 static inline abi_long host_to_target_timex64(abi_long target_addr,
7508 struct timex *host_tx)
7510 struct target__kernel_timex *target_tx;
7512 if (copy_to_user_timeval64(target_addr +
7513 offsetof(struct target__kernel_timex, time),
7514 &host_tx->time)) {
7515 return -TARGET_EFAULT;
7518 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7519 return -TARGET_EFAULT;
7522 __put_user(host_tx->modes, &target_tx->modes);
7523 __put_user(host_tx->offset, &target_tx->offset);
7524 __put_user(host_tx->freq, &target_tx->freq);
7525 __put_user(host_tx->maxerror, &target_tx->maxerror);
7526 __put_user(host_tx->esterror, &target_tx->esterror);
7527 __put_user(host_tx->status, &target_tx->status);
7528 __put_user(host_tx->constant, &target_tx->constant);
7529 __put_user(host_tx->precision, &target_tx->precision);
7530 __put_user(host_tx->tolerance, &target_tx->tolerance);
7531 __put_user(host_tx->tick, &target_tx->tick);
7532 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7533 __put_user(host_tx->jitter, &target_tx->jitter);
7534 __put_user(host_tx->shift, &target_tx->shift);
7535 __put_user(host_tx->stabil, &target_tx->stabil);
7536 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7537 __put_user(host_tx->calcnt, &target_tx->calcnt);
7538 __put_user(host_tx->errcnt, &target_tx->errcnt);
7539 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7540 __put_user(host_tx->tai, &target_tx->tai);
7542 unlock_user_struct(target_tx, target_addr, 1);
7543 return 0;
7545 #endif
7547 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7548 #define sigev_notify_thread_id _sigev_un._tid
7549 #endif
7551 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7552 abi_ulong target_addr)
7554 struct target_sigevent *target_sevp;
7556 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7557 return -TARGET_EFAULT;
7560 /* This union is awkward on 64 bit systems because it has a 32 bit
7561 * integer and a pointer in it; we follow the conversion approach
7562 * used for handling sigval types in signal.c so the guest should get
7563 * the correct value back even if we did a 64 bit byteswap and it's
7564 * using the 32 bit integer. */
7566 host_sevp->sigev_value.sival_ptr =
7567 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7568 host_sevp->sigev_signo =
7569 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7570 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7571 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7573 unlock_user_struct(target_sevp, target_addr, 1);
7574 return 0;
7577 #if defined(TARGET_NR_mlockall)
7578 static inline int target_to_host_mlockall_arg(int arg)
7580 int result = 0;
7582 if (arg & TARGET_MCL_CURRENT) {
7583 result |= MCL_CURRENT;
7585 if (arg & TARGET_MCL_FUTURE) {
7586 result |= MCL_FUTURE;
7588 #ifdef MCL_ONFAULT
7589 if (arg & TARGET_MCL_ONFAULT) {
7590 result |= MCL_ONFAULT;
7592 #endif
7594 return result;
7596 #endif
7598 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7599 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7600 defined(TARGET_NR_newfstatat))
7601 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7602 abi_ulong target_addr,
7603 struct stat *host_st)
7605 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7606 if (cpu_env->eabi) {
7607 struct target_eabi_stat64 *target_st;
7609 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7610 return -TARGET_EFAULT;
7611 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7612 __put_user(host_st->st_dev, &target_st->st_dev);
7613 __put_user(host_st->st_ino, &target_st->st_ino);
7614 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7615 __put_user(host_st->st_ino, &target_st->__st_ino);
7616 #endif
7617 __put_user(host_st->st_mode, &target_st->st_mode);
7618 __put_user(host_st->st_nlink, &target_st->st_nlink);
7619 __put_user(host_st->st_uid, &target_st->st_uid);
7620 __put_user(host_st->st_gid, &target_st->st_gid);
7621 __put_user(host_st->st_rdev, &target_st->st_rdev);
7622 __put_user(host_st->st_size, &target_st->st_size);
7623 __put_user(host_st->st_blksize, &target_st->st_blksize);
7624 __put_user(host_st->st_blocks, &target_st->st_blocks);
7625 __put_user(host_st->st_atime, &target_st->target_st_atime);
7626 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7627 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7628 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7629 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7630 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7631 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7632 #endif
7633 unlock_user_struct(target_st, target_addr, 1);
7634 } else
7635 #endif
7637 #if defined(TARGET_HAS_STRUCT_STAT64)
7638 struct target_stat64 *target_st;
7639 #else
7640 struct target_stat *target_st;
7641 #endif
7643 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7644 return -TARGET_EFAULT;
7645 memset(target_st, 0, sizeof(*target_st));
7646 __put_user(host_st->st_dev, &target_st->st_dev);
7647 __put_user(host_st->st_ino, &target_st->st_ino);
7648 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7649 __put_user(host_st->st_ino, &target_st->__st_ino);
7650 #endif
7651 __put_user(host_st->st_mode, &target_st->st_mode);
7652 __put_user(host_st->st_nlink, &target_st->st_nlink);
7653 __put_user(host_st->st_uid, &target_st->st_uid);
7654 __put_user(host_st->st_gid, &target_st->st_gid);
7655 __put_user(host_st->st_rdev, &target_st->st_rdev);
7656 /* XXX: better use of kernel struct */
7657 __put_user(host_st->st_size, &target_st->st_size);
7658 __put_user(host_st->st_blksize, &target_st->st_blksize);
7659 __put_user(host_st->st_blocks, &target_st->st_blocks);
7660 __put_user(host_st->st_atime, &target_st->target_st_atime);
7661 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7662 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7663 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7664 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7665 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7666 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7667 #endif
7668 unlock_user_struct(target_st, target_addr, 1);
7671 return 0;
7673 #endif
7675 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7676 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7677 abi_ulong target_addr)
7679 struct target_statx *target_stx;
7681 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7682 return -TARGET_EFAULT;
7684 memset(target_stx, 0, sizeof(*target_stx));
7686 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7687 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7688 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7689 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7690 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7691 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7692 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7693 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7694 __put_user(host_stx->stx_size, &target_stx->stx_size);
7695 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7696 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7697 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7698 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7699 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7700 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7701 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7702 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7703 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7704 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7705 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7706 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7707 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7708 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7710 unlock_user_struct(target_stx, target_addr, 1);
7712 return 0;
7714 #endif
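/*
 * Usage sketch: stx_mask is copied through unchanged above, so a guest is
 * expected to test it before trusting a field. Hypothetical guest-side
 * code (not part of this file):
 */
#if 0
static void example_check_btime(void)
{
    struct statx stx;
    if (statx(AT_FDCWD, "file", 0, STATX_BTIME, &stx) == 0 &&
        (stx.stx_mask & STATX_BTIME)) {
        /* stx_btime is valid */
    }
}
#endif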
7716 static int do_sys_futex(int *uaddr, int op, int val,
7717 const struct timespec *timeout, int *uaddr2,
7718 int val3)
7720 #if HOST_LONG_BITS == 64
7721 #if defined(__NR_futex)
7722 /* a 64-bit host always has a 64-bit time_t; there is no _time64 variant */
7723 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7725 #endif
7726 #else /* HOST_LONG_BITS == 64 */
7727 #if defined(__NR_futex_time64)
7728 if (sizeof(timeout->tv_sec) == 8) {
7729 /* _time64 function on 32bit arch */
7730 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7732 #endif
7733 #if defined(__NR_futex)
7734 /* old function on 32bit arch */
7735 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7736 #endif
7737 #endif /* HOST_LONG_BITS == 64 */
7738 g_assert_not_reached();
7741 static int do_safe_futex(int *uaddr, int op, int val,
7742 const struct timespec *timeout, int *uaddr2,
7743 int val3)
7745 #if HOST_LONG_BITS == 64
7746 #if defined(__NR_futex)
7747 /* a 64-bit host always has a 64-bit time_t; there is no _time64 variant */
7748 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7749 #endif
7750 #else /* HOST_LONG_BITS == 64 */
7751 #if defined(__NR_futex_time64)
7752 if (sizeof(timeout->tv_sec) == 8) {
7753 /* _time64 function on 32bit arch */
7754 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7755 val3));
7757 #endif
7758 #if defined(__NR_futex)
7759 /* old function on 32bit arch */
7760 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7761 #endif
7762 #endif /* HOST_LONG_BITS == 64 */
7763 return -TARGET_ENOSYS;
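/*
 * Rough decision table for the #if ladder in the two helpers above
 * (a sketch; actual availability depends on the host kernel headers):
 *
 *   64-bit host                     -> __NR_futex (time_t is already 64-bit)
 *   32-bit host with futex_time64   -> __NR_futex_time64 when the host
 *                                      timespec has a 64-bit tv_sec
 *   32-bit host otherwise           -> classic __NR_futex
 */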
7766 /* ??? Using host futex calls even when target atomic operations
7767 are not really atomic probably breaks things. However, implementing
7768 futexes locally would make futexes shared between multiple processes
7769 tricky; they would probably be useless anyway, because guest atomic
7770 operations won't work either. */
7771 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7772 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7773 int op, int val, target_ulong timeout,
7774 target_ulong uaddr2, int val3)
7776 struct timespec ts, *pts = NULL;
7777 void *haddr2 = NULL;
7778 int base_op;
7780 /* We assume FUTEX_* constants are the same on both host and target. */
7781 #ifdef FUTEX_CMD_MASK
7782 base_op = op & FUTEX_CMD_MASK;
7783 #else
7784 base_op = op;
7785 #endif
7786 switch (base_op) {
7787 case FUTEX_WAIT:
7788 case FUTEX_WAIT_BITSET:
7789 val = tswap32(val);
7790 break;
7791 case FUTEX_WAIT_REQUEUE_PI:
7792 val = tswap32(val);
7793 haddr2 = g2h(cpu, uaddr2);
7794 break;
7795 case FUTEX_LOCK_PI:
7796 case FUTEX_LOCK_PI2:
7797 break;
7798 case FUTEX_WAKE:
7799 case FUTEX_WAKE_BITSET:
7800 case FUTEX_TRYLOCK_PI:
7801 case FUTEX_UNLOCK_PI:
7802 timeout = 0;
7803 break;
7804 case FUTEX_FD:
7805 val = target_to_host_signal(val);
7806 timeout = 0;
7807 break;
7808 case FUTEX_CMP_REQUEUE:
7809 case FUTEX_CMP_REQUEUE_PI:
7810 val3 = tswap32(val3);
7811 /* fall through */
7812 case FUTEX_REQUEUE:
7813 case FUTEX_WAKE_OP:
7815 * For these, the 4th argument is not TIMEOUT, but VAL2.
7816 * But the prototype of do_safe_futex takes a pointer, so
7817 * insert casts to satisfy the compiler. We do not need
7818 * to tswap VAL2 since it's not compared to guest memory.
7820 pts = (struct timespec *)(uintptr_t)timeout;
7821 timeout = 0;
7822 haddr2 = g2h(cpu, uaddr2);
7823 break;
7824 default:
7825 return -TARGET_ENOSYS;
7827 if (timeout) {
7828 pts = &ts;
7829 if (time64
7830 ? target_to_host_timespec64(pts, timeout)
7831 : target_to_host_timespec(pts, timeout)) {
7832 return -TARGET_EFAULT;
7835 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7837 #endif
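/*
 * Why val is tswap32()ed for the WAIT ops (sketch; the helper below is
 * hypothetical, not part of this file): the kernel compares the 32-bit
 * word at uaddr -- guest memory, kept in guest byte order -- against
 * val, so val has to be presented in that same byte order.
 */
#if 0
static bool example_futex_would_block(CPUState *cpu, target_ulong uaddr,
                                      int val)
{
    uint32_t guest_word = *(uint32_t *)g2h(cpu, uaddr); /* guest-endian */
    return guest_word == tswap32(val);  /* FUTEX_WAIT blocks when equal */
}
#endif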
7839 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7840 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7841 abi_long handle, abi_long mount_id,
7842 abi_long flags)
7844 struct file_handle *target_fh;
7845 struct file_handle *fh;
7846 int mid = 0;
7847 abi_long ret;
7848 char *name;
7849 unsigned int size, total_size;
7851 if (get_user_s32(size, handle)) {
7852 return -TARGET_EFAULT;
7855 name = lock_user_string(pathname);
7856 if (!name) {
7857 return -TARGET_EFAULT;
7860 total_size = sizeof(struct file_handle) + size;
7861 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7862 if (!target_fh) {
7863 unlock_user(name, pathname, 0);
7864 return -TARGET_EFAULT;
7867 fh = g_malloc0(total_size);
7868 fh->handle_bytes = size;
7870 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7871 unlock_user(name, pathname, 0);
7873 /* man name_to_handle_at(2):
7874 * Other than the use of the handle_bytes field, the caller should treat
7875 * the file_handle structure as an opaque data type
7878 memcpy(target_fh, fh, total_size);
7879 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7880 target_fh->handle_type = tswap32(fh->handle_type);
7881 g_free(fh);
7882 unlock_user(target_fh, handle, total_size);
7884 if (put_user_s32(mid, mount_id)) {
7885 return -TARGET_EFAULT;
7888 return ret;
7891 #endif
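/*
 * Buffer layout sketch: the guest passes a struct file_handle header
 * whose handle_bytes field (read via get_user_s32 above) gives the size
 * of the opaque f_handle[] payload that follows.  Only the two header
 * fields are byte-swapped; the payload is copied verbatim, as the man
 * page requires it to be treated as opaque.
 */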
7893 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7894 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7895 abi_long flags)
7897 struct file_handle *target_fh;
7898 struct file_handle *fh;
7899 unsigned int size, total_size;
7900 abi_long ret;
7902 if (get_user_s32(size, handle)) {
7903 return -TARGET_EFAULT;
7906 total_size = sizeof(struct file_handle) + size;
7907 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7908 if (!target_fh) {
7909 return -TARGET_EFAULT;
7912 fh = g_memdup(target_fh, total_size);
7913 fh->handle_bytes = size;
7914 fh->handle_type = tswap32(target_fh->handle_type);
7916 ret = get_errno(open_by_handle_at(mount_fd, fh,
7917 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7919 g_free(fh);
7921 unlock_user(target_fh, handle, total_size);
7923 return ret;
7925 #endif
7927 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7929 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7931 int host_flags;
7932 target_sigset_t *target_mask;
7933 sigset_t host_mask;
7934 abi_long ret;
7936 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7937 return -TARGET_EINVAL;
7939 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7940 return -TARGET_EFAULT;
7943 target_to_host_sigset(&host_mask, target_mask);
7945 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7947 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7948 if (ret >= 0) {
7949 fd_trans_register(ret, &target_signalfd_trans);
7952 unlock_user_struct(target_mask, mask, 0);
7954 return ret;
7956 #endif
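/*
 * The fd_trans_register() call above hooks the returned descriptor so
 * that later reads can convert the host's struct signalfd_siginfo into
 * the target's layout; without it, a cross-endian guest would see
 * byte-swapped signal numbers in the read() payload.
 */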
7958 /* Map host to target signal numbers for the wait family of syscalls.
7959 Assume all other status bits are the same. */
7960 int host_to_target_waitstatus(int status)
7962 if (WIFSIGNALED(status)) {
7963 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7965 if (WIFSTOPPED(status)) {
7966 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7967 | (status & 0xff);
7969 return status;
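/*
 * Worked example (sketch): a child killed by SIGUSR1 on an x86 host
 * produces status 10 (low 7 bits = signal number, no core dump).  For an
 * Alpha guest, where SIGUSR1 is 30, host_to_target_waitstatus(10)
 * returns 30; the core-dump flag (0x80) and any higher bits pass
 * through unchanged.
 */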
7972 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7974 CPUState *cpu = env_cpu(cpu_env);
7975 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7976 int i;
7978 for (i = 0; i < bprm->argc; i++) {
7979 size_t len = strlen(bprm->argv[i]) + 1;
7981 if (write(fd, bprm->argv[i], len) != len) {
7982 return -1;
7986 return 0;
7989 static int open_self_maps(CPUArchState *cpu_env, int fd)
7991 CPUState *cpu = env_cpu(cpu_env);
7992 TaskState *ts = cpu->opaque;
7993 GSList *map_info = read_self_maps();
7994 GSList *s;
7995 int count;
7997 for (s = map_info; s; s = g_slist_next(s)) {
7998 MapInfo *e = (MapInfo *) s->data;
8000 if (h2g_valid(e->start)) {
8001 unsigned long min = e->start;
8002 unsigned long max = e->end;
8003 int flags = page_get_flags(h2g(min));
8004 const char *path;
8006 max = h2g_valid(max - 1) ?
8007 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8009 if (page_check_range(h2g(min), max - min, flags) == -1) {
8010 continue;
8013 #ifdef TARGET_HPPA
8014 if (h2g(max) == ts->info->stack_limit) {
8015 #else
8016 if (h2g(min) == ts->info->stack_limit) {
8017 #endif
8018 path = "[stack]";
8019 } else {
8020 path = e->path;
8023 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8024 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8025 h2g(min), h2g(max - 1) + 1,
8026 (flags & PAGE_READ) ? 'r' : '-',
8027 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8028 (flags & PAGE_EXEC) ? 'x' : '-',
8029 e->is_priv ? 'p' : 's',
8030 (uint64_t) e->offset, e->dev, e->inode);
8031 if (path) {
8032 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8033 } else {
8034 dprintf(fd, "\n");
8039 free_self_maps(map_info);
8041 #ifdef TARGET_VSYSCALL_PAGE
8043 * We only support execution from the vsyscall page.
8044 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8046 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8047 " --xp 00000000 00:00 0",
8048 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8049 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8050 #endif
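/*
 * Sketch of one synthesized line (range, perms, offset, dev, inode,
 * then the path padded out to column 73):
 *
 *   00400000-0040c000 r-xp 00000000 08:01 1234567    /usr/bin/cat
 */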
8052 return 0;
8055 static int open_self_stat(CPUArchState *cpu_env, int fd)
8057 CPUState *cpu = env_cpu(cpu_env);
8058 TaskState *ts = cpu->opaque;
8059 g_autoptr(GString) buf = g_string_new(NULL);
8060 int i;
8062 for (i = 0; i < 44; i++) {
8063 if (i == 0) {
8064 /* pid */
8065 g_string_printf(buf, FMT_pid " ", getpid());
8066 } else if (i == 1) {
8067 /* app name */
8068 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8069 bin = bin ? bin + 1 : ts->bprm->argv[0];
8070 g_string_printf(buf, "(%.15s) ", bin);
8071 } else if (i == 3) {
8072 /* ppid */
8073 g_string_printf(buf, FMT_pid " ", getppid());
8074 } else if (i == 21) {
8075 /* starttime */
8076 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8077 } else if (i == 27) {
8078 /* stack bottom */
8079 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8080 } else {
8081 /* every other field is reported as 0 */
8082 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8085 if (write(fd, buf->str, buf->len) != buf->len) {
8086 return -1;
8090 return 0;
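/*
 * Sketch of the synthesized /proc/self/stat line: only pid (field 1),
 * comm (2), ppid (4), starttime (22) and the stack address (28) carry
 * real values; every other field is written as "0", e.g.:
 *
 *   4242 (cat) 0 4241 0 ... 0 153841 0 ... 0 140737488346112 0 ... 0
 */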
8093 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8095 CPUState *cpu = env_cpu(cpu_env);
8096 TaskState *ts = cpu->opaque;
8097 abi_ulong auxv = ts->info->saved_auxv;
8098 abi_ulong len = ts->info->auxv_len;
8099 char *ptr;
8102 * The auxiliary vector is stored on the target process's stack;
8103 * read the whole vector in and copy it out to the file.
8105 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8106 if (ptr != NULL) {
8107 while (len > 0) {
8108 ssize_t r;
8109 r = write(fd, ptr, len);
8110 if (r <= 0) {
8111 break;
8113 len -= r;
8114 ptr += r;
8116 lseek(fd, 0, SEEK_SET);
8117 unlock_user(ptr, auxv, len);
8120 return 0;
8123 static int is_proc_myself(const char *filename, const char *entry)
8125 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8126 filename += strlen("/proc/");
8127 if (!strncmp(filename, "self/", strlen("self/"))) {
8128 filename += strlen("self/");
8129 } else if (*filename >= '1' && *filename <= '9') {
8130 char myself[80];
8131 snprintf(myself, sizeof(myself), "%d/", getpid());
8132 if (!strncmp(filename, myself, strlen(myself))) {
8133 filename += strlen(myself);
8134 } else {
8135 return 0;
8137 } else {
8138 return 0;
8140 if (!strcmp(filename, entry)) {
8141 return 1;
8144 return 0;
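/* Usage sketch (assuming getpid() == 4242):
 *   is_proc_myself("/proc/self/maps", "maps") -> 1
 *   is_proc_myself("/proc/4242/maps", "maps") -> 1
 *   is_proc_myself("/proc/4243/maps", "maps") -> 0 (not our pid)
 *   is_proc_myself("/proc/stat", "maps")      -> 0
 */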
8147 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8148 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8149 static int is_proc(const char *filename, const char *entry)
8151 return strcmp(filename, entry) == 0;
8153 #endif
8155 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8156 static int open_net_route(CPUArchState *cpu_env, int fd)
8158 FILE *fp;
8159 char *line = NULL;
8160 size_t len = 0;
8161 ssize_t read;
8163 fp = fopen("/proc/net/route", "r");
8164 if (fp == NULL) {
8165 return -1;
8168 /* read header */
8170 read = getline(&line, &len, fp);
8171 dprintf(fd, "%s", line);
8173 /* read routes */
8175 while ((read = getline(&line, &len, fp)) != -1) {
8176 char iface[16];
8177 uint32_t dest, gw, mask;
8178 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8179 int fields;
8181 fields = sscanf(line,
8182 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8183 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8184 &mask, &mtu, &window, &irtt);
8185 if (fields != 11) {
8186 continue;
8188 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8189 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8190 metric, tswap32(mask), mtu, window, irtt);
8193 free(line);
8194 fclose(fp);
8196 return 0;
8198 #endif
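/*
 * Byte-swap example (sketch): on a little-endian host, 192.168.1.0
 * appears in /proc/net/route as "0001A8C0"; tswap32() turns that into
 * "C0A80100", which is what a big-endian guest expects to parse.
 */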
8200 #if defined(TARGET_SPARC)
8201 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8203 dprintf(fd, "type\t\t: sun4u\n");
8204 return 0;
8206 #endif
8208 #if defined(TARGET_HPPA)
8209 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8211 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8212 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8213 dprintf(fd, "capabilities\t: os32\n");
8214 dprintf(fd, "model\t\t: 9000/778/B160L\n");
8215 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8216 return 0;
8218 #endif
8220 #if defined(TARGET_M68K)
8221 static int open_hardware(CPUArchState *cpu_env, int fd)
8223 dprintf(fd, "Model:\t\tqemu-m68k\n");
8224 return 0;
8226 #endif
8228 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8230 struct fake_open {
8231 const char *filename;
8232 int (*fill)(CPUArchState *cpu_env, int fd);
8233 int (*cmp)(const char *s1, const char *s2);
8235 const struct fake_open *fake_open;
8236 static const struct fake_open fakes[] = {
8237 { "maps", open_self_maps, is_proc_myself },
8238 { "stat", open_self_stat, is_proc_myself },
8239 { "auxv", open_self_auxv, is_proc_myself },
8240 { "cmdline", open_self_cmdline, is_proc_myself },
8241 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8242 { "/proc/net/route", open_net_route, is_proc },
8243 #endif
8244 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8245 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8246 #endif
8247 #if defined(TARGET_M68K)
8248 { "/proc/hardware", open_hardware, is_proc },
8249 #endif
8250 { NULL, NULL, NULL }
8253 if (is_proc_myself(pathname, "exe")) {
8254 int execfd = qemu_getauxval(AT_EXECFD);
8255 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8258 for (fake_open = fakes; fake_open->filename; fake_open++) {
8259 if (fake_open->cmp(pathname, fake_open->filename)) {
8260 break;
8264 if (fake_open->filename) {
8265 const char *tmpdir;
8266 char filename[PATH_MAX];
8267 int fd, r;
8269 fd = memfd_create("qemu-open", 0);
8270 if (fd < 0) {
8271 if (errno != ENOSYS) {
8272 return fd;
8274 /* create temporary file to map stat to */
8275 tmpdir = getenv("TMPDIR");
8276 if (!tmpdir)
8277 tmpdir = "/tmp";
8278 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8279 fd = mkstemp(filename);
8280 if (fd < 0) {
8281 return fd;
8283 unlink(filename);
8286 if ((r = fake_open->fill(cpu_env, fd))) {
8287 int e = errno;
8288 close(fd);
8289 errno = e;
8290 return r;
8292 lseek(fd, 0, SEEK_SET);
8294 return fd;
8297 return safe_openat(dirfd, path(pathname), flags, mode);
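/*
 * Dispatch sketch: a guest open("/proc/self/maps", O_RDONLY) matches the
 * "maps" entry of fakes[] via is_proc_myself(), so the guest sees
 * contents synthesized by open_self_maps() in a memfd (or, when
 * memfd_create() is unavailable, an unlinked temp file) rather than the
 * host's own maps.
 */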
8300 #define TIMER_MAGIC 0x0caf0000
8301 #define TIMER_MAGIC_MASK 0xffff0000
8303 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8304 static target_timer_t get_timer_id(abi_long arg)
8306 target_timer_t timerid = arg;
8308 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8309 return -TARGET_EINVAL;
8312 timerid &= 0xffff;
8314 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8315 return -TARGET_EINVAL;
8318 return timerid;
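/*
 * Encoding sketch: timer IDs handed to the guest are TIMER_MAGIC with
 * the table index in the low 16 bits, e.g. index 3 -> 0x0caf0003, so
 * get_timer_id(0x0caf0003) yields 3, while get_timer_id(0x12340003)
 * yields -TARGET_EINVAL (magic mismatch).
 */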
8321 static int target_to_host_cpu_mask(unsigned long *host_mask,
8322 size_t host_size,
8323 abi_ulong target_addr,
8324 size_t target_size)
8326 unsigned target_bits = sizeof(abi_ulong) * 8;
8327 unsigned host_bits = sizeof(*host_mask) * 8;
8328 abi_ulong *target_mask;
8329 unsigned i, j;
8331 assert(host_size >= target_size);
8333 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8334 if (!target_mask) {
8335 return -TARGET_EFAULT;
8337 memset(host_mask, 0, host_size);
8339 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8340 unsigned bit = i * target_bits;
8341 abi_ulong val;
8343 __get_user(val, &target_mask[i]);
8344 for (j = 0; j < target_bits; j++, bit++) {
8345 if (val & (1UL << j)) {
8346 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8351 unlock_user(target_mask, target_addr, 0);
8352 return 0;
8355 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8356 size_t host_size,
8357 abi_ulong target_addr,
8358 size_t target_size)
8360 unsigned target_bits = sizeof(abi_ulong) * 8;
8361 unsigned host_bits = sizeof(*host_mask) * 8;
8362 abi_ulong *target_mask;
8363 unsigned i, j;
8365 assert(host_size >= target_size);
8367 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8368 if (!target_mask) {
8369 return -TARGET_EFAULT;
8372 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8373 unsigned bit = i * target_bits;
8374 abi_ulong val = 0;
8376 for (j = 0; j < target_bits; j++, bit++) {
8377 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8378 val |= 1UL << j;
8381 __put_user(val, &target_mask[i]);
8384 unlock_user(target_mask, target_addr, target_size);
8385 return 0;
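/*
 * Remapping sketch: a 32-bit guest passing the two-word mask
 * { 0x00000001, 0x80000000 } sets target bits 0 and 63; on a 64-bit
 * host both land in host_mask[0], giving 0x8000000000000001.
 */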
8388 #ifdef TARGET_NR_getdents
8389 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8391 g_autofree void *hdirp = NULL;
8392 void *tdirp;
8393 int hlen, hoff, toff;
8394 int hreclen, treclen;
8395 off64_t prev_diroff = 0;
8397 hdirp = g_try_malloc(count);
8398 if (!hdirp) {
8399 return -TARGET_ENOMEM;
8402 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8403 hlen = sys_getdents(dirfd, hdirp, count);
8404 #else
8405 hlen = sys_getdents64(dirfd, hdirp, count);
8406 #endif
8408 hlen = get_errno(hlen);
8409 if (is_error(hlen)) {
8410 return hlen;
8413 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8414 if (!tdirp) {
8415 return -TARGET_EFAULT;
8418 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8419 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8420 struct linux_dirent *hde = hdirp + hoff;
8421 #else
8422 struct linux_dirent64 *hde = hdirp + hoff;
8423 #endif
8424 struct target_dirent *tde = tdirp + toff;
8425 int namelen;
8426 uint8_t type;
8428 namelen = strlen(hde->d_name);
8429 hreclen = hde->d_reclen;
8430 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8431 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8433 if (toff + treclen > count) {
8435 * If the host struct is smaller than the target struct, or
8436 * requires less alignment and thus packs into less space,
8437 * then the host can return more entries than we can pass
8438 * on to the guest.
8440 if (toff == 0) {
8441 toff = -TARGET_EINVAL; /* result buffer is too small */
8442 break;
8445 * Return what we have, resetting the file pointer to the
8446 * location of the first record not returned.
8448 lseek64(dirfd, prev_diroff, SEEK_SET);
8449 break;
8452 prev_diroff = hde->d_off;
8453 tde->d_ino = tswapal(hde->d_ino);
8454 tde->d_off = tswapal(hde->d_off);
8455 tde->d_reclen = tswap16(treclen);
8456 memcpy(tde->d_name, hde->d_name, namelen + 1);
8459 * The getdents type is in what was formerly a padding byte at the
8460 * end of the structure.
8462 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8463 type = *((uint8_t *)hde + hreclen - 1);
8464 #else
8465 type = hde->d_type;
8466 #endif
8467 *((uint8_t *)tde + treclen - 1) = type;
8470 unlock_user(tdirp, arg2, toff);
8471 return toff;
8473 #endif /* TARGET_NR_getdents */
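/*
 * Record-size example: for d_name "file.txt" (namelen 8),
 * treclen = offsetof(struct target_dirent, d_name) + 8 + 2, rounded up
 * to the target alignment; the "+ 2" covers the trailing NUL and the
 * d_type byte stashed in the record's final byte.
 */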
8475 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8476 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8478 g_autofree void *hdirp = NULL;
8479 void *tdirp;
8480 int hlen, hoff, toff;
8481 int hreclen, treclen;
8482 off64_t prev_diroff = 0;
8484 hdirp = g_try_malloc(count);
8485 if (!hdirp) {
8486 return -TARGET_ENOMEM;
8489 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8490 if (is_error(hlen)) {
8491 return hlen;
8494 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8495 if (!tdirp) {
8496 return -TARGET_EFAULT;
8499 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8500 struct linux_dirent64 *hde = hdirp + hoff;
8501 struct target_dirent64 *tde = tdirp + toff;
8502 int namelen;
8504 namelen = strlen(hde->d_name) + 1;
8505 hreclen = hde->d_reclen;
8506 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8507 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8509 if (toff + treclen > count) {
8511 * If the host struct is smaller than the target struct, or
8512 * requires less alignment and thus packs into less space,
8513 * then the host can return more entries than we can pass
8514 * on to the guest.
8516 if (toff == 0) {
8517 toff = -TARGET_EINVAL; /* result buffer is too small */
8518 break;
8521 * Return what we have, resetting the file pointer to the
8522 * location of the first record not returned.
8524 lseek64(dirfd, prev_diroff, SEEK_SET);
8525 break;
8528 prev_diroff = hde->d_off;
8529 tde->d_ino = tswap64(hde->d_ino);
8530 tde->d_off = tswap64(hde->d_off);
8531 tde->d_reclen = tswap16(treclen);
8532 tde->d_type = hde->d_type;
8533 memcpy(tde->d_name, hde->d_name, namelen);
8536 unlock_user(tdirp, arg2, toff);
8537 return toff;
8539 #endif /* TARGET_NR_getdents64 */
8541 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8542 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8543 #endif
8545 /* This is an internal helper for do_syscall: funnelling everything
8546 * through a single return point makes it easy to perform actions,
8547 * such as logging of syscall results, on the way out.
8548 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8550 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8551 abi_long arg2, abi_long arg3, abi_long arg4,
8552 abi_long arg5, abi_long arg6, abi_long arg7,
8553 abi_long arg8)
8555 CPUState *cpu = env_cpu(cpu_env);
8556 abi_long ret;
8557 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8558 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8559 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8560 || defined(TARGET_NR_statx)
8561 struct stat st;
8562 #endif
8563 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8564 || defined(TARGET_NR_fstatfs)
8565 struct statfs stfs;
8566 #endif
8567 void *p;
8569 switch(num) {
8570 case TARGET_NR_exit:
8571 /* In old applications this may be used to implement _exit(2).
8572 However in threaded applications it is used for thread termination,
8573 and _exit_group is used for application termination.
8574 Do thread termination if we have more than one thread. */
8576 if (block_signals()) {
8577 return -QEMU_ERESTARTSYS;
8580 pthread_mutex_lock(&clone_lock);
8582 if (CPU_NEXT(first_cpu)) {
8583 TaskState *ts = cpu->opaque;
8585 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8586 object_unref(OBJECT(cpu));
8588 * At this point the CPU should be unrealized and removed
8589 * from cpu lists. We can clean-up the rest of the thread
8590 * data without the lock held.
8593 pthread_mutex_unlock(&clone_lock);
8595 if (ts->child_tidptr) {
8596 put_user_u32(0, ts->child_tidptr);
8597 do_sys_futex(g2h(cpu, ts->child_tidptr),
8598 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8600 thread_cpu = NULL;
8601 g_free(ts);
8602 rcu_unregister_thread();
8603 pthread_exit(NULL);
8606 pthread_mutex_unlock(&clone_lock);
8607 preexit_cleanup(cpu_env, arg1);
8608 _exit(arg1);
8609 return 0; /* avoid warning */
8610 case TARGET_NR_read:
8611 if (arg2 == 0 && arg3 == 0) {
8612 return get_errno(safe_read(arg1, 0, 0));
8613 } else {
8614 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8615 return -TARGET_EFAULT;
8616 ret = get_errno(safe_read(arg1, p, arg3));
8617 if (ret >= 0 &&
8618 fd_trans_host_to_target_data(arg1)) {
8619 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8621 unlock_user(p, arg2, ret);
8623 return ret;
8624 case TARGET_NR_write:
8625 if (arg2 == 0 && arg3 == 0) {
8626 return get_errno(safe_write(arg1, 0, 0));
8628 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8629 return -TARGET_EFAULT;
8630 if (fd_trans_target_to_host_data(arg1)) {
8631 void *copy = g_malloc(arg3);
8632 memcpy(copy, p, arg3);
8633 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8634 if (ret >= 0) {
8635 ret = get_errno(safe_write(arg1, copy, ret));
8637 g_free(copy);
8638 } else {
8639 ret = get_errno(safe_write(arg1, p, arg3));
8641 unlock_user(p, arg2, 0);
8642 return ret;
8644 #ifdef TARGET_NR_open
8645 case TARGET_NR_open:
8646 if (!(p = lock_user_string(arg1)))
8647 return -TARGET_EFAULT;
8648 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8649 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8650 arg3));
8651 fd_trans_unregister(ret);
8652 unlock_user(p, arg1, 0);
8653 return ret;
8654 #endif
8655 case TARGET_NR_openat:
8656 if (!(p = lock_user_string(arg2)))
8657 return -TARGET_EFAULT;
8658 ret = get_errno(do_openat(cpu_env, arg1, p,
8659 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8660 arg4));
8661 fd_trans_unregister(ret);
8662 unlock_user(p, arg2, 0);
8663 return ret;
8664 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8665 case TARGET_NR_name_to_handle_at:
8666 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8667 return ret;
8668 #endif
8669 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8670 case TARGET_NR_open_by_handle_at:
8671 ret = do_open_by_handle_at(arg1, arg2, arg3);
8672 fd_trans_unregister(ret);
8673 return ret;
8674 #endif
8675 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8676 case TARGET_NR_pidfd_open:
8677 return get_errno(pidfd_open(arg1, arg2));
8678 #endif
8679 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8680 case TARGET_NR_pidfd_send_signal:
8682 siginfo_t uinfo;
8684 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8685 if (!p) {
8686 return -TARGET_EFAULT;
8688 target_to_host_siginfo(&uinfo, p);
8689 unlock_user(p, arg3, 0);
8690 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8691 &uinfo, arg4));
8693 return ret;
8694 #endif
8695 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8696 case TARGET_NR_pidfd_getfd:
8697 return get_errno(pidfd_getfd(arg1, arg2, arg3));
8698 #endif
8699 case TARGET_NR_close:
8700 fd_trans_unregister(arg1);
8701 return get_errno(close(arg1));
8703 case TARGET_NR_brk:
8704 return do_brk(arg1);
8705 #ifdef TARGET_NR_fork
8706 case TARGET_NR_fork:
8707 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8708 #endif
8709 #ifdef TARGET_NR_waitpid
8710 case TARGET_NR_waitpid:
8712 int status;
8713 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8714 if (!is_error(ret) && arg2 && ret
8715 && put_user_s32(host_to_target_waitstatus(status), arg2))
8716 return -TARGET_EFAULT;
8718 return ret;
8719 #endif
8720 #ifdef TARGET_NR_waitid
8721 case TARGET_NR_waitid:
8723 siginfo_t info;
8724 info.si_pid = 0;
8725 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8726 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8727 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8728 return -TARGET_EFAULT;
8729 host_to_target_siginfo(p, &info);
8730 unlock_user(p, arg3, sizeof(target_siginfo_t));
8733 return ret;
8734 #endif
8735 #ifdef TARGET_NR_creat /* not on alpha */
8736 case TARGET_NR_creat:
8737 if (!(p = lock_user_string(arg1)))
8738 return -TARGET_EFAULT;
8739 ret = get_errno(creat(p, arg2));
8740 fd_trans_unregister(ret);
8741 unlock_user(p, arg1, 0);
8742 return ret;
8743 #endif
8744 #ifdef TARGET_NR_link
8745 case TARGET_NR_link:
8747 void * p2;
8748 p = lock_user_string(arg1);
8749 p2 = lock_user_string(arg2);
8750 if (!p || !p2)
8751 ret = -TARGET_EFAULT;
8752 else
8753 ret = get_errno(link(p, p2));
8754 unlock_user(p2, arg2, 0);
8755 unlock_user(p, arg1, 0);
8757 return ret;
8758 #endif
8759 #if defined(TARGET_NR_linkat)
8760 case TARGET_NR_linkat:
8762 void * p2 = NULL;
8763 if (!arg2 || !arg4)
8764 return -TARGET_EFAULT;
8765 p = lock_user_string(arg2);
8766 p2 = lock_user_string(arg4);
8767 if (!p || !p2)
8768 ret = -TARGET_EFAULT;
8769 else
8770 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8771 unlock_user(p, arg2, 0);
8772 unlock_user(p2, arg4, 0);
8774 return ret;
8775 #endif
8776 #ifdef TARGET_NR_unlink
8777 case TARGET_NR_unlink:
8778 if (!(p = lock_user_string(arg1)))
8779 return -TARGET_EFAULT;
8780 ret = get_errno(unlink(p));
8781 unlock_user(p, arg1, 0);
8782 return ret;
8783 #endif
8784 #if defined(TARGET_NR_unlinkat)
8785 case TARGET_NR_unlinkat:
8786 if (!(p = lock_user_string(arg2)))
8787 return -TARGET_EFAULT;
8788 ret = get_errno(unlinkat(arg1, p, arg3));
8789 unlock_user(p, arg2, 0);
8790 return ret;
8791 #endif
8792 case TARGET_NR_execve:
8794 char **argp, **envp;
8795 int argc, envc;
8796 abi_ulong gp;
8797 abi_ulong guest_argp;
8798 abi_ulong guest_envp;
8799 abi_ulong addr;
8800 char **q;
8802 argc = 0;
8803 guest_argp = arg2;
8804 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8805 if (get_user_ual(addr, gp))
8806 return -TARGET_EFAULT;
8807 if (!addr)
8808 break;
8809 argc++;
8811 envc = 0;
8812 guest_envp = arg3;
8813 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8814 if (get_user_ual(addr, gp))
8815 return -TARGET_EFAULT;
8816 if (!addr)
8817 break;
8818 envc++;
8821 argp = g_new0(char *, argc + 1);
8822 envp = g_new0(char *, envc + 1);
8824 for (gp = guest_argp, q = argp; gp;
8825 gp += sizeof(abi_ulong), q++) {
8826 if (get_user_ual(addr, gp))
8827 goto execve_efault;
8828 if (!addr)
8829 break;
8830 if (!(*q = lock_user_string(addr)))
8831 goto execve_efault;
8833 *q = NULL;
8835 for (gp = guest_envp, q = envp; gp;
8836 gp += sizeof(abi_ulong), q++) {
8837 if (get_user_ual(addr, gp))
8838 goto execve_efault;
8839 if (!addr)
8840 break;
8841 if (!(*q = lock_user_string(addr)))
8842 goto execve_efault;
8844 *q = NULL;
8846 if (!(p = lock_user_string(arg1)))
8847 goto execve_efault;
8848 /* Although execve() is not an interruptible syscall it is
8849 * a special case where we must use the safe_syscall wrapper:
8850 * if we allow a signal to happen before we make the host
8851 * syscall then we will 'lose' it, because at the point of
8852 * execve the process leaves QEMU's control. So we use the
8853 * safe syscall wrapper to ensure that we either take the
8854 * signal as a guest signal, or else it does not happen
8855 * before the execve completes and makes it the other
8856 * program's problem.
8858 ret = get_errno(safe_execve(p, argp, envp));
8859 unlock_user(p, arg1, 0);
8861 goto execve_end;
8863 execve_efault:
8864 ret = -TARGET_EFAULT;
8866 execve_end:
8867 for (gp = guest_argp, q = argp; *q;
8868 gp += sizeof(abi_ulong), q++) {
8869 if (get_user_ual(addr, gp)
8870 || !addr)
8871 break;
8872 unlock_user(*q, addr, 0);
8874 for (gp = guest_envp, q = envp; *q;
8875 gp += sizeof(abi_ulong), q++) {
8876 if (get_user_ual(addr, gp)
8877 || !addr)
8878 break;
8879 unlock_user(*q, addr, 0);
8882 g_free(argp);
8883 g_free(envp);
8885 return ret;
8886 case TARGET_NR_chdir:
8887 if (!(p = lock_user_string(arg1)))
8888 return -TARGET_EFAULT;
8889 ret = get_errno(chdir(p));
8890 unlock_user(p, arg1, 0);
8891 return ret;
8892 #ifdef TARGET_NR_time
8893 case TARGET_NR_time:
8895 time_t host_time;
8896 ret = get_errno(time(&host_time));
8897 if (!is_error(ret)
8898 && arg1
8899 && put_user_sal(host_time, arg1))
8900 return -TARGET_EFAULT;
8902 return ret;
8903 #endif
8904 #ifdef TARGET_NR_mknod
8905 case TARGET_NR_mknod:
8906 if (!(p = lock_user_string(arg1)))
8907 return -TARGET_EFAULT;
8908 ret = get_errno(mknod(p, arg2, arg3));
8909 unlock_user(p, arg1, 0);
8910 return ret;
8911 #endif
8912 #if defined(TARGET_NR_mknodat)
8913 case TARGET_NR_mknodat:
8914 if (!(p = lock_user_string(arg2)))
8915 return -TARGET_EFAULT;
8916 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8917 unlock_user(p, arg2, 0);
8918 return ret;
8919 #endif
8920 #ifdef TARGET_NR_chmod
8921 case TARGET_NR_chmod:
8922 if (!(p = lock_user_string(arg1)))
8923 return -TARGET_EFAULT;
8924 ret = get_errno(chmod(p, arg2));
8925 unlock_user(p, arg1, 0);
8926 return ret;
8927 #endif
8928 #ifdef TARGET_NR_lseek
8929 case TARGET_NR_lseek:
8930 return get_errno(lseek(arg1, arg2, arg3));
8931 #endif
8932 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8933 /* Alpha specific */
8934 case TARGET_NR_getxpid:
8935 cpu_env->ir[IR_A4] = getppid();
8936 return get_errno(getpid());
8937 #endif
8938 #ifdef TARGET_NR_getpid
8939 case TARGET_NR_getpid:
8940 return get_errno(getpid());
8941 #endif
8942 case TARGET_NR_mount:
8944 /* need to look at the data field */
8945 void *p2, *p3;
8947 if (arg1) {
8948 p = lock_user_string(arg1);
8949 if (!p) {
8950 return -TARGET_EFAULT;
8952 } else {
8953 p = NULL;
8956 p2 = lock_user_string(arg2);
8957 if (!p2) {
8958 if (arg1) {
8959 unlock_user(p, arg1, 0);
8961 return -TARGET_EFAULT;
8964 if (arg3) {
8965 p3 = lock_user_string(arg3);
8966 if (!p3) {
8967 if (arg1) {
8968 unlock_user(p, arg1, 0);
8970 unlock_user(p2, arg2, 0);
8971 return -TARGET_EFAULT;
8973 } else {
8974 p3 = NULL;
8977 /* FIXME - arg5 should be locked, but it isn't clear how to
8978 * do that since it's not guaranteed to be a NULL-terminated
8979 * string.
8981 if (!arg5) {
8982 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8983 } else {
8984 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8986 ret = get_errno(ret);
8988 if (arg1) {
8989 unlock_user(p, arg1, 0);
8991 unlock_user(p2, arg2, 0);
8992 if (arg3) {
8993 unlock_user(p3, arg3, 0);
8996 return ret;
8997 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8998 #if defined(TARGET_NR_umount)
8999 case TARGET_NR_umount:
9000 #endif
9001 #if defined(TARGET_NR_oldumount)
9002 case TARGET_NR_oldumount:
9003 #endif
9004 if (!(p = lock_user_string(arg1)))
9005 return -TARGET_EFAULT;
9006 ret = get_errno(umount(p));
9007 unlock_user(p, arg1, 0);
9008 return ret;
9009 #endif
9010 #ifdef TARGET_NR_stime /* not on alpha */
9011 case TARGET_NR_stime:
9013 struct timespec ts;
9014 ts.tv_nsec = 0;
9015 if (get_user_sal(ts.tv_sec, arg1)) {
9016 return -TARGET_EFAULT;
9018 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9020 #endif
9021 #ifdef TARGET_NR_alarm /* not on alpha */
9022 case TARGET_NR_alarm:
9023 return alarm(arg1);
9024 #endif
9025 #ifdef TARGET_NR_pause /* not on alpha */
9026 case TARGET_NR_pause:
9027 if (!block_signals()) {
9028 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9030 return -TARGET_EINTR;
9031 #endif
9032 #ifdef TARGET_NR_utime
9033 case TARGET_NR_utime:
9035 struct utimbuf tbuf, *host_tbuf;
9036 struct target_utimbuf *target_tbuf;
9037 if (arg2) {
9038 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9039 return -TARGET_EFAULT;
9040 tbuf.actime = tswapal(target_tbuf->actime);
9041 tbuf.modtime = tswapal(target_tbuf->modtime);
9042 unlock_user_struct(target_tbuf, arg2, 0);
9043 host_tbuf = &tbuf;
9044 } else {
9045 host_tbuf = NULL;
9047 if (!(p = lock_user_string(arg1)))
9048 return -TARGET_EFAULT;
9049 ret = get_errno(utime(p, host_tbuf));
9050 unlock_user(p, arg1, 0);
9052 return ret;
9053 #endif
9054 #ifdef TARGET_NR_utimes
9055 case TARGET_NR_utimes:
9057 struct timeval *tvp, tv[2];
9058 if (arg2) {
9059 if (copy_from_user_timeval(&tv[0], arg2)
9060 || copy_from_user_timeval(&tv[1],
9061 arg2 + sizeof(struct target_timeval)))
9062 return -TARGET_EFAULT;
9063 tvp = tv;
9064 } else {
9065 tvp = NULL;
9067 if (!(p = lock_user_string(arg1)))
9068 return -TARGET_EFAULT;
9069 ret = get_errno(utimes(p, tvp));
9070 unlock_user(p, arg1, 0);
9072 return ret;
9073 #endif
9074 #if defined(TARGET_NR_futimesat)
9075 case TARGET_NR_futimesat:
9077 struct timeval *tvp, tv[2];
9078 if (arg3) {
9079 if (copy_from_user_timeval(&tv[0], arg3)
9080 || copy_from_user_timeval(&tv[1],
9081 arg3 + sizeof(struct target_timeval)))
9082 return -TARGET_EFAULT;
9083 tvp = tv;
9084 } else {
9085 tvp = NULL;
9087 if (!(p = lock_user_string(arg2))) {
9088 return -TARGET_EFAULT;
9090 ret = get_errno(futimesat(arg1, path(p), tvp));
9091 unlock_user(p, arg2, 0);
9093 return ret;
9094 #endif
9095 #ifdef TARGET_NR_access
9096 case TARGET_NR_access:
9097 if (!(p = lock_user_string(arg1))) {
9098 return -TARGET_EFAULT;
9100 ret = get_errno(access(path(p), arg2));
9101 unlock_user(p, arg1, 0);
9102 return ret;
9103 #endif
9104 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9105 case TARGET_NR_faccessat:
9106 if (!(p = lock_user_string(arg2))) {
9107 return -TARGET_EFAULT;
9109 ret = get_errno(faccessat(arg1, p, arg3, 0));
9110 unlock_user(p, arg2, 0);
9111 return ret;
9112 #endif
9113 #ifdef TARGET_NR_nice /* not on alpha */
9114 case TARGET_NR_nice:
9115 return get_errno(nice(arg1));
9116 #endif
9117 case TARGET_NR_sync:
9118 sync();
9119 return 0;
9120 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9121 case TARGET_NR_syncfs:
9122 return get_errno(syncfs(arg1));
9123 #endif
9124 case TARGET_NR_kill:
9125 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9126 #ifdef TARGET_NR_rename
9127 case TARGET_NR_rename:
9129 void *p2;
9130 p = lock_user_string(arg1);
9131 p2 = lock_user_string(arg2);
9132 if (!p || !p2)
9133 ret = -TARGET_EFAULT;
9134 else
9135 ret = get_errno(rename(p, p2));
9136 unlock_user(p2, arg2, 0);
9137 unlock_user(p, arg1, 0);
9139 return ret;
9140 #endif
9141 #if defined(TARGET_NR_renameat)
9142 case TARGET_NR_renameat:
9144 void *p2;
9145 p = lock_user_string(arg2);
9146 p2 = lock_user_string(arg4);
9147 if (!p || !p2)
9148 ret = -TARGET_EFAULT;
9149 else
9150 ret = get_errno(renameat(arg1, p, arg3, p2));
9151 unlock_user(p2, arg4, 0);
9152 unlock_user(p, arg2, 0);
9154 return ret;
9155 #endif
9156 #if defined(TARGET_NR_renameat2)
9157 case TARGET_NR_renameat2:
9159 void *p2;
9160 p = lock_user_string(arg2);
9161 p2 = lock_user_string(arg4);
9162 if (!p || !p2) {
9163 ret = -TARGET_EFAULT;
9164 } else {
9165 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9167 unlock_user(p2, arg4, 0);
9168 unlock_user(p, arg2, 0);
9170 return ret;
9171 #endif
9172 #ifdef TARGET_NR_mkdir
9173 case TARGET_NR_mkdir:
9174 if (!(p = lock_user_string(arg1)))
9175 return -TARGET_EFAULT;
9176 ret = get_errno(mkdir(p, arg2));
9177 unlock_user(p, arg1, 0);
9178 return ret;
9179 #endif
9180 #if defined(TARGET_NR_mkdirat)
9181 case TARGET_NR_mkdirat:
9182 if (!(p = lock_user_string(arg2)))
9183 return -TARGET_EFAULT;
9184 ret = get_errno(mkdirat(arg1, p, arg3));
9185 unlock_user(p, arg2, 0);
9186 return ret;
9187 #endif
9188 #ifdef TARGET_NR_rmdir
9189 case TARGET_NR_rmdir:
9190 if (!(p = lock_user_string(arg1)))
9191 return -TARGET_EFAULT;
9192 ret = get_errno(rmdir(p));
9193 unlock_user(p, arg1, 0);
9194 return ret;
9195 #endif
9196 case TARGET_NR_dup:
9197 ret = get_errno(dup(arg1));
9198 if (ret >= 0) {
9199 fd_trans_dup(arg1, ret);
9201 return ret;
9202 #ifdef TARGET_NR_pipe
9203 case TARGET_NR_pipe:
9204 return do_pipe(cpu_env, arg1, 0, 0);
9205 #endif
9206 #ifdef TARGET_NR_pipe2
9207 case TARGET_NR_pipe2:
9208 return do_pipe(cpu_env, arg1,
9209 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9210 #endif
9211 case TARGET_NR_times:
9213 struct target_tms *tmsp;
9214 struct tms tms;
9215 ret = get_errno(times(&tms));
9216 if (arg1) {
9217 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9218 if (!tmsp)
9219 return -TARGET_EFAULT;
9220 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9221 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9222 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9223 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9225 if (!is_error(ret))
9226 ret = host_to_target_clock_t(ret);
9228 return ret;
9229 case TARGET_NR_acct:
9230 if (arg1 == 0) {
9231 ret = get_errno(acct(NULL));
9232 } else {
9233 if (!(p = lock_user_string(arg1))) {
9234 return -TARGET_EFAULT;
9236 ret = get_errno(acct(path(p)));
9237 unlock_user(p, arg1, 0);
9239 return ret;
9240 #ifdef TARGET_NR_umount2
9241 case TARGET_NR_umount2:
9242 if (!(p = lock_user_string(arg1)))
9243 return -TARGET_EFAULT;
9244 ret = get_errno(umount2(p, arg2));
9245 unlock_user(p, arg1, 0);
9246 return ret;
9247 #endif
9248 case TARGET_NR_ioctl:
9249 return do_ioctl(arg1, arg2, arg3);
9250 #ifdef TARGET_NR_fcntl
9251 case TARGET_NR_fcntl:
9252 return do_fcntl(arg1, arg2, arg3);
9253 #endif
9254 case TARGET_NR_setpgid:
9255 return get_errno(setpgid(arg1, arg2));
9256 case TARGET_NR_umask:
9257 return get_errno(umask(arg1));
9258 case TARGET_NR_chroot:
9259 if (!(p = lock_user_string(arg1)))
9260 return -TARGET_EFAULT;
9261 ret = get_errno(chroot(p));
9262 unlock_user(p, arg1, 0);
9263 return ret;
9264 #ifdef TARGET_NR_dup2
9265 case TARGET_NR_dup2:
9266 ret = get_errno(dup2(arg1, arg2));
9267 if (ret >= 0) {
9268 fd_trans_dup(arg1, arg2);
9270 return ret;
9271 #endif
9272 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9273 case TARGET_NR_dup3:
9275 int host_flags;
9277 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9278 return -TARGET_EINVAL;
9280 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9281 ret = get_errno(dup3(arg1, arg2, host_flags));
9282 if (ret >= 0) {
9283 fd_trans_dup(arg1, arg2);
9285 return ret;
9287 #endif
9288 #ifdef TARGET_NR_getppid /* not on alpha */
9289 case TARGET_NR_getppid:
9290 return get_errno(getppid());
9291 #endif
9292 #ifdef TARGET_NR_getpgrp
9293 case TARGET_NR_getpgrp:
9294 return get_errno(getpgrp());
9295 #endif
9296 case TARGET_NR_setsid:
9297 return get_errno(setsid());
9298 #ifdef TARGET_NR_sigaction
9299 case TARGET_NR_sigaction:
9301 #if defined(TARGET_MIPS)
9302 struct target_sigaction act, oact, *pact, *old_act;
9304 if (arg2) {
9305 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9306 return -TARGET_EFAULT;
9307 act._sa_handler = old_act->_sa_handler;
9308 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9309 act.sa_flags = old_act->sa_flags;
9310 unlock_user_struct(old_act, arg2, 0);
9311 pact = &act;
9312 } else {
9313 pact = NULL;
9316 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9318 if (!is_error(ret) && arg3) {
9319 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9320 return -TARGET_EFAULT;
9321 old_act->_sa_handler = oact._sa_handler;
9322 old_act->sa_flags = oact.sa_flags;
9323 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9324 old_act->sa_mask.sig[1] = 0;
9325 old_act->sa_mask.sig[2] = 0;
9326 old_act->sa_mask.sig[3] = 0;
9327 unlock_user_struct(old_act, arg3, 1);
9329 #else
9330 struct target_old_sigaction *old_act;
9331 struct target_sigaction act, oact, *pact;
9332 if (arg2) {
9333 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9334 return -TARGET_EFAULT;
9335 act._sa_handler = old_act->_sa_handler;
9336 target_siginitset(&act.sa_mask, old_act->sa_mask);
9337 act.sa_flags = old_act->sa_flags;
9338 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9339 act.sa_restorer = old_act->sa_restorer;
9340 #endif
9341 unlock_user_struct(old_act, arg2, 0);
9342 pact = &act;
9343 } else {
9344 pact = NULL;
9346 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9347 if (!is_error(ret) && arg3) {
9348 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9349 return -TARGET_EFAULT;
9350 old_act->_sa_handler = oact._sa_handler;
9351 old_act->sa_mask = oact.sa_mask.sig[0];
9352 old_act->sa_flags = oact.sa_flags;
9353 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9354 old_act->sa_restorer = oact.sa_restorer;
9355 #endif
9356 unlock_user_struct(old_act, arg3, 1);
9358 #endif
9360 return ret;
9361 #endif
9362 case TARGET_NR_rt_sigaction:
9365 * For Alpha and SPARC this is a 5 argument syscall, with
9366 * a 'restorer' parameter which must be copied into the
9367 * sa_restorer field of the sigaction struct.
9368 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9369 * and arg5 is the sigsetsize.
9371 #if defined(TARGET_ALPHA)
9372 target_ulong sigsetsize = arg4;
9373 target_ulong restorer = arg5;
9374 #elif defined(TARGET_SPARC)
9375 target_ulong restorer = arg4;
9376 target_ulong sigsetsize = arg5;
9377 #else
9378 target_ulong sigsetsize = arg4;
9379 target_ulong restorer = 0;
9380 #endif
9381 struct target_sigaction *act = NULL;
9382 struct target_sigaction *oact = NULL;
9384 if (sigsetsize != sizeof(target_sigset_t)) {
9385 return -TARGET_EINVAL;
9387 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9388 return -TARGET_EFAULT;
9390 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9391 ret = -TARGET_EFAULT;
9392 } else {
9393 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9394 if (oact) {
9395 unlock_user_struct(oact, arg3, 1);
9398 if (act) {
9399 unlock_user_struct(act, arg2, 0);
9402 return ret;
9403 #ifdef TARGET_NR_sgetmask /* not on alpha */
9404 case TARGET_NR_sgetmask:
9406 sigset_t cur_set;
9407 abi_ulong target_set;
9408 ret = do_sigprocmask(0, NULL, &cur_set);
9409 if (!ret) {
9410 host_to_target_old_sigset(&target_set, &cur_set);
9411 ret = target_set;
9414 return ret;
9415 #endif
9416 #ifdef TARGET_NR_ssetmask /* not on alpha */
9417 case TARGET_NR_ssetmask:
9419 sigset_t set, oset;
9420 abi_ulong target_set = arg1;
9421 target_to_host_old_sigset(&set, &target_set);
9422 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9423 if (!ret) {
9424 host_to_target_old_sigset(&target_set, &oset);
9425 ret = target_set;
9428 return ret;
9429 #endif
9430 #ifdef TARGET_NR_sigprocmask
9431 case TARGET_NR_sigprocmask:
9433 #if defined(TARGET_ALPHA)
9434 sigset_t set, oldset;
9435 abi_ulong mask;
9436 int how;
9438 switch (arg1) {
9439 case TARGET_SIG_BLOCK:
9440 how = SIG_BLOCK;
9441 break;
9442 case TARGET_SIG_UNBLOCK:
9443 how = SIG_UNBLOCK;
9444 break;
9445 case TARGET_SIG_SETMASK:
9446 how = SIG_SETMASK;
9447 break;
9448 default:
9449 return -TARGET_EINVAL;
9451 mask = arg2;
9452 target_to_host_old_sigset(&set, &mask);
9454 ret = do_sigprocmask(how, &set, &oldset);
9455 if (!is_error(ret)) {
9456 host_to_target_old_sigset(&mask, &oldset);
9457 ret = mask;
9458 cpu_env->ir[IR_V0] = 0; /* force no error */
9460 #else
9461 sigset_t set, oldset, *set_ptr;
9462 int how;
9464 if (arg2) {
9465 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9466 if (!p) {
9467 return -TARGET_EFAULT;
9469 target_to_host_old_sigset(&set, p);
9470 unlock_user(p, arg2, 0);
9471 set_ptr = &set;
9472 switch (arg1) {
9473 case TARGET_SIG_BLOCK:
9474 how = SIG_BLOCK;
9475 break;
9476 case TARGET_SIG_UNBLOCK:
9477 how = SIG_UNBLOCK;
9478 break;
9479 case TARGET_SIG_SETMASK:
9480 how = SIG_SETMASK;
9481 break;
9482 default:
9483 return -TARGET_EINVAL;
9485 } else {
9486 how = 0;
9487 set_ptr = NULL;
9489 ret = do_sigprocmask(how, set_ptr, &oldset);
9490 if (!is_error(ret) && arg3) {
9491 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9492 return -TARGET_EFAULT;
9493 host_to_target_old_sigset(p, &oldset);
9494 unlock_user(p, arg3, sizeof(target_sigset_t));
9496 #endif
9498 return ret;
9499 #endif
9500 case TARGET_NR_rt_sigprocmask:
9502 int how = arg1;
9503 sigset_t set, oldset, *set_ptr;
9505 if (arg4 != sizeof(target_sigset_t)) {
9506 return -TARGET_EINVAL;
9509 if (arg2) {
9510 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9511 if (!p) {
9512 return -TARGET_EFAULT;
9514 target_to_host_sigset(&set, p);
9515 unlock_user(p, arg2, 0);
9516 set_ptr = &set;
9517 switch(how) {
9518 case TARGET_SIG_BLOCK:
9519 how = SIG_BLOCK;
9520 break;
9521 case TARGET_SIG_UNBLOCK:
9522 how = SIG_UNBLOCK;
9523 break;
9524 case TARGET_SIG_SETMASK:
9525 how = SIG_SETMASK;
9526 break;
9527 default:
9528 return -TARGET_EINVAL;
9530 } else {
9531 how = 0;
9532 set_ptr = NULL;
9534 ret = do_sigprocmask(how, set_ptr, &oldset);
9535 if (!is_error(ret) && arg3) {
9536 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9537 return -TARGET_EFAULT;
9538 host_to_target_sigset(p, &oldset);
9539 unlock_user(p, arg3, sizeof(target_sigset_t));
9542 return ret;
9543 #ifdef TARGET_NR_sigpending
9544 case TARGET_NR_sigpending:
9546 sigset_t set;
9547 ret = get_errno(sigpending(&set));
9548 if (!is_error(ret)) {
9549 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9550 return -TARGET_EFAULT;
9551 host_to_target_old_sigset(p, &set);
9552 unlock_user(p, arg1, sizeof(target_sigset_t));
9555 return ret;
9556 #endif
9557 case TARGET_NR_rt_sigpending:
9559 sigset_t set;
9561 /* Yes, this check is >, not != like most. We follow the kernel's
9562 * logic, which allows smaller sizes because it implements
9563 * NR_sigpending through the same code path, and in that case
9564 * the old_sigset_t is smaller in size.
9566 if (arg2 > sizeof(target_sigset_t)) {
9567 return -TARGET_EINVAL;
9570 ret = get_errno(sigpending(&set));
9571 if (!is_error(ret)) {
9572 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9573 return -TARGET_EFAULT;
9574 host_to_target_sigset(p, &set);
9575 unlock_user(p, arg1, sizeof(target_sigset_t));
9578 return ret;
9579 #ifdef TARGET_NR_sigsuspend
9580 case TARGET_NR_sigsuspend:
9582 sigset_t *set;
9584 #if defined(TARGET_ALPHA)
9585 TaskState *ts = cpu->opaque;
9586 /* target_to_host_old_sigset will bswap back */
9587 abi_ulong mask = tswapal(arg1);
9588 set = &ts->sigsuspend_mask;
9589 target_to_host_old_sigset(set, &mask);
9590 #else
9591 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9592 if (ret != 0) {
9593 return ret;
9595 #endif
9596 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9597 finish_sigsuspend_mask(ret);
9599 return ret;
9600 #endif
9601 case TARGET_NR_rt_sigsuspend:
9603 sigset_t *set;
9605 ret = process_sigsuspend_mask(&set, arg1, arg2);
9606 if (ret != 0) {
9607 return ret;
9609 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9610 finish_sigsuspend_mask(ret);
9612 return ret;
9613 #ifdef TARGET_NR_rt_sigtimedwait
9614 case TARGET_NR_rt_sigtimedwait:
9616 sigset_t set;
9617 struct timespec uts, *puts;
9618 siginfo_t uinfo;
9620 if (arg4 != sizeof(target_sigset_t)) {
9621 return -TARGET_EINVAL;
9624 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9625 return -TARGET_EFAULT;
9626 target_to_host_sigset(&set, p);
9627 unlock_user(p, arg1, 0);
9628 if (arg3) {
9629 puts = &uts;
9630 if (target_to_host_timespec(puts, arg3)) {
9631 return -TARGET_EFAULT;
9633 } else {
9634 puts = NULL;
9636 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9637 SIGSET_T_SIZE));
9638 if (!is_error(ret)) {
9639 if (arg2) {
9640 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9642 if (!p) {
9643 return -TARGET_EFAULT;
9645 host_to_target_siginfo(p, &uinfo);
9646 unlock_user(p, arg2, sizeof(target_siginfo_t));
9648 ret = host_to_target_signal(ret);
9651 return ret;
9652 #endif
9653 #ifdef TARGET_NR_rt_sigtimedwait_time64
9654 case TARGET_NR_rt_sigtimedwait_time64:
9656 sigset_t set;
9657 struct timespec uts, *puts;
9658 siginfo_t uinfo;
9660 if (arg4 != sizeof(target_sigset_t)) {
9661 return -TARGET_EINVAL;
9664 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9665 if (!p) {
9666 return -TARGET_EFAULT;
9668 target_to_host_sigset(&set, p);
9669 unlock_user(p, arg1, 0);
9670 if (arg3) {
9671 puts = &uts;
9672 if (target_to_host_timespec64(puts, arg3)) {
9673 return -TARGET_EFAULT;
9675 } else {
9676 puts = NULL;
9678 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9679 SIGSET_T_SIZE));
9680 if (!is_error(ret)) {
9681 if (arg2) {
9682 p = lock_user(VERIFY_WRITE, arg2,
9683 sizeof(target_siginfo_t), 0);
9684 if (!p) {
9685 return -TARGET_EFAULT;
9687 host_to_target_siginfo(p, &uinfo);
9688 unlock_user(p, arg2, sizeof(target_siginfo_t));
9690 ret = host_to_target_signal(ret);
9693 return ret;
9694 #endif
9695 case TARGET_NR_rt_sigqueueinfo:
9697 siginfo_t uinfo;
9699 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9700 if (!p) {
9701 return -TARGET_EFAULT;
9703 target_to_host_siginfo(&uinfo, p);
9704 unlock_user(p, arg3, 0);
9705 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9707 return ret;
9708 case TARGET_NR_rt_tgsigqueueinfo:
9710 siginfo_t uinfo;
9712 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9713 if (!p) {
9714 return -TARGET_EFAULT;
9716 target_to_host_siginfo(&uinfo, p);
9717 unlock_user(p, arg4, 0);
9718 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9720 return ret;
9721 #ifdef TARGET_NR_sigreturn
9722 case TARGET_NR_sigreturn:
9723 if (block_signals()) {
9724 return -QEMU_ERESTARTSYS;
9726 return do_sigreturn(cpu_env);
9727 #endif
9728 case TARGET_NR_rt_sigreturn:
9729 if (block_signals()) {
9730 return -QEMU_ERESTARTSYS;
9732 return do_rt_sigreturn(cpu_env);
9733 case TARGET_NR_sethostname:
9734 if (!(p = lock_user_string(arg1)))
9735 return -TARGET_EFAULT;
9736 ret = get_errno(sethostname(p, arg2));
9737 unlock_user(p, arg1, 0);
9738 return ret;
9739 #ifdef TARGET_NR_setrlimit
9740 case TARGET_NR_setrlimit:
9742 int resource = target_to_host_resource(arg1);
9743 struct target_rlimit *target_rlim;
9744 struct rlimit rlim;
9745 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9746 return -TARGET_EFAULT;
9747 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9748 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9749 unlock_user_struct(target_rlim, arg2, 0);
9751 * If we just passed through resource limit settings for memory then
9752 * they would also apply to QEMU's own allocations, and QEMU will
9753 * crash or hang or die if its allocations fail. Ideally we would
9754 * track the guest allocations in QEMU and apply the limits ourselves.
9755 * For now, just tell the guest the call succeeded but don't actually
9756 * limit anything.
9758 if (resource != RLIMIT_AS &&
9759 resource != RLIMIT_DATA &&
9760 resource != RLIMIT_STACK) {
9761 return get_errno(setrlimit(resource, &rlim));
9762 } else {
9763 return 0;
9766 #endif
9767 #ifdef TARGET_NR_getrlimit
9768 case TARGET_NR_getrlimit:
9770 int resource = target_to_host_resource(arg1);
9771 struct target_rlimit *target_rlim;
9772 struct rlimit rlim;
9774 ret = get_errno(getrlimit(resource, &rlim));
9775 if (!is_error(ret)) {
9776 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9777 return -TARGET_EFAULT;
9778 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9779 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9780 unlock_user_struct(target_rlim, arg2, 1);
9783 return ret;
9784 #endif
9785 case TARGET_NR_getrusage:
9787 struct rusage rusage;
9788 ret = get_errno(getrusage(arg1, &rusage));
9789 if (!is_error(ret)) {
9790 ret = host_to_target_rusage(arg2, &rusage);
9793 return ret;
9794 #if defined(TARGET_NR_gettimeofday)
9795 case TARGET_NR_gettimeofday:
9797 struct timeval tv;
9798 struct timezone tz;
9800 ret = get_errno(gettimeofday(&tv, &tz));
9801 if (!is_error(ret)) {
9802 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9803 return -TARGET_EFAULT;
9805 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9806 return -TARGET_EFAULT;
9810 return ret;
9811 #endif
9812 #if defined(TARGET_NR_settimeofday)
9813 case TARGET_NR_settimeofday:
9815 struct timeval tv, *ptv = NULL;
9816 struct timezone tz, *ptz = NULL;
9818 if (arg1) {
9819 if (copy_from_user_timeval(&tv, arg1)) {
9820 return -TARGET_EFAULT;
9822 ptv = &tv;
9825 if (arg2) {
9826 if (copy_from_user_timezone(&tz, arg2)) {
9827 return -TARGET_EFAULT;
9829 ptz = &tz;
9832 return get_errno(settimeofday(ptv, ptz));
9834 #endif
9835 #if defined(TARGET_NR_select)
9836 case TARGET_NR_select:
9837 #if defined(TARGET_WANT_NI_OLD_SELECT)
9838 /* some architectures used to have old_select here
9839 * but now return ENOSYS for it.
9841 ret = -TARGET_ENOSYS;
9842 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9843 ret = do_old_select(arg1);
9844 #else
9845 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9846 #endif
9847 return ret;
9848 #endif
9849 #ifdef TARGET_NR_pselect6
9850 case TARGET_NR_pselect6:
9851 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9852 #endif
9853 #ifdef TARGET_NR_pselect6_time64
9854 case TARGET_NR_pselect6_time64:
9855 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9856 #endif
9857 #ifdef TARGET_NR_symlink
9858 case TARGET_NR_symlink:
9859 {
9860 void *p2;
9861 p = lock_user_string(arg1);
9862 p2 = lock_user_string(arg2);
9863 if (!p || !p2)
9864 ret = -TARGET_EFAULT;
9865 else
9866 ret = get_errno(symlink(p, p2));
9867 unlock_user(p2, arg2, 0);
9868 unlock_user(p, arg1, 0);
9869 }
9870 return ret;
9871 #endif
9872 #if defined(TARGET_NR_symlinkat)
9873 case TARGET_NR_symlinkat:
9874 {
9875 void *p2;
9876 p = lock_user_string(arg1);
9877 p2 = lock_user_string(arg3);
9878 if (!p || !p2)
9879 ret = -TARGET_EFAULT;
9880 else
9881 ret = get_errno(symlinkat(p, arg2, p2));
9882 unlock_user(p2, arg3, 0);
9883 unlock_user(p, arg1, 0);
9884 }
9885 return ret;
9886 #endif
9887 #ifdef TARGET_NR_readlink
9888 case TARGET_NR_readlink:
9889 {
9890 void *p2;
9891 p = lock_user_string(arg1);
9892 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9893 if (!p || !p2) {
9894 ret = -TARGET_EFAULT;
9895 } else if (!arg3) {
9896 /* Short circuit this for the magic exe check. */
9897 ret = -TARGET_EINVAL;
9898 } else if (is_proc_myself((const char *)p, "exe")) {
9899 char real[PATH_MAX], *temp;
9900 temp = realpath(exec_path, real);
9901 /* Return value is # of bytes that we wrote to the buffer. */
9902 if (temp == NULL) {
9903 ret = get_errno(-1);
9904 } else {
9905 /* Don't worry about sign mismatch as earlier mapping
9906 * logic would have thrown a bad address error. */
9907 ret = MIN(strlen(real), arg3);
9908 /* We cannot NUL terminate the string. */
9909 memcpy(p2, real, ret);
9910 }
9911 } else {
9912 ret = get_errno(readlink(path(p), p2, arg3));
9913 }
9914 unlock_user(p2, arg2, ret);
9915 unlock_user(p, arg1, 0);
9916 }
9917 return ret;
9918 #endif
9919 #if defined(TARGET_NR_readlinkat)
9920 case TARGET_NR_readlinkat:
9921 {
9922 void *p2;
9923 p = lock_user_string(arg2);
9924 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9925 if (!p || !p2) {
9926 ret = -TARGET_EFAULT;
9927 } else if (!arg4) {
9928 /* Short circuit this for the magic exe check. */
9929 ret = -TARGET_EINVAL;
9930 } else if (is_proc_myself((const char *)p, "exe")) {
9931 char real[PATH_MAX], *temp;
9932 temp = realpath(exec_path, real);
9933 /* Return value is # of bytes that we wrote to the buffer. */
9934 if (temp == NULL) {
9935 ret = get_errno(-1);
9936 } else {
9937 /* Don't worry about sign mismatch as earlier mapping
9938 * logic would have thrown a bad address error. */
9939 ret = MIN(strlen(real), arg4);
9940 /* We cannot NUL terminate the string. */
9941 memcpy(p2, real, ret);
9942 }
9943 } else {
9944 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9945 }
9946 unlock_user(p2, arg3, ret);
9947 unlock_user(p, arg2, 0);
9948 }
9949 return ret;
9950 #endif
9951 #ifdef TARGET_NR_swapon
9952 case TARGET_NR_swapon:
9953 if (!(p = lock_user_string(arg1)))
9954 return -TARGET_EFAULT;
9955 ret = get_errno(swapon(p, arg2));
9956 unlock_user(p, arg1, 0);
9957 return ret;
9958 #endif
9959 case TARGET_NR_reboot:
9960 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9961 /* arg4 must be ignored in all other cases */
9962 p = lock_user_string(arg4);
9963 if (!p) {
9964 return -TARGET_EFAULT;
9965 }
9966 ret = get_errno(reboot(arg1, arg2, arg3, p));
9967 unlock_user(p, arg4, 0);
9968 } else {
9969 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9970 }
9971 return ret;
9972 #ifdef TARGET_NR_mmap
9973 case TARGET_NR_mmap:
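/* On the targets selected below, the guest ABI passes a single pointer
 * to a six-element vector of mmap arguments in memory (the old
 * one-argument mmap convention) instead of six register arguments. */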
9974 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9975 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9976 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9977 || defined(TARGET_S390X)
9978 {
9979 abi_ulong *v;
9980 abi_ulong v1, v2, v3, v4, v5, v6;
9981 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9982 return -TARGET_EFAULT;
9983 v1 = tswapal(v[0]);
9984 v2 = tswapal(v[1]);
9985 v3 = tswapal(v[2]);
9986 v4 = tswapal(v[3]);
9987 v5 = tswapal(v[4]);
9988 v6 = tswapal(v[5]);
9989 unlock_user(v, arg1, 0);
9990 ret = get_errno(target_mmap(v1, v2, v3,
9991 target_to_host_bitmask(v4, mmap_flags_tbl),
9992 v5, v6));
9993 }
9994 #else
9995 /* mmap pointers are always untagged */
9996 ret = get_errno(target_mmap(arg1, arg2, arg3,
9997 target_to_host_bitmask(arg4, mmap_flags_tbl),
9998 arg5,
9999 arg6));
10000 #endif
10001 return ret;
10002 #endif
10003 #ifdef TARGET_NR_mmap2
10004 case TARGET_NR_mmap2:
10005 #ifndef MMAP_SHIFT
10006 #define MMAP_SHIFT 12
10007 #endif
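/* mmap2 takes its file offset in 4096-byte units (hence MMAP_SHIFT 12),
 * letting 32-bit guests address file offsets beyond 4GB. */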
10008 ret = target_mmap(arg1, arg2, arg3,
10009 target_to_host_bitmask(arg4, mmap_flags_tbl),
10010 arg5, arg6 << MMAP_SHIFT);
10011 return get_errno(ret);
10012 #endif
10013 case TARGET_NR_munmap:
10014 arg1 = cpu_untagged_addr(cpu, arg1);
10015 return get_errno(target_munmap(arg1, arg2));
10016 case TARGET_NR_mprotect:
10017 arg1 = cpu_untagged_addr(cpu, arg1);
10018 {
10019 TaskState *ts = cpu->opaque;
10020 /* Special hack to detect libc making the stack executable. */
10021 if ((arg3 & PROT_GROWSDOWN)
10022 && arg1 >= ts->info->stack_limit
10023 && arg1 <= ts->info->start_stack) {
10024 arg3 &= ~PROT_GROWSDOWN;
10025 arg2 = arg2 + arg1 - ts->info->stack_limit;
10026 arg1 = ts->info->stack_limit;
10027 }
10028 }
10029 return get_errno(target_mprotect(arg1, arg2, arg3));
10030 #ifdef TARGET_NR_mremap
10031 case TARGET_NR_mremap:
10032 arg1 = cpu_untagged_addr(cpu, arg1);
10033 /* mremap new_addr (arg5) is always untagged */
10034 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10035 #endif
10036 /* ??? msync/mlock/munlock are broken for softmmu. */
10037 #ifdef TARGET_NR_msync
10038 case TARGET_NR_msync:
10039 return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10040 #endif
10041 #ifdef TARGET_NR_mlock
10042 case TARGET_NR_mlock:
10043 return get_errno(mlock(g2h(cpu, arg1), arg2));
10044 #endif
10045 #ifdef TARGET_NR_munlock
10046 case TARGET_NR_munlock:
10047 return get_errno(munlock(g2h(cpu, arg1), arg2));
10048 #endif
10049 #ifdef TARGET_NR_mlockall
10050 case TARGET_NR_mlockall:
10051 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10052 #endif
10053 #ifdef TARGET_NR_munlockall
10054 case TARGET_NR_munlockall:
10055 return get_errno(munlockall());
10056 #endif
10057 #ifdef TARGET_NR_truncate
10058 case TARGET_NR_truncate:
10059 if (!(p = lock_user_string(arg1)))
10060 return -TARGET_EFAULT;
10061 ret = get_errno(truncate(p, arg2));
10062 unlock_user(p, arg1, 0);
10063 return ret;
10064 #endif
10065 #ifdef TARGET_NR_ftruncate
10066 case TARGET_NR_ftruncate:
10067 return get_errno(ftruncate(arg1, arg2));
10068 #endif
10069 case TARGET_NR_fchmod:
10070 return get_errno(fchmod(arg1, arg2));
10071 #if defined(TARGET_NR_fchmodat)
10072 case TARGET_NR_fchmodat:
10073 if (!(p = lock_user_string(arg2)))
10074 return -TARGET_EFAULT;
10075 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10076 unlock_user(p, arg2, 0);
10077 return ret;
10078 #endif
10079 case TARGET_NR_getpriority:
10080 /* Note that negative values are valid for getpriority, so we must
10081 differentiate based on errno settings. */
10082 errno = 0;
10083 ret = getpriority(arg1, arg2);
10084 if (ret == -1 && errno != 0) {
10085 return -host_to_target_errno(errno);
10086 }
10087 #ifdef TARGET_ALPHA
10088 /* Return value is the unbiased priority. Signal no error. */
10089 cpu_env->ir[IR_V0] = 0;
10090 #else
10091 /* Return value is a biased priority to avoid negative numbers. */
10092 ret = 20 - ret;
10093 #endif
10094 return ret;
10095 case TARGET_NR_setpriority:
10096 return get_errno(setpriority(arg1, arg2, arg3));
10097 #ifdef TARGET_NR_statfs
10098 case TARGET_NR_statfs:
10099 if (!(p = lock_user_string(arg1))) {
10100 return -TARGET_EFAULT;
10101 }
10102 ret = get_errno(statfs(path(p), &stfs));
10103 unlock_user(p, arg1, 0);
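/* TARGET_NR_fstatfs below jumps to this label to share the
 * host-to-target statfs conversion. */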
10104 convert_statfs:
10105 if (!is_error(ret)) {
10106 struct target_statfs *target_stfs;
10108 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10109 return -TARGET_EFAULT;
10110 __put_user(stfs.f_type, &target_stfs->f_type);
10111 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10112 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10113 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10114 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10115 __put_user(stfs.f_files, &target_stfs->f_files);
10116 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10117 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10118 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10119 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10120 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10121 #ifdef _STATFS_F_FLAGS
10122 __put_user(stfs.f_flags, &target_stfs->f_flags);
10123 #else
10124 __put_user(0, &target_stfs->f_flags);
10125 #endif
10126 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10127 unlock_user_struct(target_stfs, arg2, 1);
10128 }
10129 return ret;
10130 #endif
10131 #ifdef TARGET_NR_fstatfs
10132 case TARGET_NR_fstatfs:
10133 ret = get_errno(fstatfs(arg1, &stfs));
10134 goto convert_statfs;
10135 #endif
10136 #ifdef TARGET_NR_statfs64
10137 case TARGET_NR_statfs64:
10138 if (!(p = lock_user_string(arg1))) {
10139 return -TARGET_EFAULT;
10140 }
10141 ret = get_errno(statfs(path(p), &stfs));
10142 unlock_user(p, arg1, 0);
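/* TARGET_NR_fstatfs64 below jumps to this label to share the
 * host-to-target statfs64 conversion. */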
10143 convert_statfs64:
10144 if (!is_error(ret)) {
10145 struct target_statfs64 *target_stfs;
10147 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10148 return -TARGET_EFAULT;
10149 __put_user(stfs.f_type, &target_stfs->f_type);
10150 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10151 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10152 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10153 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10154 __put_user(stfs.f_files, &target_stfs->f_files);
10155 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10156 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10157 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10158 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10159 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10160 #ifdef _STATFS_F_FLAGS
10161 __put_user(stfs.f_flags, &target_stfs->f_flags);
10162 #else
10163 __put_user(0, &target_stfs->f_flags);
10164 #endif
10165 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10166 unlock_user_struct(target_stfs, arg3, 1);
10167 }
10168 return ret;
10169 case TARGET_NR_fstatfs64:
10170 ret = get_errno(fstatfs(arg1, &stfs));
10171 goto convert_statfs64;
10172 #endif
10173 #ifdef TARGET_NR_socketcall
10174 case TARGET_NR_socketcall:
10175 return do_socketcall(arg1, arg2);
10176 #endif
10177 #ifdef TARGET_NR_accept
10178 case TARGET_NR_accept:
10179 return do_accept4(arg1, arg2, arg3, 0);
10180 #endif
10181 #ifdef TARGET_NR_accept4
10182 case TARGET_NR_accept4:
10183 return do_accept4(arg1, arg2, arg3, arg4);
10184 #endif
10185 #ifdef TARGET_NR_bind
10186 case TARGET_NR_bind:
10187 return do_bind(arg1, arg2, arg3);
10188 #endif
10189 #ifdef TARGET_NR_connect
10190 case TARGET_NR_connect:
10191 return do_connect(arg1, arg2, arg3);
10192 #endif
10193 #ifdef TARGET_NR_getpeername
10194 case TARGET_NR_getpeername:
10195 return do_getpeername(arg1, arg2, arg3);
10196 #endif
10197 #ifdef TARGET_NR_getsockname
10198 case TARGET_NR_getsockname:
10199 return do_getsockname(arg1, arg2, arg3);
10200 #endif
10201 #ifdef TARGET_NR_getsockopt
10202 case TARGET_NR_getsockopt:
10203 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10204 #endif
10205 #ifdef TARGET_NR_listen
10206 case TARGET_NR_listen:
10207 return get_errno(listen(arg1, arg2));
10208 #endif
10209 #ifdef TARGET_NR_recv
10210 case TARGET_NR_recv:
10211 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10212 #endif
10213 #ifdef TARGET_NR_recvfrom
10214 case TARGET_NR_recvfrom:
10215 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10216 #endif
10217 #ifdef TARGET_NR_recvmsg
10218 case TARGET_NR_recvmsg:
10219 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10220 #endif
10221 #ifdef TARGET_NR_send
10222 case TARGET_NR_send:
10223 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10224 #endif
10225 #ifdef TARGET_NR_sendmsg
10226 case TARGET_NR_sendmsg:
10227 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10228 #endif
10229 #ifdef TARGET_NR_sendmmsg
10230 case TARGET_NR_sendmmsg:
10231 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10232 #endif
10233 #ifdef TARGET_NR_recvmmsg
10234 case TARGET_NR_recvmmsg:
10235 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10236 #endif
10237 #ifdef TARGET_NR_sendto
10238 case TARGET_NR_sendto:
10239 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10240 #endif
10241 #ifdef TARGET_NR_shutdown
10242 case TARGET_NR_shutdown:
10243 return get_errno(shutdown(arg1, arg2));
10244 #endif
10245 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10246 case TARGET_NR_getrandom:
10247 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10248 if (!p) {
10249 return -TARGET_EFAULT;
10250 }
10251 ret = get_errno(getrandom(p, arg2, arg3));
10252 unlock_user(p, arg1, ret);
10253 return ret;
10254 #endif
10255 #ifdef TARGET_NR_socket
10256 case TARGET_NR_socket:
10257 return do_socket(arg1, arg2, arg3);
10258 #endif
10259 #ifdef TARGET_NR_socketpair
10260 case TARGET_NR_socketpair:
10261 return do_socketpair(arg1, arg2, arg3, arg4);
10262 #endif
10263 #ifdef TARGET_NR_setsockopt
10264 case TARGET_NR_setsockopt:
10265 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10266 #endif
10267 #if defined(TARGET_NR_syslog)
10268 case TARGET_NR_syslog:
10269 {
10270 int len = arg2;
10272 switch (arg1) {
10273 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10274 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10275 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10276 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10277 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10278 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10279 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10280 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10281 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10282 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10283 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10284 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10285 {
10286 if (len < 0) {
10287 return -TARGET_EINVAL;
10288 }
10289 if (len == 0) {
10290 return 0;
10291 }
10292 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10293 if (!p) {
10294 return -TARGET_EFAULT;
10295 }
10296 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10297 unlock_user(p, arg2, arg3);
10298 }
10299 return ret;
10300 default:
10301 return -TARGET_EINVAL;
10302 }
10303 }
10304 break;
10305 #endif
10306 case TARGET_NR_setitimer:
10307 {
10308 struct itimerval value, ovalue, *pvalue;
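/* The guest itimerval is two target_timevals back to back, so
 * it_interval is read from arg2 and it_value from
 * arg2 + sizeof(struct target_timeval). */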
10310 if (arg2) {
10311 pvalue = &value;
10312 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10313 || copy_from_user_timeval(&pvalue->it_value,
10314 arg2 + sizeof(struct target_timeval)))
10315 return -TARGET_EFAULT;
10316 } else {
10317 pvalue = NULL;
10318 }
10319 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10320 if (!is_error(ret) && arg3) {
10321 if (copy_to_user_timeval(arg3,
10322 &ovalue.it_interval)
10323 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10324 &ovalue.it_value))
10325 return -TARGET_EFAULT;
10326 }
10327 }
10328 return ret;
10329 case TARGET_NR_getitimer:
10330 {
10331 struct itimerval value;
10333 ret = get_errno(getitimer(arg1, &value));
10334 if (!is_error(ret) && arg2) {
10335 if (copy_to_user_timeval(arg2,
10336 &value.it_interval)
10337 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10338 &value.it_value))
10339 return -TARGET_EFAULT;
10340 }
10341 }
10342 return ret;
10343 #ifdef TARGET_NR_stat
10344 case TARGET_NR_stat:
10345 if (!(p = lock_user_string(arg1))) {
10346 return -TARGET_EFAULT;
10347 }
10348 ret = get_errno(stat(path(p), &st));
10349 unlock_user(p, arg1, 0);
10350 goto do_stat;
10351 #endif
10352 #ifdef TARGET_NR_lstat
10353 case TARGET_NR_lstat:
10354 if (!(p = lock_user_string(arg1))) {
10355 return -TARGET_EFAULT;
10356 }
10357 ret = get_errno(lstat(path(p), &st));
10358 unlock_user(p, arg1, 0);
10359 goto do_stat;
10360 #endif
10361 #ifdef TARGET_NR_fstat
10362 case TARGET_NR_fstat:
10363 {
10364 ret = get_errno(fstat(arg1, &st));
10365 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10366 do_stat:
10367 #endif
10368 if (!is_error(ret)) {
10369 struct target_stat *target_st;
10371 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10372 return -TARGET_EFAULT;
10373 memset(target_st, 0, sizeof(*target_st));
10374 __put_user(st.st_dev, &target_st->st_dev);
10375 __put_user(st.st_ino, &target_st->st_ino);
10376 __put_user(st.st_mode, &target_st->st_mode);
10377 __put_user(st.st_uid, &target_st->st_uid);
10378 __put_user(st.st_gid, &target_st->st_gid);
10379 __put_user(st.st_nlink, &target_st->st_nlink);
10380 __put_user(st.st_rdev, &target_st->st_rdev);
10381 __put_user(st.st_size, &target_st->st_size);
10382 __put_user(st.st_blksize, &target_st->st_blksize);
10383 __put_user(st.st_blocks, &target_st->st_blocks);
10384 __put_user(st.st_atime, &target_st->target_st_atime);
10385 __put_user(st.st_mtime, &target_st->target_st_mtime);
10386 __put_user(st.st_ctime, &target_st->target_st_ctime);
10387 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10388 __put_user(st.st_atim.tv_nsec,
10389 &target_st->target_st_atime_nsec);
10390 __put_user(st.st_mtim.tv_nsec,
10391 &target_st->target_st_mtime_nsec);
10392 __put_user(st.st_ctim.tv_nsec,
10393 &target_st->target_st_ctime_nsec);
10394 #endif
10395 unlock_user_struct(target_st, arg2, 1);
10396 }
10397 }
10398 return ret;
10399 #endif
10400 case TARGET_NR_vhangup:
10401 return get_errno(vhangup());
10402 #ifdef TARGET_NR_syscall
10403 case TARGET_NR_syscall:
10404 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10405 arg6, arg7, arg8, 0);
10406 #endif
10407 #if defined(TARGET_NR_wait4)
10408 case TARGET_NR_wait4:
10409 {
10410 int status;
10411 abi_long status_ptr = arg2;
10412 struct rusage rusage, *rusage_ptr;
10413 abi_ulong target_rusage = arg4;
10414 abi_long rusage_err;
10415 if (target_rusage)
10416 rusage_ptr = &rusage;
10417 else
10418 rusage_ptr = NULL;
10419 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10420 if (!is_error(ret)) {
10421 if (status_ptr && ret) {
10422 status = host_to_target_waitstatus(status);
10423 if (put_user_s32(status, status_ptr))
10424 return -TARGET_EFAULT;
10425 }
10426 if (target_rusage) {
10427 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10428 if (rusage_err) {
10429 ret = rusage_err;
10430 }
10431 }
10432 }
10433 }
10434 return ret;
10435 #endif
10436 #ifdef TARGET_NR_swapoff
10437 case TARGET_NR_swapoff:
10438 if (!(p = lock_user_string(arg1)))
10439 return -TARGET_EFAULT;
10440 ret = get_errno(swapoff(p));
10441 unlock_user(p, arg1, 0);
10442 return ret;
10443 #endif
10444 case TARGET_NR_sysinfo:
10445 {
10446 struct target_sysinfo *target_value;
10447 struct sysinfo value;
10448 ret = get_errno(sysinfo(&value));
10449 if (!is_error(ret) && arg1)
10450 {
10451 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10452 return -TARGET_EFAULT;
10453 __put_user(value.uptime, &target_value->uptime);
10454 __put_user(value.loads[0], &target_value->loads[0]);
10455 __put_user(value.loads[1], &target_value->loads[1]);
10456 __put_user(value.loads[2], &target_value->loads[2]);
10457 __put_user(value.totalram, &target_value->totalram);
10458 __put_user(value.freeram, &target_value->freeram);
10459 __put_user(value.sharedram, &target_value->sharedram);
10460 __put_user(value.bufferram, &target_value->bufferram);
10461 __put_user(value.totalswap, &target_value->totalswap);
10462 __put_user(value.freeswap, &target_value->freeswap);
10463 __put_user(value.procs, &target_value->procs);
10464 __put_user(value.totalhigh, &target_value->totalhigh);
10465 __put_user(value.freehigh, &target_value->freehigh);
10466 __put_user(value.mem_unit, &target_value->mem_unit);
10467 unlock_user_struct(target_value, arg1, 1);
10468 }
10469 }
10470 return ret;
10471 #ifdef TARGET_NR_ipc
10472 case TARGET_NR_ipc:
10473 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10474 #endif
10475 #ifdef TARGET_NR_semget
10476 case TARGET_NR_semget:
10477 return get_errno(semget(arg1, arg2, arg3));
10478 #endif
10479 #ifdef TARGET_NR_semop
10480 case TARGET_NR_semop:
10481 return do_semtimedop(arg1, arg2, arg3, 0, false);
10482 #endif
10483 #ifdef TARGET_NR_semtimedop
10484 case TARGET_NR_semtimedop:
10485 return do_semtimedop(arg1, arg2, arg3, arg4, false);
10486 #endif
10487 #ifdef TARGET_NR_semtimedop_time64
10488 case TARGET_NR_semtimedop_time64:
10489 return do_semtimedop(arg1, arg2, arg3, arg4, true);
10490 #endif
10491 #ifdef TARGET_NR_semctl
10492 case TARGET_NR_semctl:
10493 return do_semctl(arg1, arg2, arg3, arg4);
10494 #endif
10495 #ifdef TARGET_NR_msgctl
10496 case TARGET_NR_msgctl:
10497 return do_msgctl(arg1, arg2, arg3);
10498 #endif
10499 #ifdef TARGET_NR_msgget
10500 case TARGET_NR_msgget:
10501 return get_errno(msgget(arg1, arg2));
10502 #endif
10503 #ifdef TARGET_NR_msgrcv
10504 case TARGET_NR_msgrcv:
10505 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10506 #endif
10507 #ifdef TARGET_NR_msgsnd
10508 case TARGET_NR_msgsnd:
10509 return do_msgsnd(arg1, arg2, arg3, arg4);
10510 #endif
10511 #ifdef TARGET_NR_shmget
10512 case TARGET_NR_shmget:
10513 return get_errno(shmget(arg1, arg2, arg3));
10514 #endif
10515 #ifdef TARGET_NR_shmctl
10516 case TARGET_NR_shmctl:
10517 return do_shmctl(arg1, arg2, arg3);
10518 #endif
10519 #ifdef TARGET_NR_shmat
10520 case TARGET_NR_shmat:
10521 return do_shmat(cpu_env, arg1, arg2, arg3);
10522 #endif
10523 #ifdef TARGET_NR_shmdt
10524 case TARGET_NR_shmdt:
10525 return do_shmdt(arg1);
10526 #endif
10527 case TARGET_NR_fsync:
10528 return get_errno(fsync(arg1));
10529 case TARGET_NR_clone:
10530 /* Linux manages to have three different orderings for its
10531 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10532 * match the kernel's CONFIG_CLONE_* settings.
10533 * Microblaze is further special in that it uses a sixth
10534 * implicit argument to clone for the TLS pointer.
10535 */
10536 #if defined(TARGET_MICROBLAZE)
10537 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10538 #elif defined(TARGET_CLONE_BACKWARDS)
10539 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10540 #elif defined(TARGET_CLONE_BACKWARDS2)
10541 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10542 #else
10543 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10544 #endif
10545 return ret;
10546 #ifdef __NR_exit_group
10547 /* new thread calls */
10548 case TARGET_NR_exit_group:
10549 preexit_cleanup(cpu_env, arg1);
10550 return get_errno(exit_group(arg1));
10551 #endif
10552 case TARGET_NR_setdomainname:
10553 if (!(p = lock_user_string(arg1)))
10554 return -TARGET_EFAULT;
10555 ret = get_errno(setdomainname(p, arg2));
10556 unlock_user(p, arg1, 0);
10557 return ret;
10558 case TARGET_NR_uname:
10559 /* no need to transcode because we use the linux syscall */
10560 {
10561 struct new_utsname * buf;
10563 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10564 return -TARGET_EFAULT;
10565 ret = get_errno(sys_uname(buf));
10566 if (!is_error(ret)) {
10567 /* Overwrite the native machine name with whatever is being
10568 emulated. */
10569 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10570 sizeof(buf->machine));
10571 /* Allow the user to override the reported release. */
10572 if (qemu_uname_release && *qemu_uname_release) {
10573 g_strlcpy(buf->release, qemu_uname_release,
10574 sizeof(buf->release));
10575 }
10576 }
10577 unlock_user_struct(buf, arg1, 1);
10578 }
10579 return ret;
10580 #ifdef TARGET_I386
10581 case TARGET_NR_modify_ldt:
10582 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10583 #if !defined(TARGET_X86_64)
10584 case TARGET_NR_vm86:
10585 return do_vm86(cpu_env, arg1, arg2);
10586 #endif
10587 #endif
10588 #if defined(TARGET_NR_adjtimex)
10589 case TARGET_NR_adjtimex:
10590 {
10591 struct timex host_buf;
10593 if (target_to_host_timex(&host_buf, arg1) != 0) {
10594 return -TARGET_EFAULT;
10595 }
10596 ret = get_errno(adjtimex(&host_buf));
10597 if (!is_error(ret)) {
10598 if (host_to_target_timex(arg1, &host_buf) != 0) {
10599 return -TARGET_EFAULT;
10600 }
10601 }
10602 }
10603 return ret;
10604 #endif
10605 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10606 case TARGET_NR_clock_adjtime:
10607 {
10608 struct timex htx, *phtx = &htx;
10610 if (target_to_host_timex(phtx, arg2) != 0) {
10611 return -TARGET_EFAULT;
10612 }
10613 ret = get_errno(clock_adjtime(arg1, phtx));
10614 if (!is_error(ret) && phtx) {
10615 if (host_to_target_timex(arg2, phtx) != 0) {
10616 return -TARGET_EFAULT;
10617 }
10618 }
10619 }
10620 return ret;
10621 #endif
10622 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10623 case TARGET_NR_clock_adjtime64:
10624 {
10625 struct timex htx;
10627 if (target_to_host_timex64(&htx, arg2) != 0) {
10628 return -TARGET_EFAULT;
10629 }
10630 ret = get_errno(clock_adjtime(arg1, &htx));
10631 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10632 return -TARGET_EFAULT;
10633 }
10634 }
10635 return ret;
10636 #endif
10637 case TARGET_NR_getpgid:
10638 return get_errno(getpgid(arg1));
10639 case TARGET_NR_fchdir:
10640 return get_errno(fchdir(arg1));
10641 case TARGET_NR_personality:
10642 return get_errno(personality(arg1));
10643 #ifdef TARGET_NR__llseek /* Not on alpha */
10644 case TARGET_NR__llseek:
10645 {
10646 int64_t res;
10647 #if !defined(__NR_llseek)
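/* Hosts without __NR_llseek are 64-bit, where a plain lseek() already
 * covers the full 64-bit offset range. */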
10648 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10649 if (res == -1) {
10650 ret = get_errno(res);
10651 } else {
10652 ret = 0;
10653 }
10654 #else
10655 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10656 #endif
10657 if ((ret == 0) && put_user_s64(res, arg4)) {
10658 return -TARGET_EFAULT;
10659 }
10660 }
10661 return ret;
10662 #endif
10663 #ifdef TARGET_NR_getdents
10664 case TARGET_NR_getdents:
10665 return do_getdents(arg1, arg2, arg3);
10666 #endif /* TARGET_NR_getdents */
10667 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10668 case TARGET_NR_getdents64:
10669 return do_getdents64(arg1, arg2, arg3);
10670 #endif /* TARGET_NR_getdents64 */
10671 #if defined(TARGET_NR__newselect)
10672 case TARGET_NR__newselect:
10673 return do_select(arg1, arg2, arg3, arg4, arg5);
10674 #endif
10675 #ifdef TARGET_NR_poll
10676 case TARGET_NR_poll:
10677 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10678 #endif
10679 #ifdef TARGET_NR_ppoll
10680 case TARGET_NR_ppoll:
10681 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10682 #endif
10683 #ifdef TARGET_NR_ppoll_time64
10684 case TARGET_NR_ppoll_time64:
10685 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10686 #endif
10687 case TARGET_NR_flock:
10688 /* NOTE: the flock constant seems to be the same for every
10689 Linux platform */
10690 return get_errno(safe_flock(arg1, arg2));
10691 case TARGET_NR_readv:
10692 {
10693 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10694 if (vec != NULL) {
10695 ret = get_errno(safe_readv(arg1, vec, arg3));
10696 unlock_iovec(vec, arg2, arg3, 1);
10697 } else {
10698 ret = -host_to_target_errno(errno);
10699 }
10700 }
10701 return ret;
10702 case TARGET_NR_writev:
10703 {
10704 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10705 if (vec != NULL) {
10706 ret = get_errno(safe_writev(arg1, vec, arg3));
10707 unlock_iovec(vec, arg2, arg3, 0);
10708 } else {
10709 ret = -host_to_target_errno(errno);
10710 }
10711 }
10712 return ret;
10713 #if defined(TARGET_NR_preadv)
10714 case TARGET_NR_preadv:
10715 {
10716 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10717 if (vec != NULL) {
10718 unsigned long low, high;
10720 target_to_host_low_high(arg4, arg5, &low, &high);
10721 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10722 unlock_iovec(vec, arg2, arg3, 1);
10723 } else {
10724 ret = -host_to_target_errno(errno);
10725 }
10726 }
10727 return ret;
10728 #endif
10729 #if defined(TARGET_NR_pwritev)
10730 case TARGET_NR_pwritev:
10731 {
10732 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10733 if (vec != NULL) {
10734 unsigned long low, high;
10736 target_to_host_low_high(arg4, arg5, &low, &high);
10737 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10738 unlock_iovec(vec, arg2, arg3, 0);
10739 } else {
10740 ret = -host_to_target_errno(errno);
10741 }
10742 }
10743 return ret;
10744 #endif
10745 case TARGET_NR_getsid:
10746 return get_errno(getsid(arg1));
10747 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10748 case TARGET_NR_fdatasync:
10749 return get_errno(fdatasync(arg1));
10750 #endif
10751 case TARGET_NR_sched_getaffinity:
10752 {
10753 unsigned int mask_size;
10754 unsigned long *mask;
10756 /*
10757 * sched_getaffinity needs multiples of ulong, so need to take
10758 * care of mismatches between target ulong and host ulong sizes.
10759 */
10760 if (arg2 & (sizeof(abi_ulong) - 1)) {
10761 return -TARGET_EINVAL;
10762 }
10763 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10765 mask = alloca(mask_size);
10766 memset(mask, 0, mask_size);
10767 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10769 if (!is_error(ret)) {
10770 if (ret > arg2) {
10771 /* More data returned than the caller's buffer will fit.
10772 * This only happens if sizeof(abi_long) < sizeof(long)
10773 * and the caller passed us a buffer holding an odd number
10774 * of abi_longs. If the host kernel is actually using the
10775 * extra 4 bytes then fail EINVAL; otherwise we can just
10776 * ignore them and only copy the interesting part.
10777 */
10778 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10779 if (numcpus > arg2 * 8) {
10780 return -TARGET_EINVAL;
10781 }
10782 ret = arg2;
10783 }
10785 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10786 return -TARGET_EFAULT;
10787 }
10788 }
10789 }
10790 return ret;
10791 case TARGET_NR_sched_setaffinity:
10792 {
10793 unsigned int mask_size;
10794 unsigned long *mask;
10796 /*
10797 * sched_setaffinity needs multiples of ulong, so need to take
10798 * care of mismatches between target ulong and host ulong sizes.
10799 */
10800 if (arg2 & (sizeof(abi_ulong) - 1)) {
10801 return -TARGET_EINVAL;
10802 }
10803 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10804 mask = alloca(mask_size);
10806 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10807 if (ret) {
10808 return ret;
10809 }
10811 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10812 }
10813 case TARGET_NR_getcpu:
10814 {
10815 unsigned cpu, node;
10816 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10817 arg2 ? &node : NULL,
10818 NULL));
10819 if (is_error(ret)) {
10820 return ret;
10821 }
10822 if (arg1 && put_user_u32(cpu, arg1)) {
10823 return -TARGET_EFAULT;
10824 }
10825 if (arg2 && put_user_u32(node, arg2)) {
10826 return -TARGET_EFAULT;
10827 }
10828 }
10829 return ret;
10830 case TARGET_NR_sched_setparam:
10831 {
10832 struct target_sched_param *target_schp;
10833 struct sched_param schp;
10835 if (arg2 == 0) {
10836 return -TARGET_EINVAL;
10837 }
10838 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10839 return -TARGET_EFAULT;
10840 }
10841 schp.sched_priority = tswap32(target_schp->sched_priority);
10842 unlock_user_struct(target_schp, arg2, 0);
10843 return get_errno(sys_sched_setparam(arg1, &schp));
10844 }
10845 case TARGET_NR_sched_getparam:
10846 {
10847 struct target_sched_param *target_schp;
10848 struct sched_param schp;
10850 if (arg2 == 0) {
10851 return -TARGET_EINVAL;
10852 }
10853 ret = get_errno(sys_sched_getparam(arg1, &schp));
10854 if (!is_error(ret)) {
10855 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10856 return -TARGET_EFAULT;
10857 }
10858 target_schp->sched_priority = tswap32(schp.sched_priority);
10859 unlock_user_struct(target_schp, arg2, 1);
10860 }
10861 }
10862 return ret;
10863 case TARGET_NR_sched_setscheduler:
10864 {
10865 struct target_sched_param *target_schp;
10866 struct sched_param schp;
10867 if (arg3 == 0) {
10868 return -TARGET_EINVAL;
10869 }
10870 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10871 return -TARGET_EFAULT;
10872 }
10873 schp.sched_priority = tswap32(target_schp->sched_priority);
10874 unlock_user_struct(target_schp, arg3, 0);
10875 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10876 }
10877 case TARGET_NR_sched_getscheduler:
10878 return get_errno(sys_sched_getscheduler(arg1));
10879 case TARGET_NR_sched_getattr:
10880 {
10881 struct target_sched_attr *target_scha;
10882 struct sched_attr scha;
10883 if (arg2 == 0) {
10884 return -TARGET_EINVAL;
10885 }
10886 if (arg3 > sizeof(scha)) {
10887 arg3 = sizeof(scha);
10888 }
10889 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10890 if (!is_error(ret)) {
10891 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10892 if (!target_scha) {
10893 return -TARGET_EFAULT;
10894 }
10895 target_scha->size = tswap32(scha.size);
10896 target_scha->sched_policy = tswap32(scha.sched_policy);
10897 target_scha->sched_flags = tswap64(scha.sched_flags);
10898 target_scha->sched_nice = tswap32(scha.sched_nice);
10899 target_scha->sched_priority = tswap32(scha.sched_priority);
10900 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10901 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10902 target_scha->sched_period = tswap64(scha.sched_period);
10903 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10904 target_scha->sched_util_min = tswap32(scha.sched_util_min);
10905 target_scha->sched_util_max = tswap32(scha.sched_util_max);
10906 }
10907 unlock_user(target_scha, arg2, arg3);
10908 }
10909 return ret;
10910 }
10911 case TARGET_NR_sched_setattr:
10912 {
10913 struct target_sched_attr *target_scha;
10914 struct sched_attr scha;
10915 uint32_t size;
10916 int zeroed;
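/* The guest declares the size of its sched_attr: 0 means the original
 * structure, anything smaller than that is rejected with E2BIG (with
 * the size QEMU supports written back), and bytes beyond what QEMU
 * understands must be zero, as verified by check_zeroed_user() below. */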
10917 if (arg2 == 0) {
10918 return -TARGET_EINVAL;
10919 }
10920 if (get_user_u32(size, arg2)) {
10921 return -TARGET_EFAULT;
10922 }
10923 if (!size) {
10924 size = offsetof(struct target_sched_attr, sched_util_min);
10925 }
10926 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10927 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10928 return -TARGET_EFAULT;
10929 }
10930 return -TARGET_E2BIG;
10931 }
10933 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10934 if (zeroed < 0) {
10935 return zeroed;
10936 } else if (zeroed == 0) {
10937 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10938 return -TARGET_EFAULT;
10939 }
10940 return -TARGET_E2BIG;
10941 }
10942 if (size > sizeof(struct target_sched_attr)) {
10943 size = sizeof(struct target_sched_attr);
10944 }
10946 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10947 if (!target_scha) {
10948 return -TARGET_EFAULT;
10949 }
10950 scha.size = size;
10951 scha.sched_policy = tswap32(target_scha->sched_policy);
10952 scha.sched_flags = tswap64(target_scha->sched_flags);
10953 scha.sched_nice = tswap32(target_scha->sched_nice);
10954 scha.sched_priority = tswap32(target_scha->sched_priority);
10955 scha.sched_runtime = tswap64(target_scha->sched_runtime);
10956 scha.sched_deadline = tswap64(target_scha->sched_deadline);
10957 scha.sched_period = tswap64(target_scha->sched_period);
10958 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10959 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10960 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10961 }
10962 unlock_user(target_scha, arg2, 0);
10963 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10964 }
10965 case TARGET_NR_sched_yield:
10966 return get_errno(sched_yield());
10967 case TARGET_NR_sched_get_priority_max:
10968 return get_errno(sched_get_priority_max(arg1));
10969 case TARGET_NR_sched_get_priority_min:
10970 return get_errno(sched_get_priority_min(arg1));
10971 #ifdef TARGET_NR_sched_rr_get_interval
10972 case TARGET_NR_sched_rr_get_interval:
10973 {
10974 struct timespec ts;
10975 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10976 if (!is_error(ret)) {
10977 ret = host_to_target_timespec(arg2, &ts);
10978 }
10979 }
10980 return ret;
10981 #endif
10982 #ifdef TARGET_NR_sched_rr_get_interval_time64
10983 case TARGET_NR_sched_rr_get_interval_time64:
10984 {
10985 struct timespec ts;
10986 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10987 if (!is_error(ret)) {
10988 ret = host_to_target_timespec64(arg2, &ts);
10989 }
10990 }
10991 return ret;
10992 #endif
10993 #if defined(TARGET_NR_nanosleep)
10994 case TARGET_NR_nanosleep:
10995 {
10996 struct timespec req, rem;
10997 target_to_host_timespec(&req, arg1);
10998 ret = get_errno(safe_nanosleep(&req, &rem));
10999 if (is_error(ret) && arg2) {
11000 host_to_target_timespec(arg2, &rem);
11001 }
11002 }
11003 return ret;
11004 #endif
11005 case TARGET_NR_prctl:
11006 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11007 break;
11008 #ifdef TARGET_NR_arch_prctl
11009 case TARGET_NR_arch_prctl:
11010 return do_arch_prctl(cpu_env, arg1, arg2);
11011 #endif
11012 #ifdef TARGET_NR_pread64
11013 case TARGET_NR_pread64:
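/* Some 32-bit ABIs pass 64-bit values in aligned register pairs;
 * regpairs_aligned() detects this, in which case the offset halves
 * arrive one register later than usual. */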
11014 if (regpairs_aligned(cpu_env, num)) {
11015 arg4 = arg5;
11016 arg5 = arg6;
11017 }
11018 if (arg2 == 0 && arg3 == 0) {
11019 /* Special-case NULL buffer and zero length, which should succeed */
11020 p = 0;
11021 } else {
11022 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11023 if (!p) {
11024 return -TARGET_EFAULT;
11025 }
11026 }
11027 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11028 unlock_user(p, arg2, ret);
11029 return ret;
11030 case TARGET_NR_pwrite64:
11031 if (regpairs_aligned(cpu_env, num)) {
11032 arg4 = arg5;
11033 arg5 = arg6;
11034 }
11035 if (arg2 == 0 && arg3 == 0) {
11036 /* Special-case NULL buffer and zero length, which should succeed */
11037 p = 0;
11038 } else {
11039 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11040 if (!p) {
11041 return -TARGET_EFAULT;
11042 }
11043 }
11044 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11045 unlock_user(p, arg2, 0);
11046 return ret;
11047 #endif
11048 case TARGET_NR_getcwd:
11049 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11050 return -TARGET_EFAULT;
11051 ret = get_errno(sys_getcwd1(p, arg2));
11052 unlock_user(p, arg1, ret);
11053 return ret;
11054 case TARGET_NR_capget:
11055 case TARGET_NR_capset:
11056 {
11057 struct target_user_cap_header *target_header;
11058 struct target_user_cap_data *target_data = NULL;
11059 struct __user_cap_header_struct header;
11060 struct __user_cap_data_struct data[2];
11061 struct __user_cap_data_struct *dataptr = NULL;
11062 int i, target_datalen;
11063 int data_items = 1;
11065 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11066 return -TARGET_EFAULT;
11067 }
11068 header.version = tswap32(target_header->version);
11069 header.pid = tswap32(target_header->pid);
11071 if (header.version != _LINUX_CAPABILITY_VERSION) {
11072 /* Version 2 and up takes pointer to two user_data structs */
11073 data_items = 2;
11074 }
11076 target_datalen = sizeof(*target_data) * data_items;
11078 if (arg2) {
11079 if (num == TARGET_NR_capget) {
11080 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11081 } else {
11082 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11083 }
11084 if (!target_data) {
11085 unlock_user_struct(target_header, arg1, 0);
11086 return -TARGET_EFAULT;
11087 }
11089 if (num == TARGET_NR_capset) {
11090 for (i = 0; i < data_items; i++) {
11091 data[i].effective = tswap32(target_data[i].effective);
11092 data[i].permitted = tswap32(target_data[i].permitted);
11093 data[i].inheritable = tswap32(target_data[i].inheritable);
11094 }
11095 }
11097 dataptr = data;
11098 }
11100 if (num == TARGET_NR_capget) {
11101 ret = get_errno(capget(&header, dataptr));
11102 } else {
11103 ret = get_errno(capset(&header, dataptr));
11104 }
11106 /* The kernel always updates version for both capget and capset */
11107 target_header->version = tswap32(header.version);
11108 unlock_user_struct(target_header, arg1, 1);
11110 if (arg2) {
11111 if (num == TARGET_NR_capget) {
11112 for (i = 0; i < data_items; i++) {
11113 target_data[i].effective = tswap32(data[i].effective);
11114 target_data[i].permitted = tswap32(data[i].permitted);
11115 target_data[i].inheritable = tswap32(data[i].inheritable);
11116 }
11117 unlock_user(target_data, arg2, target_datalen);
11118 } else {
11119 unlock_user(target_data, arg2, 0);
11120 }
11121 }
11122 return ret;
11123 }
11124 case TARGET_NR_sigaltstack:
11125 return do_sigaltstack(arg1, arg2, cpu_env);
11127 #ifdef CONFIG_SENDFILE
11128 #ifdef TARGET_NR_sendfile
11129 case TARGET_NR_sendfile:
11130 {
11131 off_t *offp = NULL;
11132 off_t off;
11133 if (arg3) {
11134 ret = get_user_sal(off, arg3);
11135 if (is_error(ret)) {
11136 return ret;
11137 }
11138 offp = &off;
11139 }
11140 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11141 if (!is_error(ret) && arg3) {
11142 abi_long ret2 = put_user_sal(off, arg3);
11143 if (is_error(ret2)) {
11144 ret = ret2;
11145 }
11146 }
11147 return ret;
11148 }
11149 #endif
11150 #ifdef TARGET_NR_sendfile64
11151 case TARGET_NR_sendfile64:
11152 {
11153 off_t *offp = NULL;
11154 off_t off;
11155 if (arg3) {
11156 ret = get_user_s64(off, arg3);
11157 if (is_error(ret)) {
11158 return ret;
11159 }
11160 offp = &off;
11161 }
11162 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11163 if (!is_error(ret) && arg3) {
11164 abi_long ret2 = put_user_s64(off, arg3);
11165 if (is_error(ret2)) {
11166 ret = ret2;
11167 }
11168 }
11169 return ret;
11170 }
11171 #endif
11172 #endif
11173 #ifdef TARGET_NR_vfork
11174 case TARGET_NR_vfork:
11175 return get_errno(do_fork(cpu_env,
11176 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11177 0, 0, 0, 0));
11178 #endif
11179 #ifdef TARGET_NR_ugetrlimit
11180 case TARGET_NR_ugetrlimit:
11181 {
11182 struct rlimit rlim;
11183 int resource = target_to_host_resource(arg1);
11184 ret = get_errno(getrlimit(resource, &rlim));
11185 if (!is_error(ret)) {
11186 struct target_rlimit *target_rlim;
11187 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11188 return -TARGET_EFAULT;
11189 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11190 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11191 unlock_user_struct(target_rlim, arg2, 1);
11192 }
11193 return ret;
11194 }
11195 #endif
11196 #ifdef TARGET_NR_truncate64
11197 case TARGET_NR_truncate64:
11198 if (!(p = lock_user_string(arg1)))
11199 return -TARGET_EFAULT;
11200 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11201 unlock_user(p, arg1, 0);
11202 return ret;
11203 #endif
11204 #ifdef TARGET_NR_ftruncate64
11205 case TARGET_NR_ftruncate64:
11206 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11207 #endif
11208 #ifdef TARGET_NR_stat64
11209 case TARGET_NR_stat64:
11210 if (!(p = lock_user_string(arg1))) {
11211 return -TARGET_EFAULT;
11212 }
11213 ret = get_errno(stat(path(p), &st));
11214 unlock_user(p, arg1, 0);
11215 if (!is_error(ret))
11216 ret = host_to_target_stat64(cpu_env, arg2, &st);
11217 return ret;
11218 #endif
11219 #ifdef TARGET_NR_lstat64
11220 case TARGET_NR_lstat64:
11221 if (!(p = lock_user_string(arg1))) {
11222 return -TARGET_EFAULT;
11223 }
11224 ret = get_errno(lstat(path(p), &st));
11225 unlock_user(p, arg1, 0);
11226 if (!is_error(ret))
11227 ret = host_to_target_stat64(cpu_env, arg2, &st);
11228 return ret;
11229 #endif
11230 #ifdef TARGET_NR_fstat64
11231 case TARGET_NR_fstat64:
11232 ret = get_errno(fstat(arg1, &st));
11233 if (!is_error(ret))
11234 ret = host_to_target_stat64(cpu_env, arg2, &st);
11235 return ret;
11236 #endif
11237 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11238 #ifdef TARGET_NR_fstatat64
11239 case TARGET_NR_fstatat64:
11240 #endif
11241 #ifdef TARGET_NR_newfstatat
11242 case TARGET_NR_newfstatat:
11243 #endif
11244 if (!(p = lock_user_string(arg2))) {
11245 return -TARGET_EFAULT;
11246 }
11247 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11248 unlock_user(p, arg2, 0);
11249 if (!is_error(ret))
11250 ret = host_to_target_stat64(cpu_env, arg3, &st);
11251 return ret;
11252 #endif
11253 #if defined(TARGET_NR_statx)
11254 case TARGET_NR_statx:
11255 {
11256 struct target_statx *target_stx;
11257 int dirfd = arg1;
11258 int flags = arg3;
11260 p = lock_user_string(arg2);
11261 if (p == NULL) {
11262 return -TARGET_EFAULT;
11263 }
11264 #if defined(__NR_statx)
11265 {
11266 /*
11267 * It is assumed that struct statx is architecture independent.
11268 */
11269 struct target_statx host_stx;
11270 int mask = arg4;
11272 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11273 if (!is_error(ret)) {
11274 if (host_to_target_statx(&host_stx, arg5) != 0) {
11275 unlock_user(p, arg2, 0);
11276 return -TARGET_EFAULT;
11277 }
11278 }
11280 if (ret != -TARGET_ENOSYS) {
11281 unlock_user(p, arg2, 0);
11282 return ret;
11283 }
11284 }
11285 #endif
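/* No host statx() here: fall back to fstatat() and synthesize the
 * guest's statx result from struct stat below. */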
11286 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11287 unlock_user(p, arg2, 0);
11289 if (!is_error(ret)) {
11290 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11291 return -TARGET_EFAULT;
11292 }
11293 memset(target_stx, 0, sizeof(*target_stx));
11294 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11295 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11296 __put_user(st.st_ino, &target_stx->stx_ino);
11297 __put_user(st.st_mode, &target_stx->stx_mode);
11298 __put_user(st.st_uid, &target_stx->stx_uid);
11299 __put_user(st.st_gid, &target_stx->stx_gid);
11300 __put_user(st.st_nlink, &target_stx->stx_nlink);
11301 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11302 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11303 __put_user(st.st_size, &target_stx->stx_size);
11304 __put_user(st.st_blksize, &target_stx->stx_blksize);
11305 __put_user(st.st_blocks, &target_stx->stx_blocks);
11306 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11307 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11308 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11309 unlock_user_struct(target_stx, arg5, 1);
11310 }
11311 }
11312 return ret;
11313 #endif
11314 #ifdef TARGET_NR_lchown
11315 case TARGET_NR_lchown:
11316 if (!(p = lock_user_string(arg1)))
11317 return -TARGET_EFAULT;
11318 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11319 unlock_user(p, arg1, 0);
11320 return ret;
11321 #endif
11322 #ifdef TARGET_NR_getuid
11323 case TARGET_NR_getuid:
11324 return get_errno(high2lowuid(getuid()));
11325 #endif
11326 #ifdef TARGET_NR_getgid
11327 case TARGET_NR_getgid:
11328 return get_errno(high2lowgid(getgid()));
11329 #endif
11330 #ifdef TARGET_NR_geteuid
11331 case TARGET_NR_geteuid:
11332 return get_errno(high2lowuid(geteuid()));
11333 #endif
11334 #ifdef TARGET_NR_getegid
11335 case TARGET_NR_getegid:
11336 return get_errno(high2lowgid(getegid()));
11337 #endif
11338 case TARGET_NR_setreuid:
11339 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11340 case TARGET_NR_setregid:
11341 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11342 case TARGET_NR_getgroups:
11343 {
11344 int gidsetsize = arg1;
11345 target_id *target_grouplist;
11346 gid_t *grouplist;
11347 int i;
11349 grouplist = alloca(gidsetsize * sizeof(gid_t));
11350 ret = get_errno(getgroups(gidsetsize, grouplist));
11351 if (gidsetsize == 0)
11352 return ret;
11353 if (!is_error(ret)) {
11354 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11355 if (!target_grouplist)
11356 return -TARGET_EFAULT;
11357 for(i = 0;i < ret; i++)
11358 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11359 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11360 }
11361 }
11362 return ret;
11363 case TARGET_NR_setgroups:
11364 {
11365 int gidsetsize = arg1;
11366 target_id *target_grouplist;
11367 gid_t *grouplist = NULL;
11368 int i;
11369 if (gidsetsize) {
11370 grouplist = alloca(gidsetsize * sizeof(gid_t));
11371 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11372 if (!target_grouplist) {
11373 return -TARGET_EFAULT;
11374 }
11375 for (i = 0; i < gidsetsize; i++) {
11376 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11377 }
11378 unlock_user(target_grouplist, arg2, 0);
11379 }
11380 return get_errno(setgroups(gidsetsize, grouplist));
11381 }
11382 case TARGET_NR_fchown:
11383 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11384 #if defined(TARGET_NR_fchownat)
11385 case TARGET_NR_fchownat:
11386 if (!(p = lock_user_string(arg2)))
11387 return -TARGET_EFAULT;
11388 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11389 low2highgid(arg4), arg5));
11390 unlock_user(p, arg2, 0);
11391 return ret;
11392 #endif
11393 #ifdef TARGET_NR_setresuid
11394 case TARGET_NR_setresuid:
11395 return get_errno(sys_setresuid(low2highuid(arg1),
11396 low2highuid(arg2),
11397 low2highuid(arg3)));
11398 #endif
11399 #ifdef TARGET_NR_getresuid
11400 case TARGET_NR_getresuid:
11401 {
11402 uid_t ruid, euid, suid;
11403 ret = get_errno(getresuid(&ruid, &euid, &suid));
11404 if (!is_error(ret)) {
11405 if (put_user_id(high2lowuid(ruid), arg1)
11406 || put_user_id(high2lowuid(euid), arg2)
11407 || put_user_id(high2lowuid(suid), arg3))
11408 return -TARGET_EFAULT;
11409 }
11410 }
11411 return ret;
11412 #endif
11413 #ifdef TARGET_NR_setresgid
11414 case TARGET_NR_setresgid:
11415 return get_errno(sys_setresgid(low2highgid(arg1),
11416 low2highgid(arg2),
11417 low2highgid(arg3)));
11418 #endif
11419 #ifdef TARGET_NR_getresgid
11420 case TARGET_NR_getresgid:
11421 {
11422 gid_t rgid, egid, sgid;
11423 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11424 if (!is_error(ret)) {
11425 if (put_user_id(high2lowgid(rgid), arg1)
11426 || put_user_id(high2lowgid(egid), arg2)
11427 || put_user_id(high2lowgid(sgid), arg3))
11428 return -TARGET_EFAULT;
11429 }
11430 }
11431 return ret;
11432 #endif
11433 #ifdef TARGET_NR_chown
11434 case TARGET_NR_chown:
11435 if (!(p = lock_user_string(arg1)))
11436 return -TARGET_EFAULT;
11437 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11438 unlock_user(p, arg1, 0);
11439 return ret;
11440 #endif
11441 case TARGET_NR_setuid:
11442 return get_errno(sys_setuid(low2highuid(arg1)));
11443 case TARGET_NR_setgid:
11444 return get_errno(sys_setgid(low2highgid(arg1)));
11445 case TARGET_NR_setfsuid:
11446 return get_errno(setfsuid(arg1));
11447 case TARGET_NR_setfsgid:
11448 return get_errno(setfsgid(arg1));
11450 #ifdef TARGET_NR_lchown32
11451 case TARGET_NR_lchown32:
11452 if (!(p = lock_user_string(arg1)))
11453 return -TARGET_EFAULT;
11454 ret = get_errno(lchown(p, arg2, arg3));
11455 unlock_user(p, arg1, 0);
11456 return ret;
11457 #endif
11458 #ifdef TARGET_NR_getuid32
11459 case TARGET_NR_getuid32:
11460 return get_errno(getuid());
11461 #endif
11463 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11464 /* Alpha specific */
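/* Alpha's getxuid returns two values: the real uid as the syscall
 * result and the effective uid in register a4. */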
11465 case TARGET_NR_getxuid:
11466 {
11467 uid_t euid;
11468 euid=geteuid();
11469 cpu_env->ir[IR_A4]=euid;
11470 }
11471 return get_errno(getuid());
11472 #endif
11473 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11474 /* Alpha specific */
11475 case TARGET_NR_getxgid:
11476 {
11477 uid_t egid;
11478 egid=getegid();
11479 cpu_env->ir[IR_A4]=egid;
11480 }
11481 return get_errno(getgid());
11482 #endif
11483 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11484 /* Alpha specific */
11485 case TARGET_NR_osf_getsysinfo:
11486 ret = -TARGET_EOPNOTSUPP;
11487 switch (arg1) {
11488 case TARGET_GSI_IEEE_FP_CONTROL:
11489 {
11490 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11491 uint64_t swcr = cpu_env->swcr;
11493 swcr &= ~SWCR_STATUS_MASK;
11494 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11496 if (put_user_u64 (swcr, arg2))
11497 return -TARGET_EFAULT;
11498 ret = 0;
11499 }
11500 break;
11502 /* case GSI_IEEE_STATE_AT_SIGNAL:
11503 -- Not implemented in linux kernel.
11504 case GSI_UACPROC:
11505 -- Retrieves current unaligned access state; not much used.
11506 case GSI_PROC_TYPE:
11507 -- Retrieves implver information; surely not used.
11508 case GSI_GET_HWRPB:
11509 -- Grabs a copy of the HWRPB; surely not used.
11510 */
11511 }
11512 return ret;
11513 #endif
11514 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11515 /* Alpha specific */
11516 case TARGET_NR_osf_setsysinfo:
11517 ret = -TARGET_EOPNOTSUPP;
11518 switch (arg1) {
11519 case TARGET_SSI_IEEE_FP_CONTROL:
11520 {
11521 uint64_t swcr, fpcr;
11523 if (get_user_u64 (swcr, arg2)) {
11524 return -TARGET_EFAULT;
11525 }
11527 /*
11528 * The kernel calls swcr_update_status to update the
11529 * status bits from the fpcr at every point that it
11530 * could be queried. Therefore, we store the status
11531 * bits only in FPCR.
11532 */
11533 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11535 fpcr = cpu_alpha_load_fpcr(cpu_env);
11536 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11537 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11538 cpu_alpha_store_fpcr(cpu_env, fpcr);
11539 ret = 0;
11540 }
11541 break;
11543 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11544 {
11545 uint64_t exc, fpcr, fex;
11547 if (get_user_u64(exc, arg2)) {
11548 return -TARGET_EFAULT;
11549 }
11550 exc &= SWCR_STATUS_MASK;
11551 fpcr = cpu_alpha_load_fpcr(cpu_env);
11553 /* Old exceptions are not signaled. */
11554 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11555 fex = exc & ~fex;
11556 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11557 fex &= (cpu_env)->swcr;
11559 /* Update the hardware fpcr. */
11560 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11561 cpu_alpha_store_fpcr(cpu_env, fpcr);
11563 if (fex) {
11564 int si_code = TARGET_FPE_FLTUNK;
11565 target_siginfo_t info;
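/* Each test below overwrites si_code, so when several exception bits
 * are set the one checked last (invalid operation) wins. */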
11567 if (fex & SWCR_TRAP_ENABLE_DNO) {
11568 si_code = TARGET_FPE_FLTUND;
11569 }
11570 if (fex & SWCR_TRAP_ENABLE_INE) {
11571 si_code = TARGET_FPE_FLTRES;
11572 }
11573 if (fex & SWCR_TRAP_ENABLE_UNF) {
11574 si_code = TARGET_FPE_FLTUND;
11575 }
11576 if (fex & SWCR_TRAP_ENABLE_OVF) {
11577 si_code = TARGET_FPE_FLTOVF;
11578 }
11579 if (fex & SWCR_TRAP_ENABLE_DZE) {
11580 si_code = TARGET_FPE_FLTDIV;
11581 }
11582 if (fex & SWCR_TRAP_ENABLE_INV) {
11583 si_code = TARGET_FPE_FLTINV;
11584 }
11586 info.si_signo = SIGFPE;
11587 info.si_errno = 0;
11588 info.si_code = si_code;
11589 info._sifields._sigfault._addr = (cpu_env)->pc;
11590 queue_signal(cpu_env, info.si_signo,
11591 QEMU_SI_FAULT, &info);
11592 }
11593 ret = 0;
11594 }
11595 break;
11597 /* case SSI_NVPAIRS:
11598 -- Used with SSIN_UACPROC to enable unaligned accesses.
11599 case SSI_IEEE_STATE_AT_SIGNAL:
11600 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11601 -- Not implemented in linux kernel
11602 */
11603 }
11604 return ret;
11605 #endif
11606 #ifdef TARGET_NR_osf_sigprocmask
11607 /* Alpha specific. */
11608 case TARGET_NR_osf_sigprocmask:
11609 {
11610 abi_ulong mask;
11611 int how;
11612 sigset_t set, oldset;
11614 switch(arg1) {
11615 case TARGET_SIG_BLOCK:
11616 how = SIG_BLOCK;
11617 break;
11618 case TARGET_SIG_UNBLOCK:
11619 how = SIG_UNBLOCK;
11620 break;
11621 case TARGET_SIG_SETMASK:
11622 how = SIG_SETMASK;
11623 break;
11624 default:
11625 return -TARGET_EINVAL;
11626 }
11627 mask = arg2;
11628 target_to_host_old_sigset(&set, &mask);
11629 ret = do_sigprocmask(how, &set, &oldset);
11630 if (!ret) {
11631 host_to_target_old_sigset(&mask, &oldset);
11632 ret = mask;
11633 }
11634 }
11635 return ret;
11636 #endif
11638 #ifdef TARGET_NR_getgid32
11639 case TARGET_NR_getgid32:
11640 return get_errno(getgid());
11641 #endif
11642 #ifdef TARGET_NR_geteuid32
11643 case TARGET_NR_geteuid32:
11644 return get_errno(geteuid());
11645 #endif
11646 #ifdef TARGET_NR_getegid32
11647 case TARGET_NR_getegid32:
11648 return get_errno(getegid());
11649 #endif
11650 #ifdef TARGET_NR_setreuid32
11651 case TARGET_NR_setreuid32:
11652 return get_errno(setreuid(arg1, arg2));
11653 #endif
11654 #ifdef TARGET_NR_setregid32
11655 case TARGET_NR_setregid32:
11656 return get_errno(setregid(arg1, arg2));
11657 #endif
11658 #ifdef TARGET_NR_getgroups32
11659 case TARGET_NR_getgroups32:
11660 {
11661 int gidsetsize = arg1;
11662 uint32_t *target_grouplist;
11663 gid_t *grouplist;
11664 int i;
11666 grouplist = alloca(gidsetsize * sizeof(gid_t));
11667 ret = get_errno(getgroups(gidsetsize, grouplist));
11668 if (gidsetsize == 0)
11669 return ret;
11670 if (!is_error(ret)) {
11671 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11672 if (!target_grouplist) {
11673 return -TARGET_EFAULT;
11674 }
11675 for(i = 0;i < ret; i++)
11676 target_grouplist[i] = tswap32(grouplist[i]);
11677 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11678 }
11679 }
11680 return ret;
11681 #endif
11682 #ifdef TARGET_NR_setgroups32
11683 case TARGET_NR_setgroups32:
11684 {
11685 int gidsetsize = arg1;
11686 uint32_t *target_grouplist;
11687 gid_t *grouplist;
11688 int i;
11690 grouplist = alloca(gidsetsize * sizeof(gid_t));
11691 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11692 if (!target_grouplist) {
11693 return -TARGET_EFAULT;
11694 }
11695 for(i = 0;i < gidsetsize; i++)
11696 grouplist[i] = tswap32(target_grouplist[i]);
11697 unlock_user(target_grouplist, arg2, 0);
11698 return get_errno(setgroups(gidsetsize, grouplist));
11699 }
11700 #endif
11701 #ifdef TARGET_NR_fchown32
11702 case TARGET_NR_fchown32:
11703 return get_errno(fchown(arg1, arg2, arg3));
11704 #endif
11705 #ifdef TARGET_NR_setresuid32
11706 case TARGET_NR_setresuid32:
11707 return get_errno(sys_setresuid(arg1, arg2, arg3));
11708 #endif
11709 #ifdef TARGET_NR_getresuid32
11710 case TARGET_NR_getresuid32:
11711 {
11712 uid_t ruid, euid, suid;
11713 ret = get_errno(getresuid(&ruid, &euid, &suid));
11714 if (!is_error(ret)) {
11715 if (put_user_u32(ruid, arg1)
11716 || put_user_u32(euid, arg2)
11717 || put_user_u32(suid, arg3))
11718 return -TARGET_EFAULT;
11719 }
11720 }
11721 return ret;
11722 #endif
11723 #ifdef TARGET_NR_setresgid32
11724 case TARGET_NR_setresgid32:
11725 return get_errno(sys_setresgid(arg1, arg2, arg3));
11726 #endif
11727 #ifdef TARGET_NR_getresgid32
11728 case TARGET_NR_getresgid32:
11729 {
11730 gid_t rgid, egid, sgid;
11731 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11732 if (!is_error(ret)) {
11733 if (put_user_u32(rgid, arg1)
11734 || put_user_u32(egid, arg2)
11735 || put_user_u32(sgid, arg3))
11736 return -TARGET_EFAULT;
11737 }
11738 }
11739 return ret;
11740 #endif
11741 #ifdef TARGET_NR_chown32
11742 case TARGET_NR_chown32:
11743 if (!(p = lock_user_string(arg1)))
11744 return -TARGET_EFAULT;
11745 ret = get_errno(chown(p, arg2, arg3));
11746 unlock_user(p, arg1, 0);
11747 return ret;
11748 #endif
11749 #ifdef TARGET_NR_setuid32
11750 case TARGET_NR_setuid32:
11751 return get_errno(sys_setuid(arg1));
11752 #endif
11753 #ifdef TARGET_NR_setgid32
11754 case TARGET_NR_setgid32:
11755 return get_errno(sys_setgid(arg1));
11756 #endif
11757 #ifdef TARGET_NR_setfsuid32
11758 case TARGET_NR_setfsuid32:
11759 return get_errno(setfsuid(arg1));
11760 #endif
11761 #ifdef TARGET_NR_setfsgid32
11762 case TARGET_NR_setfsgid32:
11763 return get_errno(setfsgid(arg1));
11764 #endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
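
    /*
     * On 32-bit ABIs a 64-bit argument occupies two registers;
     * regpairs_aligned() reports whether the ABI additionally requires
     * such pairs to start on an even register, in which case everything
     * shifts up one slot and the arguments must be reshuffled here.
     */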
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif
#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        return target_madvise(arg1, arg2, arg3);
#endif
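
    /*
     * fcntl64 deals in the 64-bit struct flock64.  ARM OABI lays that
     * struct out differently from EABI (hence the _oabi_ copy helpers),
     * so the conversion functions are selected before the fcntl command
     * itself is translated for the host.
     */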
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
        {
            int cmd;
            struct flock64 fl;
            from_flock64_fn *copyfrom = copy_from_user_flock64;
            to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
            if (!cpu_env->eabi) {
                copyfrom = copy_from_user_oabi_flock64;
                copyto = copy_to_user_oabi_flock64;
            }
#endif

            cmd = target_to_host_fcntl_cmd(arg2);
            if (cmd == -TARGET_EINVAL) {
                return cmd;
            }

            switch (arg2) {
            case TARGET_F_GETLK64:
                ret = copyfrom(&fl, arg3);
                if (ret) {
                    break;
                }
                ret = get_errno(safe_fcntl(arg1, cmd, &fl));
                if (ret == 0) {
                    ret = copyto(arg3, &fl);
                }
                break;

            case TARGET_F_SETLK64:
            case TARGET_F_SETLKW64:
                ret = copyfrom(&fl, arg3);
                if (ret) {
                    break;
                }
                ret = get_errno(safe_fcntl(arg1, cmd, &fl));
                break;
            default:
                ret = do_fcntl(arg1, arg2, arg3);
                break;
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
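
    /*
     * The extended-attribute wrappers below all follow the same shape:
     * lock the guest name/value buffers, call the matching host xattr
     * function, and fail with -TARGET_EFAULT if any guest pointer
     * cannot be mapped.
     */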
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
        {
            void *p, *b = 0;
            if (arg2) {
                b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!b) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            if (p) {
                if (num == TARGET_NR_listxattr) {
                    ret = get_errno(listxattr(p, b, arg3));
                } else {
                    ret = get_errno(llistxattr(p, b, arg3));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(b, arg2, arg3);
            return ret;
        }
    case TARGET_NR_flistxattr:
        {
            void *b = 0;
            if (arg2) {
                b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!b) {
                    return -TARGET_EFAULT;
                }
            }
            ret = get_errno(flistxattr(arg1, b, arg3));
            unlock_user(b, arg2, arg3);
            return ret;
        }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        cpu_env->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff) {
            ret = -TARGET_EINVAL;
        } else {
            cpu_env->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif

#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * If the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        TaskState *ts = cpu->opaque;
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                                     target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif

#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                                             fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
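
    /*
     * POSIX message queue wrappers.  The timed send/receive variants
     * convert the guest timespec to the host format on the way in and,
     * on success, copy the timespec back to guest memory.
     */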

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;

#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
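
    /*
     * For splice the optional in/out offsets are read from guest memory
     * before the host call and written back afterwards, so the guest
     * observes the updated file positions.
     */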

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
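
    /*
     * eventfd2 flags cannot be passed through unchanged: the target's
     * O_NONBLOCK/O_CLOEXEC values may differ from the host's, so they
     * are masked out and re-applied using the host constants.
     */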
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));

        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
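
    /*
     * epoll_wait/epoll_pwait bounce the result array through a
     * host-side buffer: the host struct epoll_event layout need not
     * match the target's, so each returned entry is byte-swapped into
     * guest memory after the call completes.
     */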

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
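
    /*
     * POSIX timers: host timer_t handles live in the g_posix_timers[]
     * array, and the guest-visible timer id encodes the slot index
     * OR'ed with TIMER_MAGIC; get_timer_id() validates and decodes it
     * for the cases below.
     */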

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
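
    /*
     * copy_file_range: the optional file offsets are bounced through
     * local loff_t variables and only written back to guest memory
     * when the host call actually copied some bytes.
     */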

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
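
/*
 * do_syscall() is the externally visible entry point: it wraps the big
 * dispatch in do_syscall1() with record/replay instrumentation and
 * optional strace-style logging of each call and its return value.
 */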
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}