ppc/pnv: enable user created pnv-phb for powernv9
[qemu.git] / linux-user / syscall.c
blobf4091212027c254a19dc871a675d133215fc46c8
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
99 #ifdef HAVE_SYS_MOUNT_FSCONFIG
101 * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
102 * which in turn prevents use of linux/fs.h. So we have to
103 * define the constants ourselves for now.
105 #define FS_IOC_GETFLAGS _IOR('f', 1, long)
106 #define FS_IOC_SETFLAGS _IOW('f', 2, long)
107 #define FS_IOC_GETVERSION _IOR('v', 1, long)
108 #define FS_IOC_SETVERSION _IOW('v', 2, long)
109 #define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap)
110 #define FS_IOC32_GETFLAGS _IOR('f', 1, int)
111 #define FS_IOC32_SETFLAGS _IOW('f', 2, int)
112 #define FS_IOC32_GETVERSION _IOR('v', 1, int)
113 #define FS_IOC32_SETVERSION _IOW('v', 2, int)
114 #else
115 #include <linux/fs.h>
116 #endif
117 #include <linux/fd.h>
118 #if defined(CONFIG_FIEMAP)
119 #include <linux/fiemap.h>
120 #endif
121 #include <linux/fb.h>
122 #if defined(CONFIG_USBFS)
123 #include <linux/usbdevice_fs.h>
124 #include <linux/usb/ch9.h>
125 #endif
126 #include <linux/vt.h>
127 #include <linux/dm-ioctl.h>
128 #include <linux/reboot.h>
129 #include <linux/route.h>
130 #include <linux/filter.h>
131 #include <linux/blkpg.h>
132 #include <netpacket/packet.h>
133 #include <linux/netlink.h>
134 #include <linux/if_alg.h>
135 #include <linux/rtc.h>
136 #include <sound/asound.h>
137 #ifdef HAVE_BTRFS_H
138 #include <linux/btrfs.h>
139 #endif
140 #ifdef HAVE_DRM_H
141 #include <libdrm/drm.h>
142 #include <libdrm/i915_drm.h>
143 #endif
144 #include "linux_loop.h"
145 #include "uname.h"
147 #include "qemu.h"
148 #include "user-internals.h"
149 #include "strace.h"
150 #include "signal-common.h"
151 #include "loader.h"
152 #include "user-mmap.h"
153 #include "user/safe-syscall.h"
154 #include "qemu/guest-random.h"
155 #include "qemu/selfmap.h"
156 #include "user/syscall-trace.h"
157 #include "special-errno.h"
158 #include "qapi/error.h"
159 #include "fd-trans.h"
160 #include "tcg/tcg.h"
162 #ifndef CLONE_IO
163 #define CLONE_IO 0x80000000 /* Clone io context */
164 #endif
166 /* We can't directly call the host clone syscall, because this will
167 * badly confuse libc (breaking mutexes, for example). So we must
168 * divide clone flags into:
169 * * flag combinations that look like pthread_create()
170 * * flag combinations that look like fork()
171 * * flags we can implement within QEMU itself
172 * * flags we can't support and will return an error for
174 /* For thread creation, all these flags must be present; for
175 * fork, none must be present.
177 #define CLONE_THREAD_FLAGS \
178 (CLONE_VM | CLONE_FS | CLONE_FILES | \
179 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
181 /* These flags are ignored:
182 * CLONE_DETACHED is now ignored by the kernel;
183 * CLONE_IO is just an optimisation hint to the I/O scheduler
185 #define CLONE_IGNORED_FLAGS \
186 (CLONE_DETACHED | CLONE_IO)
188 /* Flags for fork which we can implement within QEMU itself */
189 #define CLONE_OPTIONAL_FORK_FLAGS \
190 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
191 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
193 /* Flags for thread creation which we can implement within QEMU itself */
194 #define CLONE_OPTIONAL_THREAD_FLAGS \
195 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
196 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
198 #define CLONE_INVALID_FORK_FLAGS \
199 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
201 #define CLONE_INVALID_THREAD_FLAGS \
202 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
203 CLONE_IGNORED_FLAGS))
205 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
206 * have almost all been allocated. We cannot support any of
207 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
208 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
209 * The checks against the invalid thread masks above will catch these.
210 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
213 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
214 * once. This exercises the codepaths for restart.
216 //#define DEBUG_ERESTARTSYS
218 //#include <linux/msdos_fs.h>
219 #define VFAT_IOCTL_READDIR_BOTH \
220 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
221 #define VFAT_IOCTL_READDIR_SHORT \
222 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
224 #undef _syscall0
225 #undef _syscall1
226 #undef _syscall2
227 #undef _syscall3
228 #undef _syscall4
229 #undef _syscall5
230 #undef _syscall6
232 #define _syscall0(type,name) \
233 static type name (void) \
235 return syscall(__NR_##name); \
238 #define _syscall1(type,name,type1,arg1) \
239 static type name (type1 arg1) \
241 return syscall(__NR_##name, arg1); \
244 #define _syscall2(type,name,type1,arg1,type2,arg2) \
245 static type name (type1 arg1,type2 arg2) \
247 return syscall(__NR_##name, arg1, arg2); \
250 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
251 static type name (type1 arg1,type2 arg2,type3 arg3) \
253 return syscall(__NR_##name, arg1, arg2, arg3); \
256 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
257 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
259 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
262 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
263 type5,arg5) \
264 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
266 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
270 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
271 type5,arg5,type6,arg6) \
272 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
273 type6 arg6) \
275 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
279 #define __NR_sys_uname __NR_uname
280 #define __NR_sys_getcwd1 __NR_getcwd
281 #define __NR_sys_getdents __NR_getdents
282 #define __NR_sys_getdents64 __NR_getdents64
283 #define __NR_sys_getpriority __NR_getpriority
284 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
285 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
286 #define __NR_sys_syslog __NR_syslog
287 #if defined(__NR_futex)
288 # define __NR_sys_futex __NR_futex
289 #endif
290 #if defined(__NR_futex_time64)
291 # define __NR_sys_futex_time64 __NR_futex_time64
292 #endif
293 #define __NR_sys_statx __NR_statx
295 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
296 #define __NR__llseek __NR_lseek
297 #endif
299 /* Newer kernel ports have llseek() instead of _llseek() */
300 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
301 #define TARGET_NR__llseek TARGET_NR_llseek
302 #endif
304 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
305 #ifndef TARGET_O_NONBLOCK_MASK
306 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
307 #endif
309 #define __NR_sys_gettid __NR_gettid
310 _syscall0(int, sys_gettid)
312 /* For the 64-bit guest on 32-bit host case we must emulate
313 * getdents using getdents64, because otherwise the host
314 * might hand us back more dirent records than we can fit
315 * into the guest buffer after structure format conversion.
316 * Otherwise we emulate getdents with getdents if the host has it.
318 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
319 #define EMULATE_GETDENTS_WITH_GETDENTS
320 #endif
322 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
323 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
324 #endif
325 #if (defined(TARGET_NR_getdents) && \
326 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
327 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
328 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
329 #endif
330 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
331 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
332 loff_t *, res, uint, wh);
333 #endif
334 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
335 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
336 siginfo_t *, uinfo)
337 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
338 #ifdef __NR_exit_group
339 _syscall1(int,exit_group,int,error_code)
340 #endif
341 #if defined(__NR_futex)
342 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
343 const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_futex_time64)
346 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
347 const struct timespec *,timeout,int *,uaddr2,int,val3)
348 #endif
349 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
350 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
351 unsigned long *, user_mask_ptr);
352 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
353 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
354 unsigned long *, user_mask_ptr);
355 /* sched_attr is not defined in glibc */
356 struct sched_attr {
357 uint32_t size;
358 uint32_t sched_policy;
359 uint64_t sched_flags;
360 int32_t sched_nice;
361 uint32_t sched_priority;
362 uint64_t sched_runtime;
363 uint64_t sched_deadline;
364 uint64_t sched_period;
365 uint32_t sched_util_min;
366 uint32_t sched_util_max;
368 #define __NR_sys_sched_getattr __NR_sched_getattr
369 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
370 unsigned int, size, unsigned int, flags);
371 #define __NR_sys_sched_setattr __NR_sched_setattr
372 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
373 unsigned int, flags);
374 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
375 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
376 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
377 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
378 const struct sched_param *, param);
379 #define __NR_sys_sched_getparam __NR_sched_getparam
380 _syscall2(int, sys_sched_getparam, pid_t, pid,
381 struct sched_param *, param);
382 #define __NR_sys_sched_setparam __NR_sched_setparam
383 _syscall2(int, sys_sched_setparam, pid_t, pid,
384 const struct sched_param *, param);
385 #define __NR_sys_getcpu __NR_getcpu
386 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
387 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
388 void *, arg);
389 _syscall2(int, capget, struct __user_cap_header_struct *, header,
390 struct __user_cap_data_struct *, data);
391 _syscall2(int, capset, struct __user_cap_header_struct *, header,
392 struct __user_cap_data_struct *, data);
393 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
394 _syscall2(int, ioprio_get, int, which, int, who)
395 #endif
396 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
397 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
398 #endif
399 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
400 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
401 #endif
403 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
404 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
405 unsigned long, idx1, unsigned long, idx2)
406 #endif
409 * It is assumed that struct statx is architecture independent.
411 #if defined(TARGET_NR_statx) && defined(__NR_statx)
412 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
413 unsigned int, mask, struct target_statx *, statxbuf)
414 #endif
415 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
416 _syscall2(int, membarrier, int, cmd, int, flags)
417 #endif
419 static const bitmask_transtbl fcntl_flags_tbl[] = {
420 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
421 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
422 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
423 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
424 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
425 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
426 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
427 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
428 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
429 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
430 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
431 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
432 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
433 #if defined(O_DIRECT)
434 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
435 #endif
436 #if defined(O_NOATIME)
437 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
438 #endif
439 #if defined(O_CLOEXEC)
440 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
441 #endif
442 #if defined(O_PATH)
443 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
444 #endif
445 #if defined(O_TMPFILE)
446 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
447 #endif
448 /* Don't terminate the list prematurely on 64-bit host+guest. */
449 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
450 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
451 #endif
452 { 0, 0, 0, 0 }
455 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
457 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
458 #if defined(__NR_utimensat)
459 #define __NR_sys_utimensat __NR_utimensat
460 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
461 const struct timespec *,tsp,int,flags)
462 #else
463 static int sys_utimensat(int dirfd, const char *pathname,
464 const struct timespec times[2], int flags)
466 errno = ENOSYS;
467 return -1;
469 #endif
470 #endif /* TARGET_NR_utimensat */
472 #ifdef TARGET_NR_renameat2
473 #if defined(__NR_renameat2)
474 #define __NR_sys_renameat2 __NR_renameat2
475 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
476 const char *, new, unsigned int, flags)
477 #else
478 static int sys_renameat2(int oldfd, const char *old,
479 int newfd, const char *new, int flags)
481 if (flags == 0) {
482 return renameat(oldfd, old, newfd, new);
484 errno = ENOSYS;
485 return -1;
487 #endif
488 #endif /* TARGET_NR_renameat2 */
490 #ifdef CONFIG_INOTIFY
491 #include <sys/inotify.h>
492 #else
493 /* Userspace can usually survive runtime without inotify */
494 #undef TARGET_NR_inotify_init
495 #undef TARGET_NR_inotify_init1
496 #undef TARGET_NR_inotify_add_watch
497 #undef TARGET_NR_inotify_rm_watch
498 #endif /* CONFIG_INOTIFY */
500 #if defined(TARGET_NR_prlimit64)
501 #ifndef __NR_prlimit64
502 # define __NR_prlimit64 -1
503 #endif
504 #define __NR_sys_prlimit64 __NR_prlimit64
505 /* The glibc rlimit structure may not be that used by the underlying syscall */
506 struct host_rlimit64 {
507 uint64_t rlim_cur;
508 uint64_t rlim_max;
510 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
511 const struct host_rlimit64 *, new_limit,
512 struct host_rlimit64 *, old_limit)
513 #endif
516 #if defined(TARGET_NR_timer_create)
517 /* Maximum of 32 active POSIX timers allowed at any one time. */
518 static timer_t g_posix_timers[32] = { 0, } ;
520 static inline int next_free_host_timer(void)
522 int k ;
523 /* FIXME: Does finding the next free slot require a lock? */
524 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
525 if (g_posix_timers[k] == 0) {
526 g_posix_timers[k] = (timer_t) 1;
527 return k;
530 return -1;
532 #endif
534 static inline int host_to_target_errno(int host_errno)
536 switch (host_errno) {
537 #define E(X) case X: return TARGET_##X;
538 #include "errnos.c.inc"
539 #undef E
540 default:
541 return host_errno;
545 static inline int target_to_host_errno(int target_errno)
547 switch (target_errno) {
548 #define E(X) case TARGET_##X: return X;
549 #include "errnos.c.inc"
550 #undef E
551 default:
552 return target_errno;
556 abi_long get_errno(abi_long ret)
558 if (ret == -1)
559 return -host_to_target_errno(errno);
560 else
561 return ret;
564 const char *target_strerror(int err)
566 if (err == QEMU_ERESTARTSYS) {
567 return "To be restarted";
569 if (err == QEMU_ESIGRETURN) {
570 return "Successful exit from sigreturn";
573 return strerror(target_to_host_errno(err));
576 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
578 int i;
579 uint8_t b;
580 if (usize <= ksize) {
581 return 1;
583 for (i = ksize; i < usize; i++) {
584 if (get_user_u8(b, addr + i)) {
585 return -TARGET_EFAULT;
587 if (b != 0) {
588 return 0;
591 return 1;
594 #define safe_syscall0(type, name) \
595 static type safe_##name(void) \
597 return safe_syscall(__NR_##name); \
600 #define safe_syscall1(type, name, type1, arg1) \
601 static type safe_##name(type1 arg1) \
603 return safe_syscall(__NR_##name, arg1); \
606 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
607 static type safe_##name(type1 arg1, type2 arg2) \
609 return safe_syscall(__NR_##name, arg1, arg2); \
612 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
613 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
615 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
618 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
619 type4, arg4) \
620 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
622 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
625 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
626 type4, arg4, type5, arg5) \
627 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
628 type5 arg5) \
630 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
633 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
634 type4, arg4, type5, arg5, type6, arg6) \
635 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
636 type5 arg5, type6 arg6) \
638 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
641 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
642 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
643 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
644 int, flags, mode_t, mode)
645 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
646 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
647 struct rusage *, rusage)
648 #endif
649 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
650 int, options, struct rusage *, rusage)
651 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
652 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
653 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
654 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
655 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
656 #endif
657 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
658 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
659 struct timespec *, tsp, const sigset_t *, sigmask,
660 size_t, sigsetsize)
661 #endif
662 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
663 int, maxevents, int, timeout, const sigset_t *, sigmask,
664 size_t, sigsetsize)
665 #if defined(__NR_futex)
666 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
667 const struct timespec *,timeout,int *,uaddr2,int,val3)
668 #endif
669 #if defined(__NR_futex_time64)
670 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
671 const struct timespec *,timeout,int *,uaddr2,int,val3)
672 #endif
673 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
674 safe_syscall2(int, kill, pid_t, pid, int, sig)
675 safe_syscall2(int, tkill, int, tid, int, sig)
676 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
677 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
678 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
679 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
680 unsigned long, pos_l, unsigned long, pos_h)
681 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
682 unsigned long, pos_l, unsigned long, pos_h)
683 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
684 socklen_t, addrlen)
685 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
686 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
687 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
688 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
689 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
690 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
691 safe_syscall2(int, flock, int, fd, int, operation)
692 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
693 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
694 const struct timespec *, uts, size_t, sigsetsize)
695 #endif
696 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
697 int, flags)
698 #if defined(TARGET_NR_nanosleep)
699 safe_syscall2(int, nanosleep, const struct timespec *, req,
700 struct timespec *, rem)
701 #endif
702 #if defined(TARGET_NR_clock_nanosleep) || \
703 defined(TARGET_NR_clock_nanosleep_time64)
704 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
705 const struct timespec *, req, struct timespec *, rem)
706 #endif
707 #ifdef __NR_ipc
708 #ifdef __s390x__
709 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
710 void *, ptr)
711 #else
712 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
713 void *, ptr, long, fifth)
714 #endif
715 #endif
716 #ifdef __NR_msgsnd
717 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
718 int, flags)
719 #endif
720 #ifdef __NR_msgrcv
721 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
722 long, msgtype, int, flags)
723 #endif
724 #ifdef __NR_semtimedop
725 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
726 unsigned, nsops, const struct timespec *, timeout)
727 #endif
728 #if defined(TARGET_NR_mq_timedsend) || \
729 defined(TARGET_NR_mq_timedsend_time64)
730 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
731 size_t, len, unsigned, prio, const struct timespec *, timeout)
732 #endif
733 #if defined(TARGET_NR_mq_timedreceive) || \
734 defined(TARGET_NR_mq_timedreceive_time64)
735 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
736 size_t, len, unsigned *, prio, const struct timespec *, timeout)
737 #endif
738 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
739 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
740 int, outfd, loff_t *, poutoff, size_t, length,
741 unsigned int, flags)
742 #endif
744 /* We do ioctl like this rather than via safe_syscall3 to preserve the
745 * "third argument might be integer or pointer or not present" behaviour of
746 * the libc function.
748 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
749 /* Similarly for fcntl. Note that callers must always:
750 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
751 * use the flock64 struct rather than unsuffixed flock
752 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
754 #ifdef __NR_fcntl64
755 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
756 #else
757 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
758 #endif
760 static inline int host_to_target_sock_type(int host_type)
762 int target_type;
764 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
765 case SOCK_DGRAM:
766 target_type = TARGET_SOCK_DGRAM;
767 break;
768 case SOCK_STREAM:
769 target_type = TARGET_SOCK_STREAM;
770 break;
771 default:
772 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
773 break;
776 #if defined(SOCK_CLOEXEC)
777 if (host_type & SOCK_CLOEXEC) {
778 target_type |= TARGET_SOCK_CLOEXEC;
780 #endif
782 #if defined(SOCK_NONBLOCK)
783 if (host_type & SOCK_NONBLOCK) {
784 target_type |= TARGET_SOCK_NONBLOCK;
786 #endif
788 return target_type;
791 static abi_ulong target_brk;
792 static abi_ulong target_original_brk;
793 static abi_ulong brk_page;
795 void target_set_brk(abi_ulong new_brk)
797 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
798 brk_page = HOST_PAGE_ALIGN(target_brk);
801 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
802 #define DEBUGF_BRK(message, args...)
804 /* do_brk() must return target values and target errnos. */
805 abi_long do_brk(abi_ulong new_brk)
807 abi_long mapped_addr;
808 abi_ulong new_alloc_size;
810 /* brk pointers are always untagged */
812 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
814 if (!new_brk) {
815 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
816 return target_brk;
818 if (new_brk < target_original_brk) {
819 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
820 target_brk);
821 return target_brk;
824 /* If the new brk is less than the highest page reserved to the
825 * target heap allocation, set it and we're almost done... */
826 if (new_brk <= brk_page) {
827 /* Heap contents are initialized to zero, as for anonymous
828 * mapped pages. */
829 if (new_brk > target_brk) {
830 memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
832 target_brk = new_brk;
833 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
834 return target_brk;
837 /* We need to allocate more memory after the brk... Note that
838 * we don't use MAP_FIXED because that will map over the top of
839 * any existing mapping (like the one with the host libc or qemu
840 * itself); instead we treat "mapped but at wrong address" as
841 * a failure and unmap again.
843 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
844 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
845 PROT_READ|PROT_WRITE,
846 MAP_ANON|MAP_PRIVATE, 0, 0));
848 if (mapped_addr == brk_page) {
849 /* Heap contents are initialized to zero, as for anonymous
850 * mapped pages. Technically the new pages are already
851 * initialized to zero since they *are* anonymous mapped
852 * pages, however we have to take care with the contents that
853 * come from the remaining part of the previous page: it may
854 * contains garbage data due to a previous heap usage (grown
855 * then shrunken). */
856 memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
858 target_brk = new_brk;
859 brk_page = HOST_PAGE_ALIGN(target_brk);
860 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
861 target_brk);
862 return target_brk;
863 } else if (mapped_addr != -1) {
864 /* Mapped but at wrong address, meaning there wasn't actually
865 * enough space for this brk.
867 target_munmap(mapped_addr, new_alloc_size);
868 mapped_addr = -1;
869 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
871 else {
872 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
875 #if defined(TARGET_ALPHA)
876 /* We (partially) emulate OSF/1 on Alpha, which requires we
877 return a proper errno, not an unchanged brk value. */
878 return -TARGET_ENOMEM;
879 #endif
880 /* For everything else, return the previous break. */
881 return target_brk;
884 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
885 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
886 static inline abi_long copy_from_user_fdset(fd_set *fds,
887 abi_ulong target_fds_addr,
888 int n)
890 int i, nw, j, k;
891 abi_ulong b, *target_fds;
893 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
894 if (!(target_fds = lock_user(VERIFY_READ,
895 target_fds_addr,
896 sizeof(abi_ulong) * nw,
897 1)))
898 return -TARGET_EFAULT;
900 FD_ZERO(fds);
901 k = 0;
902 for (i = 0; i < nw; i++) {
903 /* grab the abi_ulong */
904 __get_user(b, &target_fds[i]);
905 for (j = 0; j < TARGET_ABI_BITS; j++) {
906 /* check the bit inside the abi_ulong */
907 if ((b >> j) & 1)
908 FD_SET(k, fds);
909 k++;
913 unlock_user(target_fds, target_fds_addr, 0);
915 return 0;
918 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
919 abi_ulong target_fds_addr,
920 int n)
922 if (target_fds_addr) {
923 if (copy_from_user_fdset(fds, target_fds_addr, n))
924 return -TARGET_EFAULT;
925 *fds_ptr = fds;
926 } else {
927 *fds_ptr = NULL;
929 return 0;
932 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
933 const fd_set *fds,
934 int n)
936 int i, nw, j, k;
937 abi_long v;
938 abi_ulong *target_fds;
940 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
941 if (!(target_fds = lock_user(VERIFY_WRITE,
942 target_fds_addr,
943 sizeof(abi_ulong) * nw,
944 0)))
945 return -TARGET_EFAULT;
947 k = 0;
948 for (i = 0; i < nw; i++) {
949 v = 0;
950 for (j = 0; j < TARGET_ABI_BITS; j++) {
951 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
952 k++;
954 __put_user(v, &target_fds[i]);
957 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
959 return 0;
961 #endif
963 #if defined(__alpha__)
964 #define HOST_HZ 1024
965 #else
966 #define HOST_HZ 100
967 #endif
969 static inline abi_long host_to_target_clock_t(long ticks)
971 #if HOST_HZ == TARGET_HZ
972 return ticks;
973 #else
974 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
975 #endif
978 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
979 const struct rusage *rusage)
981 struct target_rusage *target_rusage;
983 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
984 return -TARGET_EFAULT;
985 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
986 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
987 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
988 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
989 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
990 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
991 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
992 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
993 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
994 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
995 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
996 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
997 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
998 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
999 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1000 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1001 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1002 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1003 unlock_user_struct(target_rusage, target_addr, 1);
1005 return 0;
1008 #ifdef TARGET_NR_setrlimit
1009 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1011 abi_ulong target_rlim_swap;
1012 rlim_t result;
1014 target_rlim_swap = tswapal(target_rlim);
1015 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1016 return RLIM_INFINITY;
1018 result = target_rlim_swap;
1019 if (target_rlim_swap != (rlim_t)result)
1020 return RLIM_INFINITY;
1022 return result;
1024 #endif
1026 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1027 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1029 abi_ulong target_rlim_swap;
1030 abi_ulong result;
1032 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1033 target_rlim_swap = TARGET_RLIM_INFINITY;
1034 else
1035 target_rlim_swap = rlim;
1036 result = tswapal(target_rlim_swap);
1038 return result;
1040 #endif
1042 static inline int target_to_host_resource(int code)
1044 switch (code) {
1045 case TARGET_RLIMIT_AS:
1046 return RLIMIT_AS;
1047 case TARGET_RLIMIT_CORE:
1048 return RLIMIT_CORE;
1049 case TARGET_RLIMIT_CPU:
1050 return RLIMIT_CPU;
1051 case TARGET_RLIMIT_DATA:
1052 return RLIMIT_DATA;
1053 case TARGET_RLIMIT_FSIZE:
1054 return RLIMIT_FSIZE;
1055 case TARGET_RLIMIT_LOCKS:
1056 return RLIMIT_LOCKS;
1057 case TARGET_RLIMIT_MEMLOCK:
1058 return RLIMIT_MEMLOCK;
1059 case TARGET_RLIMIT_MSGQUEUE:
1060 return RLIMIT_MSGQUEUE;
1061 case TARGET_RLIMIT_NICE:
1062 return RLIMIT_NICE;
1063 case TARGET_RLIMIT_NOFILE:
1064 return RLIMIT_NOFILE;
1065 case TARGET_RLIMIT_NPROC:
1066 return RLIMIT_NPROC;
1067 case TARGET_RLIMIT_RSS:
1068 return RLIMIT_RSS;
1069 case TARGET_RLIMIT_RTPRIO:
1070 return RLIMIT_RTPRIO;
1071 #ifdef RLIMIT_RTTIME
1072 case TARGET_RLIMIT_RTTIME:
1073 return RLIMIT_RTTIME;
1074 #endif
1075 case TARGET_RLIMIT_SIGPENDING:
1076 return RLIMIT_SIGPENDING;
1077 case TARGET_RLIMIT_STACK:
1078 return RLIMIT_STACK;
1079 default:
1080 return code;
1084 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1085 abi_ulong target_tv_addr)
1087 struct target_timeval *target_tv;
1089 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1090 return -TARGET_EFAULT;
1093 __get_user(tv->tv_sec, &target_tv->tv_sec);
1094 __get_user(tv->tv_usec, &target_tv->tv_usec);
1096 unlock_user_struct(target_tv, target_tv_addr, 0);
1098 return 0;
1101 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1102 const struct timeval *tv)
1104 struct target_timeval *target_tv;
1106 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1107 return -TARGET_EFAULT;
1110 __put_user(tv->tv_sec, &target_tv->tv_sec);
1111 __put_user(tv->tv_usec, &target_tv->tv_usec);
1113 unlock_user_struct(target_tv, target_tv_addr, 1);
1115 return 0;
1118 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1119 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1120 abi_ulong target_tv_addr)
1122 struct target__kernel_sock_timeval *target_tv;
1124 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1125 return -TARGET_EFAULT;
1128 __get_user(tv->tv_sec, &target_tv->tv_sec);
1129 __get_user(tv->tv_usec, &target_tv->tv_usec);
1131 unlock_user_struct(target_tv, target_tv_addr, 0);
1133 return 0;
1135 #endif
1137 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1138 const struct timeval *tv)
1140 struct target__kernel_sock_timeval *target_tv;
1142 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1143 return -TARGET_EFAULT;
1146 __put_user(tv->tv_sec, &target_tv->tv_sec);
1147 __put_user(tv->tv_usec, &target_tv->tv_usec);
1149 unlock_user_struct(target_tv, target_tv_addr, 1);
1151 return 0;
1154 #if defined(TARGET_NR_futex) || \
1155 defined(TARGET_NR_rt_sigtimedwait) || \
1156 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1157 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1158 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1159 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1160 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1161 defined(TARGET_NR_timer_settime) || \
1162 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1163 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1164 abi_ulong target_addr)
1166 struct target_timespec *target_ts;
1168 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1169 return -TARGET_EFAULT;
1171 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1172 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1173 unlock_user_struct(target_ts, target_addr, 0);
1174 return 0;
1176 #endif
1178 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1179 defined(TARGET_NR_timer_settime64) || \
1180 defined(TARGET_NR_mq_timedsend_time64) || \
1181 defined(TARGET_NR_mq_timedreceive_time64) || \
1182 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1183 defined(TARGET_NR_clock_nanosleep_time64) || \
1184 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1185 defined(TARGET_NR_utimensat) || \
1186 defined(TARGET_NR_utimensat_time64) || \
1187 defined(TARGET_NR_semtimedop_time64) || \
1188 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1189 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1190 abi_ulong target_addr)
1192 struct target__kernel_timespec *target_ts;
1194 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1195 return -TARGET_EFAULT;
1197 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1198 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1199 /* in 32bit mode, this drops the padding */
1200 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1201 unlock_user_struct(target_ts, target_addr, 0);
1202 return 0;
1204 #endif
1206 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1207 struct timespec *host_ts)
1209 struct target_timespec *target_ts;
1211 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1212 return -TARGET_EFAULT;
1214 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1215 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1216 unlock_user_struct(target_ts, target_addr, 1);
1217 return 0;
1220 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1221 struct timespec *host_ts)
1223 struct target__kernel_timespec *target_ts;
1225 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1226 return -TARGET_EFAULT;
1228 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1229 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1230 unlock_user_struct(target_ts, target_addr, 1);
1231 return 0;
1234 #if defined(TARGET_NR_gettimeofday)
1235 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1236 struct timezone *tz)
1238 struct target_timezone *target_tz;
1240 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1241 return -TARGET_EFAULT;
1244 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1245 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1247 unlock_user_struct(target_tz, target_tz_addr, 1);
1249 return 0;
1251 #endif
1253 #if defined(TARGET_NR_settimeofday)
1254 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1255 abi_ulong target_tz_addr)
1257 struct target_timezone *target_tz;
1259 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1260 return -TARGET_EFAULT;
1263 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1264 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1266 unlock_user_struct(target_tz, target_tz_addr, 0);
1268 return 0;
1270 #endif
1272 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1273 #include <mqueue.h>
1275 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1276 abi_ulong target_mq_attr_addr)
1278 struct target_mq_attr *target_mq_attr;
1280 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1281 target_mq_attr_addr, 1))
1282 return -TARGET_EFAULT;
1284 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1285 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1286 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1287 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1289 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1291 return 0;
1294 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1295 const struct mq_attr *attr)
1297 struct target_mq_attr *target_mq_attr;
1299 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1300 target_mq_attr_addr, 0))
1301 return -TARGET_EFAULT;
1303 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1304 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1305 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1306 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1308 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1310 return 0;
1312 #endif
1314 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1315 /* do_select() must return target values and target errnos. */
1316 static abi_long do_select(int n,
1317 abi_ulong rfd_addr, abi_ulong wfd_addr,
1318 abi_ulong efd_addr, abi_ulong target_tv_addr)
1320 fd_set rfds, wfds, efds;
1321 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1322 struct timeval tv;
1323 struct timespec ts, *ts_ptr;
1324 abi_long ret;
1326 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1327 if (ret) {
1328 return ret;
1330 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1331 if (ret) {
1332 return ret;
1334 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1335 if (ret) {
1336 return ret;
1339 if (target_tv_addr) {
1340 if (copy_from_user_timeval(&tv, target_tv_addr))
1341 return -TARGET_EFAULT;
1342 ts.tv_sec = tv.tv_sec;
1343 ts.tv_nsec = tv.tv_usec * 1000;
1344 ts_ptr = &ts;
1345 } else {
1346 ts_ptr = NULL;
1349 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1350 ts_ptr, NULL));
1352 if (!is_error(ret)) {
1353 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1354 return -TARGET_EFAULT;
1355 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1356 return -TARGET_EFAULT;
1357 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1358 return -TARGET_EFAULT;
1360 if (target_tv_addr) {
1361 tv.tv_sec = ts.tv_sec;
1362 tv.tv_usec = ts.tv_nsec / 1000;
1363 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1364 return -TARGET_EFAULT;
1369 return ret;
1372 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1373 static abi_long do_old_select(abi_ulong arg1)
1375 struct target_sel_arg_struct *sel;
1376 abi_ulong inp, outp, exp, tvp;
1377 long nsel;
1379 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1380 return -TARGET_EFAULT;
1383 nsel = tswapal(sel->n);
1384 inp = tswapal(sel->inp);
1385 outp = tswapal(sel->outp);
1386 exp = tswapal(sel->exp);
1387 tvp = tswapal(sel->tvp);
1389 unlock_user_struct(sel, arg1, 0);
1391 return do_select(nsel, inp, outp, exp, tvp);
1393 #endif
1394 #endif
1396 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1397 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1398 abi_long arg4, abi_long arg5, abi_long arg6,
1399 bool time64)
1401 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1402 fd_set rfds, wfds, efds;
1403 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1404 struct timespec ts, *ts_ptr;
1405 abi_long ret;
1408 * The 6th arg is actually two args smashed together,
1409 * so we cannot use the C library.
1411 struct {
1412 sigset_t *set;
1413 size_t size;
1414 } sig, *sig_ptr;
1416 abi_ulong arg_sigset, arg_sigsize, *arg7;
1418 n = arg1;
1419 rfd_addr = arg2;
1420 wfd_addr = arg3;
1421 efd_addr = arg4;
1422 ts_addr = arg5;
1424 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1425 if (ret) {
1426 return ret;
1428 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1429 if (ret) {
1430 return ret;
1432 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1433 if (ret) {
1434 return ret;
1438 * This takes a timespec, and not a timeval, so we cannot
1439 * use the do_select() helper ...
1441 if (ts_addr) {
1442 if (time64) {
1443 if (target_to_host_timespec64(&ts, ts_addr)) {
1444 return -TARGET_EFAULT;
1446 } else {
1447 if (target_to_host_timespec(&ts, ts_addr)) {
1448 return -TARGET_EFAULT;
1451 ts_ptr = &ts;
1452 } else {
1453 ts_ptr = NULL;
1456 /* Extract the two packed args for the sigset */
1457 sig_ptr = NULL;
1458 if (arg6) {
1459 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1460 if (!arg7) {
1461 return -TARGET_EFAULT;
1463 arg_sigset = tswapal(arg7[0]);
1464 arg_sigsize = tswapal(arg7[1]);
1465 unlock_user(arg7, arg6, 0);
1467 if (arg_sigset) {
1468 ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1469 if (ret != 0) {
1470 return ret;
1472 sig_ptr = &sig;
1473 sig.size = SIGSET_T_SIZE;
1477 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1478 ts_ptr, sig_ptr));
1480 if (sig_ptr) {
1481 finish_sigsuspend_mask(ret);
1484 if (!is_error(ret)) {
1485 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1486 return -TARGET_EFAULT;
1488 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1489 return -TARGET_EFAULT;
1491 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1492 return -TARGET_EFAULT;
1494 if (time64) {
1495 if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1496 return -TARGET_EFAULT;
1498 } else {
1499 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1500 return -TARGET_EFAULT;
1504 return ret;
1506 #endif
1508 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1509 defined(TARGET_NR_ppoll_time64)
1510 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1511 abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1513 struct target_pollfd *target_pfd;
1514 unsigned int nfds = arg2;
1515 struct pollfd *pfd;
1516 unsigned int i;
1517 abi_long ret;
1519 pfd = NULL;
1520 target_pfd = NULL;
1521 if (nfds) {
1522 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1523 return -TARGET_EINVAL;
1525 target_pfd = lock_user(VERIFY_WRITE, arg1,
1526 sizeof(struct target_pollfd) * nfds, 1);
1527 if (!target_pfd) {
1528 return -TARGET_EFAULT;
1531 pfd = alloca(sizeof(struct pollfd) * nfds);
1532 for (i = 0; i < nfds; i++) {
1533 pfd[i].fd = tswap32(target_pfd[i].fd);
1534 pfd[i].events = tswap16(target_pfd[i].events);
1537 if (ppoll) {
1538 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1539 sigset_t *set = NULL;
1541 if (arg3) {
1542 if (time64) {
1543 if (target_to_host_timespec64(timeout_ts, arg3)) {
1544 unlock_user(target_pfd, arg1, 0);
1545 return -TARGET_EFAULT;
1547 } else {
1548 if (target_to_host_timespec(timeout_ts, arg3)) {
1549 unlock_user(target_pfd, arg1, 0);
1550 return -TARGET_EFAULT;
1553 } else {
1554 timeout_ts = NULL;
1557 if (arg4) {
1558 ret = process_sigsuspend_mask(&set, arg4, arg5);
1559 if (ret != 0) {
1560 unlock_user(target_pfd, arg1, 0);
1561 return ret;
1565 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1566 set, SIGSET_T_SIZE));
1568 if (set) {
1569 finish_sigsuspend_mask(ret);
1571 if (!is_error(ret) && arg3) {
1572 if (time64) {
1573 if (host_to_target_timespec64(arg3, timeout_ts)) {
1574 return -TARGET_EFAULT;
1576 } else {
1577 if (host_to_target_timespec(arg3, timeout_ts)) {
1578 return -TARGET_EFAULT;
1582 } else {
1583 struct timespec ts, *pts;
1585 if (arg3 >= 0) {
1586 /* Convert ms to secs, ns */
1587 ts.tv_sec = arg3 / 1000;
1588 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1589 pts = &ts;
1590 } else {
1591 /* -ve poll() timeout means "infinite" */
1592 pts = NULL;
1594 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1597 if (!is_error(ret)) {
1598 for (i = 0; i < nfds; i++) {
1599 target_pfd[i].revents = tswap16(pfd[i].revents);
1602 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1603 return ret;
1605 #endif
1607 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1608 int flags, int is_pipe2)
1610 int host_pipe[2];
1611 abi_long ret;
1612 ret = pipe2(host_pipe, flags);
1614 if (is_error(ret))
1615 return get_errno(ret);
1617 /* Several targets have special calling conventions for the original
1618 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1619 if (!is_pipe2) {
1620 #if defined(TARGET_ALPHA)
1621 cpu_env->ir[IR_A4] = host_pipe[1];
1622 return host_pipe[0];
1623 #elif defined(TARGET_MIPS)
1624 cpu_env->active_tc.gpr[3] = host_pipe[1];
1625 return host_pipe[0];
1626 #elif defined(TARGET_SH4)
1627 cpu_env->gregs[1] = host_pipe[1];
1628 return host_pipe[0];
1629 #elif defined(TARGET_SPARC)
1630 cpu_env->regwptr[1] = host_pipe[1];
1631 return host_pipe[0];
1632 #endif
1635 if (put_user_s32(host_pipe[0], pipedes)
1636 || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1637 return -TARGET_EFAULT;
1638 return get_errno(ret);
1641 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1642 abi_ulong target_addr,
1643 socklen_t len)
1645 struct target_ip_mreqn *target_smreqn;
1647 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1648 if (!target_smreqn)
1649 return -TARGET_EFAULT;
1650 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1651 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1652 if (len == sizeof(struct target_ip_mreqn))
1653 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1654 unlock_user(target_smreqn, target_addr, 0);
1656 return 0;
1659 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1660 abi_ulong target_addr,
1661 socklen_t len)
1663 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1664 sa_family_t sa_family;
1665 struct target_sockaddr *target_saddr;
1667 if (fd_trans_target_to_host_addr(fd)) {
1668 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1671 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1672 if (!target_saddr)
1673 return -TARGET_EFAULT;
1675 sa_family = tswap16(target_saddr->sa_family);
1677 /* Oops. The caller might send a incomplete sun_path; sun_path
1678 * must be terminated by \0 (see the manual page), but
1679 * unfortunately it is quite common to specify sockaddr_un
1680 * length as "strlen(x->sun_path)" while it should be
1681 * "strlen(...) + 1". We'll fix that here if needed.
1682 * Linux kernel has a similar feature.
1685 if (sa_family == AF_UNIX) {
1686 if (len < unix_maxlen && len > 0) {
1687 char *cp = (char*)target_saddr;
1689 if ( cp[len-1] && !cp[len] )
1690 len++;
1692 if (len > unix_maxlen)
1693 len = unix_maxlen;
1696 memcpy(addr, target_saddr, len);
1697 addr->sa_family = sa_family;
1698 if (sa_family == AF_NETLINK) {
1699 struct sockaddr_nl *nladdr;
1701 nladdr = (struct sockaddr_nl *)addr;
1702 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1703 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1704 } else if (sa_family == AF_PACKET) {
1705 struct target_sockaddr_ll *lladdr;
1707 lladdr = (struct target_sockaddr_ll *)addr;
1708 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1709 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1711 unlock_user(target_saddr, target_addr, 0);
1713 return 0;
1716 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1717 struct sockaddr *addr,
1718 socklen_t len)
1720 struct target_sockaddr *target_saddr;
1722 if (len == 0) {
1723 return 0;
1725 assert(addr);
1727 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1728 if (!target_saddr)
1729 return -TARGET_EFAULT;
1730 memcpy(target_saddr, addr, len);
1731 if (len >= offsetof(struct target_sockaddr, sa_family) +
1732 sizeof(target_saddr->sa_family)) {
1733 target_saddr->sa_family = tswap16(addr->sa_family);
1735 if (addr->sa_family == AF_NETLINK &&
1736 len >= sizeof(struct target_sockaddr_nl)) {
1737 struct target_sockaddr_nl *target_nl =
1738 (struct target_sockaddr_nl *)target_saddr;
1739 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1740 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1741 } else if (addr->sa_family == AF_PACKET) {
1742 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1743 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1744 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1745 } else if (addr->sa_family == AF_INET6 &&
1746 len >= sizeof(struct target_sockaddr_in6)) {
1747 struct target_sockaddr_in6 *target_in6 =
1748 (struct target_sockaddr_in6 *)target_saddr;
1749 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1751 unlock_user(target_saddr, target_addr, len);
1753 return 0;
1756 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1757 struct target_msghdr *target_msgh)
1759 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1760 abi_long msg_controllen;
1761 abi_ulong target_cmsg_addr;
1762 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1763 socklen_t space = 0;
1765 msg_controllen = tswapal(target_msgh->msg_controllen);
1766 if (msg_controllen < sizeof (struct target_cmsghdr))
1767 goto the_end;
1768 target_cmsg_addr = tswapal(target_msgh->msg_control);
1769 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1770 target_cmsg_start = target_cmsg;
1771 if (!target_cmsg)
1772 return -TARGET_EFAULT;
1774 while (cmsg && target_cmsg) {
1775 void *data = CMSG_DATA(cmsg);
1776 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1778 int len = tswapal(target_cmsg->cmsg_len)
1779 - sizeof(struct target_cmsghdr);
1781 space += CMSG_SPACE(len);
1782 if (space > msgh->msg_controllen) {
1783 space -= CMSG_SPACE(len);
1784 /* This is a QEMU bug, since we allocated the payload
1785 * area ourselves (unlike overflow in host-to-target
1786 * conversion, which is just the guest giving us a buffer
1787 * that's too small). It can't happen for the payload types
1788 * we currently support; if it becomes an issue in future
1789 * we would need to improve our allocation strategy to
1790 * something more intelligent than "twice the size of the
1791 * target buffer we're reading from".
1793 qemu_log_mask(LOG_UNIMP,
1794 ("Unsupported ancillary data %d/%d: "
1795 "unhandled msg size\n"),
1796 tswap32(target_cmsg->cmsg_level),
1797 tswap32(target_cmsg->cmsg_type));
1798 break;
1801 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1802 cmsg->cmsg_level = SOL_SOCKET;
1803 } else {
1804 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1806 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1807 cmsg->cmsg_len = CMSG_LEN(len);
1809 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1810 int *fd = (int *)data;
1811 int *target_fd = (int *)target_data;
1812 int i, numfds = len / sizeof(int);
1814 for (i = 0; i < numfds; i++) {
1815 __get_user(fd[i], target_fd + i);
1817 } else if (cmsg->cmsg_level == SOL_SOCKET
1818 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1819 struct ucred *cred = (struct ucred *)data;
1820 struct target_ucred *target_cred =
1821 (struct target_ucred *)target_data;
1823 __get_user(cred->pid, &target_cred->pid);
1824 __get_user(cred->uid, &target_cred->uid);
1825 __get_user(cred->gid, &target_cred->gid);
1826 } else {
1827 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1828 cmsg->cmsg_level, cmsg->cmsg_type);
1829 memcpy(data, target_data, len);
1832 cmsg = CMSG_NXTHDR(msgh, cmsg);
1833 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1834 target_cmsg_start);
1836 unlock_user(target_cmsg, target_cmsg_addr, 0);
1837 the_end:
1838 msgh->msg_controllen = space;
1839 return 0;
1842 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1843 struct msghdr *msgh)
1845 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1846 abi_long msg_controllen;
1847 abi_ulong target_cmsg_addr;
1848 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1849 socklen_t space = 0;
1851 msg_controllen = tswapal(target_msgh->msg_controllen);
1852 if (msg_controllen < sizeof (struct target_cmsghdr))
1853 goto the_end;
1854 target_cmsg_addr = tswapal(target_msgh->msg_control);
1855 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1856 target_cmsg_start = target_cmsg;
1857 if (!target_cmsg)
1858 return -TARGET_EFAULT;
1860 while (cmsg && target_cmsg) {
1861 void *data = CMSG_DATA(cmsg);
1862 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1864 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1865 int tgt_len, tgt_space;
1867 /* We never copy a half-header but may copy half-data;
1868 * this is Linux's behaviour in put_cmsg(). Note that
1869 * truncation here is a guest problem (which we report
1870 * to the guest via the CTRUNC bit), unlike truncation
1871 * in target_to_host_cmsg, which is a QEMU bug.
1873 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1874 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1875 break;
1878 if (cmsg->cmsg_level == SOL_SOCKET) {
1879 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1880 } else {
1881 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1883 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1885 /* Payload types which need a different size of payload on
1886 * the target must adjust tgt_len here.
1888 tgt_len = len;
1889 switch (cmsg->cmsg_level) {
1890 case SOL_SOCKET:
1891 switch (cmsg->cmsg_type) {
1892 case SO_TIMESTAMP:
1893 tgt_len = sizeof(struct target_timeval);
1894 break;
1895 default:
1896 break;
1898 break;
1899 default:
1900 break;
1903 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1904 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1905 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1908 /* We must now copy-and-convert len bytes of payload
1909 * into tgt_len bytes of destination space. Bear in mind
1910 * that in both source and destination we may be dealing
1911 * with a truncated value!
1913 switch (cmsg->cmsg_level) {
1914 case SOL_SOCKET:
1915 switch (cmsg->cmsg_type) {
1916 case SCM_RIGHTS:
1918 int *fd = (int *)data;
1919 int *target_fd = (int *)target_data;
1920 int i, numfds = tgt_len / sizeof(int);
1922 for (i = 0; i < numfds; i++) {
1923 __put_user(fd[i], target_fd + i);
1925 break;
1927 case SO_TIMESTAMP:
1929 struct timeval *tv = (struct timeval *)data;
1930 struct target_timeval *target_tv =
1931 (struct target_timeval *)target_data;
1933 if (len != sizeof(struct timeval) ||
1934 tgt_len != sizeof(struct target_timeval)) {
1935 goto unimplemented;
1938 /* copy struct timeval to target */
1939 __put_user(tv->tv_sec, &target_tv->tv_sec);
1940 __put_user(tv->tv_usec, &target_tv->tv_usec);
1941 break;
1943 case SCM_CREDENTIALS:
1945 struct ucred *cred = (struct ucred *)data;
1946 struct target_ucred *target_cred =
1947 (struct target_ucred *)target_data;
1949 __put_user(cred->pid, &target_cred->pid);
1950 __put_user(cred->uid, &target_cred->uid);
1951 __put_user(cred->gid, &target_cred->gid);
1952 break;
1954 default:
1955 goto unimplemented;
1957 break;
1959 case SOL_IP:
1960 switch (cmsg->cmsg_type) {
1961 case IP_TTL:
1963 uint32_t *v = (uint32_t *)data;
1964 uint32_t *t_int = (uint32_t *)target_data;
1966 if (len != sizeof(uint32_t) ||
1967 tgt_len != sizeof(uint32_t)) {
1968 goto unimplemented;
1970 __put_user(*v, t_int);
1971 break;
1973 case IP_RECVERR:
1975 struct errhdr_t {
1976 struct sock_extended_err ee;
1977 struct sockaddr_in offender;
1979 struct errhdr_t *errh = (struct errhdr_t *)data;
1980 struct errhdr_t *target_errh =
1981 (struct errhdr_t *)target_data;
1983 if (len != sizeof(struct errhdr_t) ||
1984 tgt_len != sizeof(struct errhdr_t)) {
1985 goto unimplemented;
1987 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1988 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1989 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1990 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1991 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1992 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1993 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1994 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1995 (void *) &errh->offender, sizeof(errh->offender));
1996 break;
1998 default:
1999 goto unimplemented;
2001 break;
2003 case SOL_IPV6:
2004 switch (cmsg->cmsg_type) {
2005 case IPV6_HOPLIMIT:
2007 uint32_t *v = (uint32_t *)data;
2008 uint32_t *t_int = (uint32_t *)target_data;
2010 if (len != sizeof(uint32_t) ||
2011 tgt_len != sizeof(uint32_t)) {
2012 goto unimplemented;
2014 __put_user(*v, t_int);
2015 break;
2017 case IPV6_RECVERR:
2019 struct errhdr6_t {
2020 struct sock_extended_err ee;
2021 struct sockaddr_in6 offender;
2023 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2024 struct errhdr6_t *target_errh =
2025 (struct errhdr6_t *)target_data;
2027 if (len != sizeof(struct errhdr6_t) ||
2028 tgt_len != sizeof(struct errhdr6_t)) {
2029 goto unimplemented;
2031 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2032 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2033 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2034 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2035 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2036 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2037 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2038 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2039 (void *) &errh->offender, sizeof(errh->offender));
2040 break;
2042 default:
2043 goto unimplemented;
2045 break;
2047 default:
2048 unimplemented:
2049 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2050 cmsg->cmsg_level, cmsg->cmsg_type);
2051 memcpy(target_data, data, MIN(len, tgt_len));
2052 if (tgt_len > len) {
2053 memset(target_data + len, 0, tgt_len - len);
2057 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2058 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2059 if (msg_controllen < tgt_space) {
2060 tgt_space = msg_controllen;
2062 msg_controllen -= tgt_space;
2063 space += tgt_space;
2064 cmsg = CMSG_NXTHDR(msgh, cmsg);
2065 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2066 target_cmsg_start);
2068 unlock_user(target_cmsg, target_cmsg_addr, space);
2069 the_end:
2070 target_msgh->msg_controllen = tswapal(space);
2071 return 0;
2074 /* do_setsockopt() Must return target values and target errnos. */
2075 static abi_long do_setsockopt(int sockfd, int level, int optname,
2076 abi_ulong optval_addr, socklen_t optlen)
2078 abi_long ret;
2079 int val;
2080 struct ip_mreqn *ip_mreq;
2081 struct ip_mreq_source *ip_mreq_source;
2083 switch(level) {
2084 case SOL_TCP:
2085 case SOL_UDP:
2086 /* TCP and UDP options all take an 'int' value. */
2087 if (optlen < sizeof(uint32_t))
2088 return -TARGET_EINVAL;
2090 if (get_user_u32(val, optval_addr))
2091 return -TARGET_EFAULT;
2092 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2093 break;
2094 case SOL_IP:
2095 switch(optname) {
2096 case IP_TOS:
2097 case IP_TTL:
2098 case IP_HDRINCL:
2099 case IP_ROUTER_ALERT:
2100 case IP_RECVOPTS:
2101 case IP_RETOPTS:
2102 case IP_PKTINFO:
2103 case IP_MTU_DISCOVER:
2104 case IP_RECVERR:
2105 case IP_RECVTTL:
2106 case IP_RECVTOS:
2107 #ifdef IP_FREEBIND
2108 case IP_FREEBIND:
2109 #endif
2110 case IP_MULTICAST_TTL:
2111 case IP_MULTICAST_LOOP:
2112 val = 0;
2113 if (optlen >= sizeof(uint32_t)) {
2114 if (get_user_u32(val, optval_addr))
2115 return -TARGET_EFAULT;
2116 } else if (optlen >= 1) {
2117 if (get_user_u8(val, optval_addr))
2118 return -TARGET_EFAULT;
2120 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2121 break;
2122 case IP_ADD_MEMBERSHIP:
2123 case IP_DROP_MEMBERSHIP:
2124 if (optlen < sizeof (struct target_ip_mreq) ||
2125 optlen > sizeof (struct target_ip_mreqn))
2126 return -TARGET_EINVAL;
2128 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2129 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2130 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2131 break;
2133 case IP_BLOCK_SOURCE:
2134 case IP_UNBLOCK_SOURCE:
2135 case IP_ADD_SOURCE_MEMBERSHIP:
2136 case IP_DROP_SOURCE_MEMBERSHIP:
2137 if (optlen != sizeof (struct target_ip_mreq_source))
2138 return -TARGET_EINVAL;
2140 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2141 if (!ip_mreq_source) {
2142 return -TARGET_EFAULT;
2144 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2145 unlock_user (ip_mreq_source, optval_addr, 0);
2146 break;
2148 default:
2149 goto unimplemented;
2151 break;
2152 case SOL_IPV6:
2153 switch (optname) {
2154 case IPV6_MTU_DISCOVER:
2155 case IPV6_MTU:
2156 case IPV6_V6ONLY:
2157 case IPV6_RECVPKTINFO:
2158 case IPV6_UNICAST_HOPS:
2159 case IPV6_MULTICAST_HOPS:
2160 case IPV6_MULTICAST_LOOP:
2161 case IPV6_RECVERR:
2162 case IPV6_RECVHOPLIMIT:
2163 case IPV6_2292HOPLIMIT:
2164 case IPV6_CHECKSUM:
2165 case IPV6_ADDRFORM:
2166 case IPV6_2292PKTINFO:
2167 case IPV6_RECVTCLASS:
2168 case IPV6_RECVRTHDR:
2169 case IPV6_2292RTHDR:
2170 case IPV6_RECVHOPOPTS:
2171 case IPV6_2292HOPOPTS:
2172 case IPV6_RECVDSTOPTS:
2173 case IPV6_2292DSTOPTS:
2174 case IPV6_TCLASS:
2175 case IPV6_ADDR_PREFERENCES:
2176 #ifdef IPV6_RECVPATHMTU
2177 case IPV6_RECVPATHMTU:
2178 #endif
2179 #ifdef IPV6_TRANSPARENT
2180 case IPV6_TRANSPARENT:
2181 #endif
2182 #ifdef IPV6_FREEBIND
2183 case IPV6_FREEBIND:
2184 #endif
2185 #ifdef IPV6_RECVORIGDSTADDR
2186 case IPV6_RECVORIGDSTADDR:
2187 #endif
2188 val = 0;
2189 if (optlen < sizeof(uint32_t)) {
2190 return -TARGET_EINVAL;
2192 if (get_user_u32(val, optval_addr)) {
2193 return -TARGET_EFAULT;
2195 ret = get_errno(setsockopt(sockfd, level, optname,
2196 &val, sizeof(val)));
2197 break;
2198 case IPV6_PKTINFO:
2200 struct in6_pktinfo pki;
2202 if (optlen < sizeof(pki)) {
2203 return -TARGET_EINVAL;
2206 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2207 return -TARGET_EFAULT;
2210 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2212 ret = get_errno(setsockopt(sockfd, level, optname,
2213 &pki, sizeof(pki)));
2214 break;
2216 case IPV6_ADD_MEMBERSHIP:
2217 case IPV6_DROP_MEMBERSHIP:
2219 struct ipv6_mreq ipv6mreq;
2221 if (optlen < sizeof(ipv6mreq)) {
2222 return -TARGET_EINVAL;
2225 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2226 return -TARGET_EFAULT;
2229 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2231 ret = get_errno(setsockopt(sockfd, level, optname,
2232 &ipv6mreq, sizeof(ipv6mreq)));
2233 break;
2235 default:
2236 goto unimplemented;
2238 break;
2239 case SOL_ICMPV6:
2240 switch (optname) {
2241 case ICMPV6_FILTER:
2243 struct icmp6_filter icmp6f;
2245 if (optlen > sizeof(icmp6f)) {
2246 optlen = sizeof(icmp6f);
2249 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2250 return -TARGET_EFAULT;
2253 for (val = 0; val < 8; val++) {
2254 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2257 ret = get_errno(setsockopt(sockfd, level, optname,
2258 &icmp6f, optlen));
2259 break;
2261 default:
2262 goto unimplemented;
2264 break;
2265 case SOL_RAW:
2266 switch (optname) {
2267 case ICMP_FILTER:
2268 case IPV6_CHECKSUM:
2269 /* those take an u32 value */
2270 if (optlen < sizeof(uint32_t)) {
2271 return -TARGET_EINVAL;
2274 if (get_user_u32(val, optval_addr)) {
2275 return -TARGET_EFAULT;
2277 ret = get_errno(setsockopt(sockfd, level, optname,
2278 &val, sizeof(val)));
2279 break;
2281 default:
2282 goto unimplemented;
2284 break;
2285 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2286 case SOL_ALG:
2287 switch (optname) {
2288 case ALG_SET_KEY:
2290 char *alg_key = g_malloc(optlen);
2292 if (!alg_key) {
2293 return -TARGET_ENOMEM;
2295 if (copy_from_user(alg_key, optval_addr, optlen)) {
2296 g_free(alg_key);
2297 return -TARGET_EFAULT;
2299 ret = get_errno(setsockopt(sockfd, level, optname,
2300 alg_key, optlen));
2301 g_free(alg_key);
2302 break;
2304 case ALG_SET_AEAD_AUTHSIZE:
2306 ret = get_errno(setsockopt(sockfd, level, optname,
2307 NULL, optlen));
2308 break;
2310 default:
2311 goto unimplemented;
2313 break;
2314 #endif
2315 case TARGET_SOL_SOCKET:
2316 switch (optname) {
2317 case TARGET_SO_RCVTIMEO:
2319 struct timeval tv;
2321 optname = SO_RCVTIMEO;
2323 set_timeout:
2324 if (optlen != sizeof(struct target_timeval)) {
2325 return -TARGET_EINVAL;
2328 if (copy_from_user_timeval(&tv, optval_addr)) {
2329 return -TARGET_EFAULT;
2332 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2333 &tv, sizeof(tv)));
2334 return ret;
2336 case TARGET_SO_SNDTIMEO:
2337 optname = SO_SNDTIMEO;
2338 goto set_timeout;
2339 case TARGET_SO_ATTACH_FILTER:
2341 struct target_sock_fprog *tfprog;
2342 struct target_sock_filter *tfilter;
2343 struct sock_fprog fprog;
2344 struct sock_filter *filter;
2345 int i;
2347 if (optlen != sizeof(*tfprog)) {
2348 return -TARGET_EINVAL;
2350 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2351 return -TARGET_EFAULT;
2353 if (!lock_user_struct(VERIFY_READ, tfilter,
2354 tswapal(tfprog->filter), 0)) {
2355 unlock_user_struct(tfprog, optval_addr, 1);
2356 return -TARGET_EFAULT;
2359 fprog.len = tswap16(tfprog->len);
2360 filter = g_try_new(struct sock_filter, fprog.len);
2361 if (filter == NULL) {
2362 unlock_user_struct(tfilter, tfprog->filter, 1);
2363 unlock_user_struct(tfprog, optval_addr, 1);
2364 return -TARGET_ENOMEM;
2366 for (i = 0; i < fprog.len; i++) {
2367 filter[i].code = tswap16(tfilter[i].code);
2368 filter[i].jt = tfilter[i].jt;
2369 filter[i].jf = tfilter[i].jf;
2370 filter[i].k = tswap32(tfilter[i].k);
2372 fprog.filter = filter;
2374 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2375 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2376 g_free(filter);
2378 unlock_user_struct(tfilter, tfprog->filter, 1);
2379 unlock_user_struct(tfprog, optval_addr, 1);
2380 return ret;
2382 case TARGET_SO_BINDTODEVICE:
2384 char *dev_ifname, *addr_ifname;
2386 if (optlen > IFNAMSIZ - 1) {
2387 optlen = IFNAMSIZ - 1;
2389 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2390 if (!dev_ifname) {
2391 return -TARGET_EFAULT;
2393 optname = SO_BINDTODEVICE;
2394 addr_ifname = alloca(IFNAMSIZ);
2395 memcpy(addr_ifname, dev_ifname, optlen);
2396 addr_ifname[optlen] = 0;
2397 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2398 addr_ifname, optlen));
2399 unlock_user (dev_ifname, optval_addr, 0);
2400 return ret;
2402 case TARGET_SO_LINGER:
2404 struct linger lg;
2405 struct target_linger *tlg;
2407 if (optlen != sizeof(struct target_linger)) {
2408 return -TARGET_EINVAL;
2410 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2411 return -TARGET_EFAULT;
2413 __get_user(lg.l_onoff, &tlg->l_onoff);
2414 __get_user(lg.l_linger, &tlg->l_linger);
2415 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2416 &lg, sizeof(lg)));
2417 unlock_user_struct(tlg, optval_addr, 0);
2418 return ret;
2420 /* Options with 'int' argument. */
2421 case TARGET_SO_DEBUG:
2422 optname = SO_DEBUG;
2423 break;
2424 case TARGET_SO_REUSEADDR:
2425 optname = SO_REUSEADDR;
2426 break;
2427 #ifdef SO_REUSEPORT
2428 case TARGET_SO_REUSEPORT:
2429 optname = SO_REUSEPORT;
2430 break;
2431 #endif
2432 case TARGET_SO_TYPE:
2433 optname = SO_TYPE;
2434 break;
2435 case TARGET_SO_ERROR:
2436 optname = SO_ERROR;
2437 break;
2438 case TARGET_SO_DONTROUTE:
2439 optname = SO_DONTROUTE;
2440 break;
2441 case TARGET_SO_BROADCAST:
2442 optname = SO_BROADCAST;
2443 break;
2444 case TARGET_SO_SNDBUF:
2445 optname = SO_SNDBUF;
2446 break;
2447 case TARGET_SO_SNDBUFFORCE:
2448 optname = SO_SNDBUFFORCE;
2449 break;
2450 case TARGET_SO_RCVBUF:
2451 optname = SO_RCVBUF;
2452 break;
2453 case TARGET_SO_RCVBUFFORCE:
2454 optname = SO_RCVBUFFORCE;
2455 break;
2456 case TARGET_SO_KEEPALIVE:
2457 optname = SO_KEEPALIVE;
2458 break;
2459 case TARGET_SO_OOBINLINE:
2460 optname = SO_OOBINLINE;
2461 break;
2462 case TARGET_SO_NO_CHECK:
2463 optname = SO_NO_CHECK;
2464 break;
2465 case TARGET_SO_PRIORITY:
2466 optname = SO_PRIORITY;
2467 break;
2468 #ifdef SO_BSDCOMPAT
2469 case TARGET_SO_BSDCOMPAT:
2470 optname = SO_BSDCOMPAT;
2471 break;
2472 #endif
2473 case TARGET_SO_PASSCRED:
2474 optname = SO_PASSCRED;
2475 break;
2476 case TARGET_SO_PASSSEC:
2477 optname = SO_PASSSEC;
2478 break;
2479 case TARGET_SO_TIMESTAMP:
2480 optname = SO_TIMESTAMP;
2481 break;
2482 case TARGET_SO_RCVLOWAT:
2483 optname = SO_RCVLOWAT;
2484 break;
2485 default:
2486 goto unimplemented;
2488 if (optlen < sizeof(uint32_t))
2489 return -TARGET_EINVAL;
2491 if (get_user_u32(val, optval_addr))
2492 return -TARGET_EFAULT;
2493 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2494 break;
2495 #ifdef SOL_NETLINK
2496 case SOL_NETLINK:
2497 switch (optname) {
2498 case NETLINK_PKTINFO:
2499 case NETLINK_ADD_MEMBERSHIP:
2500 case NETLINK_DROP_MEMBERSHIP:
2501 case NETLINK_BROADCAST_ERROR:
2502 case NETLINK_NO_ENOBUFS:
2503 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2504 case NETLINK_LISTEN_ALL_NSID:
2505 case NETLINK_CAP_ACK:
2506 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2507 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2508 case NETLINK_EXT_ACK:
2509 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2510 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2511 case NETLINK_GET_STRICT_CHK:
2512 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2513 break;
2514 default:
2515 goto unimplemented;
2517 val = 0;
2518 if (optlen < sizeof(uint32_t)) {
2519 return -TARGET_EINVAL;
2521 if (get_user_u32(val, optval_addr)) {
2522 return -TARGET_EFAULT;
2524 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2525 sizeof(val)));
2526 break;
2527 #endif /* SOL_NETLINK */
2528 default:
2529 unimplemented:
2530 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2531 level, optname);
2532 ret = -TARGET_ENOPROTOOPT;
2534 return ret;
2537 /* do_getsockopt() Must return target values and target errnos. */
2538 static abi_long do_getsockopt(int sockfd, int level, int optname,
2539 abi_ulong optval_addr, abi_ulong optlen)
2541 abi_long ret;
2542 int len, val;
2543 socklen_t lv;
2545 switch(level) {
2546 case TARGET_SOL_SOCKET:
2547 level = SOL_SOCKET;
2548 switch (optname) {
2549 /* These don't just return a single integer */
2550 case TARGET_SO_PEERNAME:
2551 goto unimplemented;
2552 case TARGET_SO_RCVTIMEO: {
2553 struct timeval tv;
2554 socklen_t tvlen;
2556 optname = SO_RCVTIMEO;
2558 get_timeout:
2559 if (get_user_u32(len, optlen)) {
2560 return -TARGET_EFAULT;
2562 if (len < 0) {
2563 return -TARGET_EINVAL;
2566 tvlen = sizeof(tv);
2567 ret = get_errno(getsockopt(sockfd, level, optname,
2568 &tv, &tvlen));
2569 if (ret < 0) {
2570 return ret;
2572 if (len > sizeof(struct target_timeval)) {
2573 len = sizeof(struct target_timeval);
2575 if (copy_to_user_timeval(optval_addr, &tv)) {
2576 return -TARGET_EFAULT;
2578 if (put_user_u32(len, optlen)) {
2579 return -TARGET_EFAULT;
2581 break;
2583 case TARGET_SO_SNDTIMEO:
2584 optname = SO_SNDTIMEO;
2585 goto get_timeout;
2586 case TARGET_SO_PEERCRED: {
2587 struct ucred cr;
2588 socklen_t crlen;
2589 struct target_ucred *tcr;
2591 if (get_user_u32(len, optlen)) {
2592 return -TARGET_EFAULT;
2594 if (len < 0) {
2595 return -TARGET_EINVAL;
2598 crlen = sizeof(cr);
2599 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2600 &cr, &crlen));
2601 if (ret < 0) {
2602 return ret;
2604 if (len > crlen) {
2605 len = crlen;
2607 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2608 return -TARGET_EFAULT;
2610 __put_user(cr.pid, &tcr->pid);
2611 __put_user(cr.uid, &tcr->uid);
2612 __put_user(cr.gid, &tcr->gid);
2613 unlock_user_struct(tcr, optval_addr, 1);
2614 if (put_user_u32(len, optlen)) {
2615 return -TARGET_EFAULT;
2617 break;
2619 case TARGET_SO_PEERSEC: {
2620 char *name;
2622 if (get_user_u32(len, optlen)) {
2623 return -TARGET_EFAULT;
2625 if (len < 0) {
2626 return -TARGET_EINVAL;
2628 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2629 if (!name) {
2630 return -TARGET_EFAULT;
2632 lv = len;
2633 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2634 name, &lv));
2635 if (put_user_u32(lv, optlen)) {
2636 ret = -TARGET_EFAULT;
2638 unlock_user(name, optval_addr, lv);
2639 break;
2641 case TARGET_SO_LINGER:
2643 struct linger lg;
2644 socklen_t lglen;
2645 struct target_linger *tlg;
2647 if (get_user_u32(len, optlen)) {
2648 return -TARGET_EFAULT;
2650 if (len < 0) {
2651 return -TARGET_EINVAL;
2654 lglen = sizeof(lg);
2655 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2656 &lg, &lglen));
2657 if (ret < 0) {
2658 return ret;
2660 if (len > lglen) {
2661 len = lglen;
2663 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2664 return -TARGET_EFAULT;
2666 __put_user(lg.l_onoff, &tlg->l_onoff);
2667 __put_user(lg.l_linger, &tlg->l_linger);
2668 unlock_user_struct(tlg, optval_addr, 1);
2669 if (put_user_u32(len, optlen)) {
2670 return -TARGET_EFAULT;
2672 break;
2674 /* Options with 'int' argument. */
2675 case TARGET_SO_DEBUG:
2676 optname = SO_DEBUG;
2677 goto int_case;
2678 case TARGET_SO_REUSEADDR:
2679 optname = SO_REUSEADDR;
2680 goto int_case;
2681 #ifdef SO_REUSEPORT
2682 case TARGET_SO_REUSEPORT:
2683 optname = SO_REUSEPORT;
2684 goto int_case;
2685 #endif
2686 case TARGET_SO_TYPE:
2687 optname = SO_TYPE;
2688 goto int_case;
2689 case TARGET_SO_ERROR:
2690 optname = SO_ERROR;
2691 goto int_case;
2692 case TARGET_SO_DONTROUTE:
2693 optname = SO_DONTROUTE;
2694 goto int_case;
2695 case TARGET_SO_BROADCAST:
2696 optname = SO_BROADCAST;
2697 goto int_case;
2698 case TARGET_SO_SNDBUF:
2699 optname = SO_SNDBUF;
2700 goto int_case;
2701 case TARGET_SO_RCVBUF:
2702 optname = SO_RCVBUF;
2703 goto int_case;
2704 case TARGET_SO_KEEPALIVE:
2705 optname = SO_KEEPALIVE;
2706 goto int_case;
2707 case TARGET_SO_OOBINLINE:
2708 optname = SO_OOBINLINE;
2709 goto int_case;
2710 case TARGET_SO_NO_CHECK:
2711 optname = SO_NO_CHECK;
2712 goto int_case;
2713 case TARGET_SO_PRIORITY:
2714 optname = SO_PRIORITY;
2715 goto int_case;
2716 #ifdef SO_BSDCOMPAT
2717 case TARGET_SO_BSDCOMPAT:
2718 optname = SO_BSDCOMPAT;
2719 goto int_case;
2720 #endif
2721 case TARGET_SO_PASSCRED:
2722 optname = SO_PASSCRED;
2723 goto int_case;
2724 case TARGET_SO_TIMESTAMP:
2725 optname = SO_TIMESTAMP;
2726 goto int_case;
2727 case TARGET_SO_RCVLOWAT:
2728 optname = SO_RCVLOWAT;
2729 goto int_case;
2730 case TARGET_SO_ACCEPTCONN:
2731 optname = SO_ACCEPTCONN;
2732 goto int_case;
2733 case TARGET_SO_PROTOCOL:
2734 optname = SO_PROTOCOL;
2735 goto int_case;
2736 case TARGET_SO_DOMAIN:
2737 optname = SO_DOMAIN;
2738 goto int_case;
2739 default:
2740 goto int_case;
2742 break;
2743 case SOL_TCP:
2744 case SOL_UDP:
2745 /* TCP and UDP options all take an 'int' value. */
2746 int_case:
2747 if (get_user_u32(len, optlen))
2748 return -TARGET_EFAULT;
2749 if (len < 0)
2750 return -TARGET_EINVAL;
2751 lv = sizeof(lv);
2752 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2753 if (ret < 0)
2754 return ret;
2755 if (optname == SO_TYPE) {
2756 val = host_to_target_sock_type(val);
2758 if (len > lv)
2759 len = lv;
2760 if (len == 4) {
2761 if (put_user_u32(val, optval_addr))
2762 return -TARGET_EFAULT;
2763 } else {
2764 if (put_user_u8(val, optval_addr))
2765 return -TARGET_EFAULT;
2767 if (put_user_u32(len, optlen))
2768 return -TARGET_EFAULT;
2769 break;
2770 case SOL_IP:
2771 switch(optname) {
2772 case IP_TOS:
2773 case IP_TTL:
2774 case IP_HDRINCL:
2775 case IP_ROUTER_ALERT:
2776 case IP_RECVOPTS:
2777 case IP_RETOPTS:
2778 case IP_PKTINFO:
2779 case IP_MTU_DISCOVER:
2780 case IP_RECVERR:
2781 case IP_RECVTOS:
2782 #ifdef IP_FREEBIND
2783 case IP_FREEBIND:
2784 #endif
2785 case IP_MULTICAST_TTL:
2786 case IP_MULTICAST_LOOP:
2787 if (get_user_u32(len, optlen))
2788 return -TARGET_EFAULT;
2789 if (len < 0)
2790 return -TARGET_EINVAL;
2791 lv = sizeof(lv);
2792 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2793 if (ret < 0)
2794 return ret;
2795 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2796 len = 1;
2797 if (put_user_u32(len, optlen)
2798 || put_user_u8(val, optval_addr))
2799 return -TARGET_EFAULT;
2800 } else {
2801 if (len > sizeof(int))
2802 len = sizeof(int);
2803 if (put_user_u32(len, optlen)
2804 || put_user_u32(val, optval_addr))
2805 return -TARGET_EFAULT;
2807 break;
2808 default:
2809 ret = -TARGET_ENOPROTOOPT;
2810 break;
2812 break;
2813 case SOL_IPV6:
2814 switch (optname) {
2815 case IPV6_MTU_DISCOVER:
2816 case IPV6_MTU:
2817 case IPV6_V6ONLY:
2818 case IPV6_RECVPKTINFO:
2819 case IPV6_UNICAST_HOPS:
2820 case IPV6_MULTICAST_HOPS:
2821 case IPV6_MULTICAST_LOOP:
2822 case IPV6_RECVERR:
2823 case IPV6_RECVHOPLIMIT:
2824 case IPV6_2292HOPLIMIT:
2825 case IPV6_CHECKSUM:
2826 case IPV6_ADDRFORM:
2827 case IPV6_2292PKTINFO:
2828 case IPV6_RECVTCLASS:
2829 case IPV6_RECVRTHDR:
2830 case IPV6_2292RTHDR:
2831 case IPV6_RECVHOPOPTS:
2832 case IPV6_2292HOPOPTS:
2833 case IPV6_RECVDSTOPTS:
2834 case IPV6_2292DSTOPTS:
2835 case IPV6_TCLASS:
2836 case IPV6_ADDR_PREFERENCES:
2837 #ifdef IPV6_RECVPATHMTU
2838 case IPV6_RECVPATHMTU:
2839 #endif
2840 #ifdef IPV6_TRANSPARENT
2841 case IPV6_TRANSPARENT:
2842 #endif
2843 #ifdef IPV6_FREEBIND
2844 case IPV6_FREEBIND:
2845 #endif
2846 #ifdef IPV6_RECVORIGDSTADDR
2847 case IPV6_RECVORIGDSTADDR:
2848 #endif
2849 if (get_user_u32(len, optlen))
2850 return -TARGET_EFAULT;
2851 if (len < 0)
2852 return -TARGET_EINVAL;
2853 lv = sizeof(lv);
2854 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2855 if (ret < 0)
2856 return ret;
2857 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2858 len = 1;
2859 if (put_user_u32(len, optlen)
2860 || put_user_u8(val, optval_addr))
2861 return -TARGET_EFAULT;
2862 } else {
2863 if (len > sizeof(int))
2864 len = sizeof(int);
2865 if (put_user_u32(len, optlen)
2866 || put_user_u32(val, optval_addr))
2867 return -TARGET_EFAULT;
2869 break;
2870 default:
2871 ret = -TARGET_ENOPROTOOPT;
2872 break;
2874 break;
2875 #ifdef SOL_NETLINK
2876 case SOL_NETLINK:
2877 switch (optname) {
2878 case NETLINK_PKTINFO:
2879 case NETLINK_BROADCAST_ERROR:
2880 case NETLINK_NO_ENOBUFS:
2881 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2882 case NETLINK_LISTEN_ALL_NSID:
2883 case NETLINK_CAP_ACK:
2884 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2885 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2886 case NETLINK_EXT_ACK:
2887 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2888 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2889 case NETLINK_GET_STRICT_CHK:
2890 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2891 if (get_user_u32(len, optlen)) {
2892 return -TARGET_EFAULT;
2894 if (len != sizeof(val)) {
2895 return -TARGET_EINVAL;
2897 lv = len;
2898 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2899 if (ret < 0) {
2900 return ret;
2902 if (put_user_u32(lv, optlen)
2903 || put_user_u32(val, optval_addr)) {
2904 return -TARGET_EFAULT;
2906 break;
2907 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2908 case NETLINK_LIST_MEMBERSHIPS:
2910 uint32_t *results;
2911 int i;
2912 if (get_user_u32(len, optlen)) {
2913 return -TARGET_EFAULT;
2915 if (len < 0) {
2916 return -TARGET_EINVAL;
2918 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2919 if (!results && len > 0) {
2920 return -TARGET_EFAULT;
2922 lv = len;
2923 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2924 if (ret < 0) {
2925 unlock_user(results, optval_addr, 0);
2926 return ret;
2928 /* swap host endianess to target endianess. */
2929 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2930 results[i] = tswap32(results[i]);
2932 if (put_user_u32(lv, optlen)) {
2933 return -TARGET_EFAULT;
2935 unlock_user(results, optval_addr, 0);
2936 break;
2938 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2939 default:
2940 goto unimplemented;
2942 break;
2943 #endif /* SOL_NETLINK */
2944 default:
2945 unimplemented:
2946 qemu_log_mask(LOG_UNIMP,
2947 "getsockopt level=%d optname=%d not yet supported\n",
2948 level, optname);
2949 ret = -TARGET_EOPNOTSUPP;
2950 break;
2952 return ret;
2955 /* Convert target low/high pair representing file offset into the host
2956 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2957 * as the kernel doesn't handle them either.
2959 static void target_to_host_low_high(abi_ulong tlow,
2960 abi_ulong thigh,
2961 unsigned long *hlow,
2962 unsigned long *hhigh)
2964 uint64_t off = tlow |
2965 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2966 TARGET_LONG_BITS / 2;
2968 *hlow = off;
2969 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2972 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2973 abi_ulong count, int copy)
2975 struct target_iovec *target_vec;
2976 struct iovec *vec;
2977 abi_ulong total_len, max_len;
2978 int i;
2979 int err = 0;
2980 bool bad_address = false;
2982 if (count == 0) {
2983 errno = 0;
2984 return NULL;
2986 if (count > IOV_MAX) {
2987 errno = EINVAL;
2988 return NULL;
2991 vec = g_try_new0(struct iovec, count);
2992 if (vec == NULL) {
2993 errno = ENOMEM;
2994 return NULL;
2997 target_vec = lock_user(VERIFY_READ, target_addr,
2998 count * sizeof(struct target_iovec), 1);
2999 if (target_vec == NULL) {
3000 err = EFAULT;
3001 goto fail2;
3004 /* ??? If host page size > target page size, this will result in a
3005 value larger than what we can actually support. */
3006 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3007 total_len = 0;
3009 for (i = 0; i < count; i++) {
3010 abi_ulong base = tswapal(target_vec[i].iov_base);
3011 abi_long len = tswapal(target_vec[i].iov_len);
3013 if (len < 0) {
3014 err = EINVAL;
3015 goto fail;
3016 } else if (len == 0) {
3017 /* Zero length pointer is ignored. */
3018 vec[i].iov_base = 0;
3019 } else {
3020 vec[i].iov_base = lock_user(type, base, len, copy);
3021 /* If the first buffer pointer is bad, this is a fault. But
3022 * subsequent bad buffers will result in a partial write; this
3023 * is realized by filling the vector with null pointers and
3024 * zero lengths. */
3025 if (!vec[i].iov_base) {
3026 if (i == 0) {
3027 err = EFAULT;
3028 goto fail;
3029 } else {
3030 bad_address = true;
3033 if (bad_address) {
3034 len = 0;
3036 if (len > max_len - total_len) {
3037 len = max_len - total_len;
3040 vec[i].iov_len = len;
3041 total_len += len;
3044 unlock_user(target_vec, target_addr, 0);
3045 return vec;
3047 fail:
3048 while (--i >= 0) {
3049 if (tswapal(target_vec[i].iov_len) > 0) {
3050 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3053 unlock_user(target_vec, target_addr, 0);
3054 fail2:
3055 g_free(vec);
3056 errno = err;
3057 return NULL;
3060 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3061 abi_ulong count, int copy)
3063 struct target_iovec *target_vec;
3064 int i;
3066 target_vec = lock_user(VERIFY_READ, target_addr,
3067 count * sizeof(struct target_iovec), 1);
3068 if (target_vec) {
3069 for (i = 0; i < count; i++) {
3070 abi_ulong base = tswapal(target_vec[i].iov_base);
3071 abi_long len = tswapal(target_vec[i].iov_len);
3072 if (len < 0) {
3073 break;
3075 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3077 unlock_user(target_vec, target_addr, 0);
3080 g_free(vec);
3083 static inline int target_to_host_sock_type(int *type)
3085 int host_type = 0;
3086 int target_type = *type;
3088 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3089 case TARGET_SOCK_DGRAM:
3090 host_type = SOCK_DGRAM;
3091 break;
3092 case TARGET_SOCK_STREAM:
3093 host_type = SOCK_STREAM;
3094 break;
3095 default:
3096 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3097 break;
3099 if (target_type & TARGET_SOCK_CLOEXEC) {
3100 #if defined(SOCK_CLOEXEC)
3101 host_type |= SOCK_CLOEXEC;
3102 #else
3103 return -TARGET_EINVAL;
3104 #endif
3106 if (target_type & TARGET_SOCK_NONBLOCK) {
3107 #if defined(SOCK_NONBLOCK)
3108 host_type |= SOCK_NONBLOCK;
3109 #elif !defined(O_NONBLOCK)
3110 return -TARGET_EINVAL;
3111 #endif
3113 *type = host_type;
3114 return 0;
3117 /* Try to emulate socket type flags after socket creation. */
3118 static int sock_flags_fixup(int fd, int target_type)
3120 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3121 if (target_type & TARGET_SOCK_NONBLOCK) {
3122 int flags = fcntl(fd, F_GETFL);
3123 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3124 close(fd);
3125 return -TARGET_EINVAL;
3128 #endif
3129 return fd;
3132 /* do_socket() Must return target values and target errnos. */
3133 static abi_long do_socket(int domain, int type, int protocol)
3135 int target_type = type;
3136 int ret;
3138 ret = target_to_host_sock_type(&type);
3139 if (ret) {
3140 return ret;
3143 if (domain == PF_NETLINK && !(
3144 #ifdef CONFIG_RTNETLINK
3145 protocol == NETLINK_ROUTE ||
3146 #endif
3147 protocol == NETLINK_KOBJECT_UEVENT ||
3148 protocol == NETLINK_AUDIT)) {
3149 return -TARGET_EPROTONOSUPPORT;
3152 if (domain == AF_PACKET ||
3153 (domain == AF_INET && type == SOCK_PACKET)) {
3154 protocol = tswap16(protocol);
3157 ret = get_errno(socket(domain, type, protocol));
3158 if (ret >= 0) {
3159 ret = sock_flags_fixup(ret, target_type);
3160 if (type == SOCK_PACKET) {
3161 /* Manage an obsolete case :
3162 * if socket type is SOCK_PACKET, bind by name
3164 fd_trans_register(ret, &target_packet_trans);
3165 } else if (domain == PF_NETLINK) {
3166 switch (protocol) {
3167 #ifdef CONFIG_RTNETLINK
3168 case NETLINK_ROUTE:
3169 fd_trans_register(ret, &target_netlink_route_trans);
3170 break;
3171 #endif
3172 case NETLINK_KOBJECT_UEVENT:
3173 /* nothing to do: messages are strings */
3174 break;
3175 case NETLINK_AUDIT:
3176 fd_trans_register(ret, &target_netlink_audit_trans);
3177 break;
3178 default:
3179 g_assert_not_reached();
3183 return ret;
3186 /* do_bind() Must return target values and target errnos. */
3187 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3188 socklen_t addrlen)
3190 void *addr;
3191 abi_long ret;
3193 if ((int)addrlen < 0) {
3194 return -TARGET_EINVAL;
3197 addr = alloca(addrlen+1);
3199 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3200 if (ret)
3201 return ret;
3203 return get_errno(bind(sockfd, addr, addrlen));
3206 /* do_connect() Must return target values and target errnos. */
3207 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3208 socklen_t addrlen)
3210 void *addr;
3211 abi_long ret;
3213 if ((int)addrlen < 0) {
3214 return -TARGET_EINVAL;
3217 addr = alloca(addrlen+1);
3219 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3220 if (ret)
3221 return ret;
3223 return get_errno(safe_connect(sockfd, addr, addrlen));
3226 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3227 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3228 int flags, int send)
3230 abi_long ret, len;
3231 struct msghdr msg;
3232 abi_ulong count;
3233 struct iovec *vec;
3234 abi_ulong target_vec;
3236 if (msgp->msg_name) {
3237 msg.msg_namelen = tswap32(msgp->msg_namelen);
3238 msg.msg_name = alloca(msg.msg_namelen+1);
3239 ret = target_to_host_sockaddr(fd, msg.msg_name,
3240 tswapal(msgp->msg_name),
3241 msg.msg_namelen);
3242 if (ret == -TARGET_EFAULT) {
3243 /* For connected sockets msg_name and msg_namelen must
3244 * be ignored, so returning EFAULT immediately is wrong.
3245 * Instead, pass a bad msg_name to the host kernel, and
3246 * let it decide whether to return EFAULT or not.
3248 msg.msg_name = (void *)-1;
3249 } else if (ret) {
3250 goto out2;
3252 } else {
3253 msg.msg_name = NULL;
3254 msg.msg_namelen = 0;
3256 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3257 msg.msg_control = alloca(msg.msg_controllen);
3258 memset(msg.msg_control, 0, msg.msg_controllen);
3260 msg.msg_flags = tswap32(msgp->msg_flags);
3262 count = tswapal(msgp->msg_iovlen);
3263 target_vec = tswapal(msgp->msg_iov);
3265 if (count > IOV_MAX) {
3266 /* sendrcvmsg returns a different errno for this condition than
3267 * readv/writev, so we must catch it here before lock_iovec() does.
3269 ret = -TARGET_EMSGSIZE;
3270 goto out2;
3273 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3274 target_vec, count, send);
3275 if (vec == NULL) {
3276 ret = -host_to_target_errno(errno);
3277 goto out2;
3279 msg.msg_iovlen = count;
3280 msg.msg_iov = vec;
3282 if (send) {
3283 if (fd_trans_target_to_host_data(fd)) {
3284 void *host_msg;
3286 host_msg = g_malloc(msg.msg_iov->iov_len);
3287 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3288 ret = fd_trans_target_to_host_data(fd)(host_msg,
3289 msg.msg_iov->iov_len);
3290 if (ret >= 0) {
3291 msg.msg_iov->iov_base = host_msg;
3292 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3294 g_free(host_msg);
3295 } else {
3296 ret = target_to_host_cmsg(&msg, msgp);
3297 if (ret == 0) {
3298 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3301 } else {
3302 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3303 if (!is_error(ret)) {
3304 len = ret;
3305 if (fd_trans_host_to_target_data(fd)) {
3306 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3307 MIN(msg.msg_iov->iov_len, len));
3308 } else {
3309 ret = host_to_target_cmsg(msgp, &msg);
3311 if (!is_error(ret)) {
3312 msgp->msg_namelen = tswap32(msg.msg_namelen);
3313 msgp->msg_flags = tswap32(msg.msg_flags);
3314 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3315 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3316 msg.msg_name, msg.msg_namelen);
3317 if (ret) {
3318 goto out;
3322 ret = len;
3327 out:
3328 unlock_iovec(vec, target_vec, count, !send);
3329 out2:
3330 return ret;
3333 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3334 int flags, int send)
3336 abi_long ret;
3337 struct target_msghdr *msgp;
3339 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3340 msgp,
3341 target_msg,
3342 send ? 1 : 0)) {
3343 return -TARGET_EFAULT;
3345 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3346 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3347 return ret;
3350 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3351 * so it might not have this *mmsg-specific flag either.
3353 #ifndef MSG_WAITFORONE
3354 #define MSG_WAITFORONE 0x10000
3355 #endif
3357 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3358 unsigned int vlen, unsigned int flags,
3359 int send)
3361 struct target_mmsghdr *mmsgp;
3362 abi_long ret = 0;
3363 int i;
3365 if (vlen > UIO_MAXIOV) {
3366 vlen = UIO_MAXIOV;
3369 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3370 if (!mmsgp) {
3371 return -TARGET_EFAULT;
3374 for (i = 0; i < vlen; i++) {
3375 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3376 if (is_error(ret)) {
3377 break;
3379 mmsgp[i].msg_len = tswap32(ret);
3380 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3381 if (flags & MSG_WAITFORONE) {
3382 flags |= MSG_DONTWAIT;
3386 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3388 /* Return number of datagrams sent if we sent any at all;
3389 * otherwise return the error.
3391 if (i) {
3392 return i;
3394 return ret;
3397 /* do_accept4() Must return target values and target errnos. */
3398 static abi_long do_accept4(int fd, abi_ulong target_addr,
3399 abi_ulong target_addrlen_addr, int flags)
3401 socklen_t addrlen, ret_addrlen;
3402 void *addr;
3403 abi_long ret;
3404 int host_flags;
3406 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3408 if (target_addr == 0) {
3409 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3412 /* linux returns EFAULT if addrlen pointer is invalid */
3413 if (get_user_u32(addrlen, target_addrlen_addr))
3414 return -TARGET_EFAULT;
3416 if ((int)addrlen < 0) {
3417 return -TARGET_EINVAL;
3420 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3421 return -TARGET_EFAULT;
3424 addr = alloca(addrlen);
3426 ret_addrlen = addrlen;
3427 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3428 if (!is_error(ret)) {
3429 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3430 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3431 ret = -TARGET_EFAULT;
3434 return ret;
3437 /* do_getpeername() Must return target values and target errnos. */
3438 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3439 abi_ulong target_addrlen_addr)
3441 socklen_t addrlen, ret_addrlen;
3442 void *addr;
3443 abi_long ret;
3445 if (get_user_u32(addrlen, target_addrlen_addr))
3446 return -TARGET_EFAULT;
3448 if ((int)addrlen < 0) {
3449 return -TARGET_EINVAL;
3452 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3453 return -TARGET_EFAULT;
3456 addr = alloca(addrlen);
3458 ret_addrlen = addrlen;
3459 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3460 if (!is_error(ret)) {
3461 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3462 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3463 ret = -TARGET_EFAULT;
3466 return ret;
3469 /* do_getsockname() Must return target values and target errnos. */
3470 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3471 abi_ulong target_addrlen_addr)
3473 socklen_t addrlen, ret_addrlen;
3474 void *addr;
3475 abi_long ret;
3477 if (get_user_u32(addrlen, target_addrlen_addr))
3478 return -TARGET_EFAULT;
3480 if ((int)addrlen < 0) {
3481 return -TARGET_EINVAL;
3484 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3485 return -TARGET_EFAULT;
3488 addr = alloca(addrlen);
3490 ret_addrlen = addrlen;
3491 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3492 if (!is_error(ret)) {
3493 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3494 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3495 ret = -TARGET_EFAULT;
3498 return ret;
3501 /* do_socketpair() Must return target values and target errnos. */
3502 static abi_long do_socketpair(int domain, int type, int protocol,
3503 abi_ulong target_tab_addr)
3505 int tab[2];
3506 abi_long ret;
3508 target_to_host_sock_type(&type);
3510 ret = get_errno(socketpair(domain, type, protocol, tab));
3511 if (!is_error(ret)) {
3512 if (put_user_s32(tab[0], target_tab_addr)
3513 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3514 ret = -TARGET_EFAULT;
3516 return ret;
3519 /* do_sendto() Must return target values and target errnos. */
3520 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3521 abi_ulong target_addr, socklen_t addrlen)
3523 void *addr;
3524 void *host_msg;
3525 void *copy_msg = NULL;
3526 abi_long ret;
3528 if ((int)addrlen < 0) {
3529 return -TARGET_EINVAL;
3532 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3533 if (!host_msg)
3534 return -TARGET_EFAULT;
3535 if (fd_trans_target_to_host_data(fd)) {
3536 copy_msg = host_msg;
3537 host_msg = g_malloc(len);
3538 memcpy(host_msg, copy_msg, len);
3539 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3540 if (ret < 0) {
3541 goto fail;
3544 if (target_addr) {
3545 addr = alloca(addrlen+1);
3546 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3547 if (ret) {
3548 goto fail;
3550 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3551 } else {
3552 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3554 fail:
3555 if (copy_msg) {
3556 g_free(host_msg);
3557 host_msg = copy_msg;
3559 unlock_user(host_msg, msg, 0);
3560 return ret;
3563 /* do_recvfrom() Must return target values and target errnos. */
3564 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3565 abi_ulong target_addr,
3566 abi_ulong target_addrlen)
3568 socklen_t addrlen, ret_addrlen;
3569 void *addr;
3570 void *host_msg;
3571 abi_long ret;
3573 if (!msg) {
3574 host_msg = NULL;
3575 } else {
3576 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3577 if (!host_msg) {
3578 return -TARGET_EFAULT;
3581 if (target_addr) {
3582 if (get_user_u32(addrlen, target_addrlen)) {
3583 ret = -TARGET_EFAULT;
3584 goto fail;
3586 if ((int)addrlen < 0) {
3587 ret = -TARGET_EINVAL;
3588 goto fail;
3590 addr = alloca(addrlen);
3591 ret_addrlen = addrlen;
3592 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3593 addr, &ret_addrlen));
3594 } else {
3595 addr = NULL; /* To keep compiler quiet. */
3596 addrlen = 0; /* To keep compiler quiet. */
3597 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3599 if (!is_error(ret)) {
3600 if (fd_trans_host_to_target_data(fd)) {
3601 abi_long trans;
3602 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3603 if (is_error(trans)) {
3604 ret = trans;
3605 goto fail;
3608 if (target_addr) {
3609 host_to_target_sockaddr(target_addr, addr,
3610 MIN(addrlen, ret_addrlen));
3611 if (put_user_u32(ret_addrlen, target_addrlen)) {
3612 ret = -TARGET_EFAULT;
3613 goto fail;
3616 unlock_user(host_msg, msg, len);
3617 } else {
3618 fail:
3619 unlock_user(host_msg, msg, 0);
3621 return ret;
3624 #ifdef TARGET_NR_socketcall
3625 /* do_socketcall() must return target values and target errnos. */
3626 static abi_long do_socketcall(int num, abi_ulong vptr)
3628 static const unsigned nargs[] = { /* number of arguments per operation */
3629 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3630 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3631 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3632 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3633 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3634 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3635 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3636 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3637 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3638 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3639 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3640 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3641 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3642 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3643 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3644 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3645 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3646 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3647 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3648 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3650 abi_long a[6]; /* max 6 args */
3651 unsigned i;
3653 /* check the range of the first argument num */
3654 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3655 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3656 return -TARGET_EINVAL;
3658 /* ensure we have space for args */
3659 if (nargs[num] > ARRAY_SIZE(a)) {
3660 return -TARGET_EINVAL;
3662 /* collect the arguments in a[] according to nargs[] */
3663 for (i = 0; i < nargs[num]; ++i) {
3664 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3665 return -TARGET_EFAULT;
3668 /* now when we have the args, invoke the appropriate underlying function */
3669 switch (num) {
3670 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3671 return do_socket(a[0], a[1], a[2]);
3672 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3673 return do_bind(a[0], a[1], a[2]);
3674 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3675 return do_connect(a[0], a[1], a[2]);
3676 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3677 return get_errno(listen(a[0], a[1]));
3678 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3679 return do_accept4(a[0], a[1], a[2], 0);
3680 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3681 return do_getsockname(a[0], a[1], a[2]);
3682 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3683 return do_getpeername(a[0], a[1], a[2]);
3684 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3685 return do_socketpair(a[0], a[1], a[2], a[3]);
3686 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3687 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3688 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3689 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3690 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3691 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3692 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3693 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3694 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3695 return get_errno(shutdown(a[0], a[1]));
3696 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3697 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3698 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3699 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3700 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3701 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3702 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3703 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3704 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3705 return do_accept4(a[0], a[1], a[2], a[3]);
3706 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3707 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3708 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3709 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3710 default:
3711 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3712 return -TARGET_EINVAL;
3715 #endif
3717 #define N_SHM_REGIONS 32
3719 static struct shm_region {
3720 abi_ulong start;
3721 abi_ulong size;
3722 bool in_use;
3723 } shm_regions[N_SHM_REGIONS];
3725 #ifndef TARGET_SEMID64_DS
3726 /* asm-generic version of this struct */
3727 struct target_semid64_ds
3729 struct target_ipc_perm sem_perm;
3730 abi_ulong sem_otime;
3731 #if TARGET_ABI_BITS == 32
3732 abi_ulong __unused1;
3733 #endif
3734 abi_ulong sem_ctime;
3735 #if TARGET_ABI_BITS == 32
3736 abi_ulong __unused2;
3737 #endif
3738 abi_ulong sem_nsems;
3739 abi_ulong __unused3;
3740 abi_ulong __unused4;
3742 #endif
3744 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3745 abi_ulong target_addr)
3747 struct target_ipc_perm *target_ip;
3748 struct target_semid64_ds *target_sd;
3750 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3751 return -TARGET_EFAULT;
3752 target_ip = &(target_sd->sem_perm);
3753 host_ip->__key = tswap32(target_ip->__key);
3754 host_ip->uid = tswap32(target_ip->uid);
3755 host_ip->gid = tswap32(target_ip->gid);
3756 host_ip->cuid = tswap32(target_ip->cuid);
3757 host_ip->cgid = tswap32(target_ip->cgid);
3758 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3759 host_ip->mode = tswap32(target_ip->mode);
3760 #else
3761 host_ip->mode = tswap16(target_ip->mode);
3762 #endif
3763 #if defined(TARGET_PPC)
3764 host_ip->__seq = tswap32(target_ip->__seq);
3765 #else
3766 host_ip->__seq = tswap16(target_ip->__seq);
3767 #endif
3768 unlock_user_struct(target_sd, target_addr, 0);
3769 return 0;
3772 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3773 struct ipc_perm *host_ip)
3775 struct target_ipc_perm *target_ip;
3776 struct target_semid64_ds *target_sd;
3778 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3779 return -TARGET_EFAULT;
3780 target_ip = &(target_sd->sem_perm);
3781 target_ip->__key = tswap32(host_ip->__key);
3782 target_ip->uid = tswap32(host_ip->uid);
3783 target_ip->gid = tswap32(host_ip->gid);
3784 target_ip->cuid = tswap32(host_ip->cuid);
3785 target_ip->cgid = tswap32(host_ip->cgid);
3786 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3787 target_ip->mode = tswap32(host_ip->mode);
3788 #else
3789 target_ip->mode = tswap16(host_ip->mode);
3790 #endif
3791 #if defined(TARGET_PPC)
3792 target_ip->__seq = tswap32(host_ip->__seq);
3793 #else
3794 target_ip->__seq = tswap16(host_ip->__seq);
3795 #endif
3796 unlock_user_struct(target_sd, target_addr, 1);
3797 return 0;
3800 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3801 abi_ulong target_addr)
3803 struct target_semid64_ds *target_sd;
3805 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3806 return -TARGET_EFAULT;
3807 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3808 return -TARGET_EFAULT;
3809 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3810 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3811 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3812 unlock_user_struct(target_sd, target_addr, 0);
3813 return 0;
3816 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3817 struct semid_ds *host_sd)
3819 struct target_semid64_ds *target_sd;
3821 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3822 return -TARGET_EFAULT;
3823 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3824 return -TARGET_EFAULT;
3825 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3826 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3827 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3828 unlock_user_struct(target_sd, target_addr, 1);
3829 return 0;
3832 struct target_seminfo {
3833 int semmap;
3834 int semmni;
3835 int semmns;
3836 int semmnu;
3837 int semmsl;
3838 int semopm;
3839 int semume;
3840 int semusz;
3841 int semvmx;
3842 int semaem;
3845 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3846 struct seminfo *host_seminfo)
3848 struct target_seminfo *target_seminfo;
3849 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3850 return -TARGET_EFAULT;
3851 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3852 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3853 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3854 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3855 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3856 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3857 __put_user(host_seminfo->semume, &target_seminfo->semume);
3858 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3859 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3860 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3861 unlock_user_struct(target_seminfo, target_addr, 1);
3862 return 0;
3865 union semun {
3866 int val;
3867 struct semid_ds *buf;
3868 unsigned short *array;
3869 struct seminfo *__buf;
3872 union target_semun {
3873 int val;
3874 abi_ulong buf;
3875 abi_ulong array;
3876 abi_ulong __buf;
3879 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3880 abi_ulong target_addr)
3882 int nsems;
3883 unsigned short *array;
3884 union semun semun;
3885 struct semid_ds semid_ds;
3886 int i, ret;
3888 semun.buf = &semid_ds;
3890 ret = semctl(semid, 0, IPC_STAT, semun);
3891 if (ret == -1)
3892 return get_errno(ret);
3894 nsems = semid_ds.sem_nsems;
3896 *host_array = g_try_new(unsigned short, nsems);
3897 if (!*host_array) {
3898 return -TARGET_ENOMEM;
3900 array = lock_user(VERIFY_READ, target_addr,
3901 nsems*sizeof(unsigned short), 1);
3902 if (!array) {
3903 g_free(*host_array);
3904 return -TARGET_EFAULT;
3907 for(i=0; i<nsems; i++) {
3908 __get_user((*host_array)[i], &array[i]);
3910 unlock_user(array, target_addr, 0);
3912 return 0;
3915 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3916 unsigned short **host_array)
3918 int nsems;
3919 unsigned short *array;
3920 union semun semun;
3921 struct semid_ds semid_ds;
3922 int i, ret;
3924 semun.buf = &semid_ds;
3926 ret = semctl(semid, 0, IPC_STAT, semun);
3927 if (ret == -1)
3928 return get_errno(ret);
3930 nsems = semid_ds.sem_nsems;
3932 array = lock_user(VERIFY_WRITE, target_addr,
3933 nsems*sizeof(unsigned short), 0);
3934 if (!array)
3935 return -TARGET_EFAULT;
3937 for(i=0; i<nsems; i++) {
3938 __put_user((*host_array)[i], &array[i]);
3940 g_free(*host_array);
3941 unlock_user(array, target_addr, 1);
3943 return 0;
3946 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3947 abi_ulong target_arg)
3949 union target_semun target_su = { .buf = target_arg };
3950 union semun arg;
3951 struct semid_ds dsarg;
3952 unsigned short *array = NULL;
3953 struct seminfo seminfo;
3954 abi_long ret = -TARGET_EINVAL;
3955 abi_long err;
3956 cmd &= 0xff;
3958 switch( cmd ) {
3959 case GETVAL:
3960 case SETVAL:
3961 /* In 64 bit cross-endian situations, we will erroneously pick up
3962 * the wrong half of the union for the "val" element. To rectify
3963 * this, the entire 8-byte structure is byteswapped, followed by
3964 * a swap of the 4 byte val field. In other cases, the data is
3965 * already in proper host byte order. */
3966 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3967 target_su.buf = tswapal(target_su.buf);
3968 arg.val = tswap32(target_su.val);
3969 } else {
3970 arg.val = target_su.val;
3972 ret = get_errno(semctl(semid, semnum, cmd, arg));
3973 break;
3974 case GETALL:
3975 case SETALL:
3976 err = target_to_host_semarray(semid, &array, target_su.array);
3977 if (err)
3978 return err;
3979 arg.array = array;
3980 ret = get_errno(semctl(semid, semnum, cmd, arg));
3981 err = host_to_target_semarray(semid, target_su.array, &array);
3982 if (err)
3983 return err;
3984 break;
3985 case IPC_STAT:
3986 case IPC_SET:
3987 case SEM_STAT:
3988 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3989 if (err)
3990 return err;
3991 arg.buf = &dsarg;
3992 ret = get_errno(semctl(semid, semnum, cmd, arg));
3993 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3994 if (err)
3995 return err;
3996 break;
3997 case IPC_INFO:
3998 case SEM_INFO:
3999 arg.__buf = &seminfo;
4000 ret = get_errno(semctl(semid, semnum, cmd, arg));
4001 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4002 if (err)
4003 return err;
4004 break;
4005 case IPC_RMID:
4006 case GETPID:
4007 case GETNCNT:
4008 case GETZCNT:
4009 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4010 break;
4013 return ret;
4016 struct target_sembuf {
4017 unsigned short sem_num;
4018 short sem_op;
4019 short sem_flg;
4022 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4023 abi_ulong target_addr,
4024 unsigned nsops)
4026 struct target_sembuf *target_sembuf;
4027 int i;
4029 target_sembuf = lock_user(VERIFY_READ, target_addr,
4030 nsops*sizeof(struct target_sembuf), 1);
4031 if (!target_sembuf)
4032 return -TARGET_EFAULT;
4034 for(i=0; i<nsops; i++) {
4035 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4036 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4037 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4040 unlock_user(target_sembuf, target_addr, 0);
4042 return 0;
4045 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4046 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4049 * This macro is required to handle the s390 variants, which passes the
4050 * arguments in a different order than default.
4052 #ifdef __s390x__
4053 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4054 (__nsops), (__timeout), (__sops)
4055 #else
4056 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4057 (__nsops), 0, (__sops), (__timeout)
4058 #endif
4060 static inline abi_long do_semtimedop(int semid,
4061 abi_long ptr,
4062 unsigned nsops,
4063 abi_long timeout, bool time64)
4065 struct sembuf *sops;
4066 struct timespec ts, *pts = NULL;
4067 abi_long ret;
4069 if (timeout) {
4070 pts = &ts;
4071 if (time64) {
4072 if (target_to_host_timespec64(pts, timeout)) {
4073 return -TARGET_EFAULT;
4075 } else {
4076 if (target_to_host_timespec(pts, timeout)) {
4077 return -TARGET_EFAULT;
4082 if (nsops > TARGET_SEMOPM) {
4083 return -TARGET_E2BIG;
4086 sops = g_new(struct sembuf, nsops);
4088 if (target_to_host_sembuf(sops, ptr, nsops)) {
4089 g_free(sops);
4090 return -TARGET_EFAULT;
4093 ret = -TARGET_ENOSYS;
4094 #ifdef __NR_semtimedop
4095 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4096 #endif
4097 #ifdef __NR_ipc
4098 if (ret == -TARGET_ENOSYS) {
4099 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4100 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4102 #endif
4103 g_free(sops);
4104 return ret;
4106 #endif
4108 struct target_msqid_ds
4110 struct target_ipc_perm msg_perm;
4111 abi_ulong msg_stime;
4112 #if TARGET_ABI_BITS == 32
4113 abi_ulong __unused1;
4114 #endif
4115 abi_ulong msg_rtime;
4116 #if TARGET_ABI_BITS == 32
4117 abi_ulong __unused2;
4118 #endif
4119 abi_ulong msg_ctime;
4120 #if TARGET_ABI_BITS == 32
4121 abi_ulong __unused3;
4122 #endif
4123 abi_ulong __msg_cbytes;
4124 abi_ulong msg_qnum;
4125 abi_ulong msg_qbytes;
4126 abi_ulong msg_lspid;
4127 abi_ulong msg_lrpid;
4128 abi_ulong __unused4;
4129 abi_ulong __unused5;
4132 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4133 abi_ulong target_addr)
4135 struct target_msqid_ds *target_md;
4137 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4138 return -TARGET_EFAULT;
4139 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4140 return -TARGET_EFAULT;
4141 host_md->msg_stime = tswapal(target_md->msg_stime);
4142 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4143 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4144 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4145 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4146 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4147 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4148 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4149 unlock_user_struct(target_md, target_addr, 0);
4150 return 0;
4153 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4154 struct msqid_ds *host_md)
4156 struct target_msqid_ds *target_md;
4158 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4159 return -TARGET_EFAULT;
4160 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4161 return -TARGET_EFAULT;
4162 target_md->msg_stime = tswapal(host_md->msg_stime);
4163 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4164 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4165 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4166 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4167 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4168 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4169 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4170 unlock_user_struct(target_md, target_addr, 1);
4171 return 0;
4174 struct target_msginfo {
4175 int msgpool;
4176 int msgmap;
4177 int msgmax;
4178 int msgmnb;
4179 int msgmni;
4180 int msgssz;
4181 int msgtql;
4182 unsigned short int msgseg;
4185 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4186 struct msginfo *host_msginfo)
4188 struct target_msginfo *target_msginfo;
4189 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4190 return -TARGET_EFAULT;
4191 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4192 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4193 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4194 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4195 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4196 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4197 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4198 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4199 unlock_user_struct(target_msginfo, target_addr, 1);
4200 return 0;
4203 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4205 struct msqid_ds dsarg;
4206 struct msginfo msginfo;
4207 abi_long ret = -TARGET_EINVAL;
4209 cmd &= 0xff;
4211 switch (cmd) {
4212 case IPC_STAT:
4213 case IPC_SET:
4214 case MSG_STAT:
4215 if (target_to_host_msqid_ds(&dsarg,ptr))
4216 return -TARGET_EFAULT;
4217 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4218 if (host_to_target_msqid_ds(ptr,&dsarg))
4219 return -TARGET_EFAULT;
4220 break;
4221 case IPC_RMID:
4222 ret = get_errno(msgctl(msgid, cmd, NULL));
4223 break;
4224 case IPC_INFO:
4225 case MSG_INFO:
4226 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4227 if (host_to_target_msginfo(ptr, &msginfo))
4228 return -TARGET_EFAULT;
4229 break;
4232 return ret;
4235 struct target_msgbuf {
4236 abi_long mtype;
4237 char mtext[1];
4240 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4241 ssize_t msgsz, int msgflg)
4243 struct target_msgbuf *target_mb;
4244 struct msgbuf *host_mb;
4245 abi_long ret = 0;
4247 if (msgsz < 0) {
4248 return -TARGET_EINVAL;
4251 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4252 return -TARGET_EFAULT;
4253 host_mb = g_try_malloc(msgsz + sizeof(long));
4254 if (!host_mb) {
4255 unlock_user_struct(target_mb, msgp, 0);
4256 return -TARGET_ENOMEM;
4258 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4259 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4260 ret = -TARGET_ENOSYS;
4261 #ifdef __NR_msgsnd
4262 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4263 #endif
4264 #ifdef __NR_ipc
4265 if (ret == -TARGET_ENOSYS) {
4266 #ifdef __s390x__
4267 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4268 host_mb));
4269 #else
4270 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4271 host_mb, 0));
4272 #endif
4274 #endif
4275 g_free(host_mb);
4276 unlock_user_struct(target_mb, msgp, 0);
4278 return ret;
4281 #ifdef __NR_ipc
4282 #if defined(__sparc__)
4283 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4284 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4285 #elif defined(__s390x__)
4286 /* The s390 sys_ipc variant has only five parameters. */
4287 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4288 ((long int[]){(long int)__msgp, __msgtyp})
4289 #else
4290 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4291 ((long int[]){(long int)__msgp, __msgtyp}), 0
4292 #endif
4293 #endif
4295 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4296 ssize_t msgsz, abi_long msgtyp,
4297 int msgflg)
4299 struct target_msgbuf *target_mb;
4300 char *target_mtext;
4301 struct msgbuf *host_mb;
4302 abi_long ret = 0;
4304 if (msgsz < 0) {
4305 return -TARGET_EINVAL;
4308 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4309 return -TARGET_EFAULT;
4311 host_mb = g_try_malloc(msgsz + sizeof(long));
4312 if (!host_mb) {
4313 ret = -TARGET_ENOMEM;
4314 goto end;
4316 ret = -TARGET_ENOSYS;
4317 #ifdef __NR_msgrcv
4318 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4319 #endif
4320 #ifdef __NR_ipc
4321 if (ret == -TARGET_ENOSYS) {
4322 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4323 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4325 #endif
4327 if (ret > 0) {
4328 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4329 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4330 if (!target_mtext) {
4331 ret = -TARGET_EFAULT;
4332 goto end;
4334 memcpy(target_mb->mtext, host_mb->mtext, ret);
4335 unlock_user(target_mtext, target_mtext_addr, ret);
4338 target_mb->mtype = tswapal(host_mb->mtype);
4340 end:
4341 if (target_mb)
4342 unlock_user_struct(target_mb, msgp, 1);
4343 g_free(host_mb);
4344 return ret;
4347 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4348 abi_ulong target_addr)
4350 struct target_shmid_ds *target_sd;
4352 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4353 return -TARGET_EFAULT;
4354 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4355 return -TARGET_EFAULT;
4356 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4357 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4358 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4359 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4360 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4361 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4362 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4363 unlock_user_struct(target_sd, target_addr, 0);
4364 return 0;
4367 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4368 struct shmid_ds *host_sd)
4370 struct target_shmid_ds *target_sd;
4372 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4373 return -TARGET_EFAULT;
4374 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4375 return -TARGET_EFAULT;
4376 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4377 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4378 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4379 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4380 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4381 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4382 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4383 unlock_user_struct(target_sd, target_addr, 1);
4384 return 0;
4387 struct target_shminfo {
4388 abi_ulong shmmax;
4389 abi_ulong shmmin;
4390 abi_ulong shmmni;
4391 abi_ulong shmseg;
4392 abi_ulong shmall;
4395 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4396 struct shminfo *host_shminfo)
4398 struct target_shminfo *target_shminfo;
4399 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4400 return -TARGET_EFAULT;
4401 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4402 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4403 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4404 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4405 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4406 unlock_user_struct(target_shminfo, target_addr, 1);
4407 return 0;
4410 struct target_shm_info {
4411 int used_ids;
4412 abi_ulong shm_tot;
4413 abi_ulong shm_rss;
4414 abi_ulong shm_swp;
4415 abi_ulong swap_attempts;
4416 abi_ulong swap_successes;
4419 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4420 struct shm_info *host_shm_info)
4422 struct target_shm_info *target_shm_info;
4423 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4424 return -TARGET_EFAULT;
4425 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4426 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4427 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4428 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4429 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4430 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4431 unlock_user_struct(target_shm_info, target_addr, 1);
4432 return 0;
4435 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4437 struct shmid_ds dsarg;
4438 struct shminfo shminfo;
4439 struct shm_info shm_info;
4440 abi_long ret = -TARGET_EINVAL;
4442 cmd &= 0xff;
4444 switch(cmd) {
4445 case IPC_STAT:
4446 case IPC_SET:
4447 case SHM_STAT:
4448 if (target_to_host_shmid_ds(&dsarg, buf))
4449 return -TARGET_EFAULT;
4450 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4451 if (host_to_target_shmid_ds(buf, &dsarg))
4452 return -TARGET_EFAULT;
4453 break;
4454 case IPC_INFO:
4455 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4456 if (host_to_target_shminfo(buf, &shminfo))
4457 return -TARGET_EFAULT;
4458 break;
4459 case SHM_INFO:
4460 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4461 if (host_to_target_shm_info(buf, &shm_info))
4462 return -TARGET_EFAULT;
4463 break;
4464 case IPC_RMID:
4465 case SHM_LOCK:
4466 case SHM_UNLOCK:
4467 ret = get_errno(shmctl(shmid, cmd, NULL));
4468 break;
4471 return ret;
4474 #ifndef TARGET_FORCE_SHMLBA
4475 /* For most architectures, SHMLBA is the same as the page size;
4476 * some architectures have larger values, in which case they should
4477 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4478 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4479 * and defining its own value for SHMLBA.
4481 * The kernel also permits SHMLBA to be set by the architecture to a
4482 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4483 * this means that addresses are rounded to the large size if
4484 * SHM_RND is set but addresses not aligned to that size are not rejected
4485 * as long as they are at least page-aligned. Since the only architecture
4486 * which uses this is ia64 this code doesn't provide for that oddity.
4488 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4490 return TARGET_PAGE_SIZE;
4492 #endif
4494 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4495 int shmid, abi_ulong shmaddr, int shmflg)
4497 CPUState *cpu = env_cpu(cpu_env);
4498 abi_long raddr;
4499 void *host_raddr;
4500 struct shmid_ds shm_info;
4501 int i,ret;
4502 abi_ulong shmlba;
4504 /* shmat pointers are always untagged */
4506 /* find out the length of the shared memory segment */
4507 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4508 if (is_error(ret)) {
4509 /* can't get length, bail out */
4510 return ret;
4513 shmlba = target_shmlba(cpu_env);
4515 if (shmaddr & (shmlba - 1)) {
4516 if (shmflg & SHM_RND) {
4517 shmaddr &= ~(shmlba - 1);
4518 } else {
4519 return -TARGET_EINVAL;
4522 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4523 return -TARGET_EINVAL;
4526 mmap_lock();
4529 * We're mapping shared memory, so ensure we generate code for parallel
4530 * execution and flush old translations. This will work up to the level
4531 * supported by the host -- anything that requires EXCP_ATOMIC will not
4532 * be atomic with respect to an external process.
4534 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4535 cpu->tcg_cflags |= CF_PARALLEL;
4536 tb_flush(cpu);
4539 if (shmaddr)
4540 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4541 else {
4542 abi_ulong mmap_start;
4544 /* In order to use the host shmat, we need to honor host SHMLBA. */
4545 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4547 if (mmap_start == -1) {
4548 errno = ENOMEM;
4549 host_raddr = (void *)-1;
4550 } else
4551 host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4552 shmflg | SHM_REMAP);
4555 if (host_raddr == (void *)-1) {
4556 mmap_unlock();
4557 return get_errno((long)host_raddr);
4559 raddr=h2g((unsigned long)host_raddr);
4561 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4562 PAGE_VALID | PAGE_RESET | PAGE_READ |
4563 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4565 for (i = 0; i < N_SHM_REGIONS; i++) {
4566 if (!shm_regions[i].in_use) {
4567 shm_regions[i].in_use = true;
4568 shm_regions[i].start = raddr;
4569 shm_regions[i].size = shm_info.shm_segsz;
4570 break;
4574 mmap_unlock();
4575 return raddr;
4579 static inline abi_long do_shmdt(abi_ulong shmaddr)
4581 int i;
4582 abi_long rv;
4584 /* shmdt pointers are always untagged */
4586 mmap_lock();
4588 for (i = 0; i < N_SHM_REGIONS; ++i) {
4589 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4590 shm_regions[i].in_use = false;
4591 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4592 break;
4595 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4597 mmap_unlock();
4599 return rv;
4602 #ifdef TARGET_NR_ipc
4603 /* ??? This only works with linear mappings. */
4604 /* do_ipc() must return target values and target errnos. */
4605 static abi_long do_ipc(CPUArchState *cpu_env,
4606 unsigned int call, abi_long first,
4607 abi_long second, abi_long third,
4608 abi_long ptr, abi_long fifth)
4610 int version;
4611 abi_long ret = 0;
4613 version = call >> 16;
4614 call &= 0xffff;
4616 switch (call) {
4617 case IPCOP_semop:
4618 ret = do_semtimedop(first, ptr, second, 0, false);
4619 break;
4620 case IPCOP_semtimedop:
4622 * The s390 sys_ipc variant has only five parameters instead of six
4623 * (as for default variant) and the only difference is the handling of
4624 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4625 * to a struct timespec where the generic variant uses fifth parameter.
4627 #if defined(TARGET_S390X)
4628 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4629 #else
4630 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4631 #endif
4632 break;
4634 case IPCOP_semget:
4635 ret = get_errno(semget(first, second, third));
4636 break;
4638 case IPCOP_semctl: {
4639 /* The semun argument to semctl is passed by value, so dereference the
4640 * ptr argument. */
4641 abi_ulong atptr;
4642 get_user_ual(atptr, ptr);
4643 ret = do_semctl(first, second, third, atptr);
4644 break;
4647 case IPCOP_msgget:
4648 ret = get_errno(msgget(first, second));
4649 break;
4651 case IPCOP_msgsnd:
4652 ret = do_msgsnd(first, ptr, second, third);
4653 break;
4655 case IPCOP_msgctl:
4656 ret = do_msgctl(first, second, ptr);
4657 break;
4659 case IPCOP_msgrcv:
4660 switch (version) {
4661 case 0:
4663 struct target_ipc_kludge {
4664 abi_long msgp;
4665 abi_long msgtyp;
4666 } *tmp;
4668 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4669 ret = -TARGET_EFAULT;
4670 break;
4673 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4675 unlock_user_struct(tmp, ptr, 0);
4676 break;
4678 default:
4679 ret = do_msgrcv(first, ptr, second, fifth, third);
4681 break;
4683 case IPCOP_shmat:
4684 switch (version) {
4685 default:
4687 abi_ulong raddr;
4688 raddr = do_shmat(cpu_env, first, ptr, second);
4689 if (is_error(raddr))
4690 return get_errno(raddr);
4691 if (put_user_ual(raddr, third))
4692 return -TARGET_EFAULT;
4693 break;
4695 case 1:
4696 ret = -TARGET_EINVAL;
4697 break;
4699 break;
4700 case IPCOP_shmdt:
4701 ret = do_shmdt(ptr);
4702 break;
4704 case IPCOP_shmget:
4705 /* IPC_* flag values are the same on all linux platforms */
4706 ret = get_errno(shmget(first, second, third));
4707 break;
4709 /* IPC_* and SHM_* command values are the same on all linux platforms */
4710 case IPCOP_shmctl:
4711 ret = do_shmctl(first, second, ptr);
4712 break;
4713 default:
4714 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4715 call, version);
4716 ret = -TARGET_ENOSYS;
4717 break;
4719 return ret;
4721 #endif
4723 /* kernel structure types definitions */
4725 #define STRUCT(name, ...) STRUCT_ ## name,
4726 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4727 enum {
4728 #include "syscall_types.h"
4729 STRUCT_MAX
4731 #undef STRUCT
4732 #undef STRUCT_SPECIAL
4734 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4735 #define STRUCT_SPECIAL(name)
4736 #include "syscall_types.h"
4737 #undef STRUCT
4738 #undef STRUCT_SPECIAL
4740 #define MAX_STRUCT_SIZE 4096
4742 #ifdef CONFIG_FIEMAP
4743 /* So fiemap access checks don't overflow on 32 bit systems.
4744 * This is very slightly smaller than the limit imposed by
4745 * the underlying kernel.
4747 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4748 / sizeof(struct fiemap_extent))
4750 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4751 int fd, int cmd, abi_long arg)
4753 /* The parameter for this ioctl is a struct fiemap followed
4754 * by an array of struct fiemap_extent whose size is set
4755 * in fiemap->fm_extent_count. The array is filled in by the
4756 * ioctl.
4758 int target_size_in, target_size_out;
4759 struct fiemap *fm;
4760 const argtype *arg_type = ie->arg_type;
4761 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4762 void *argptr, *p;
4763 abi_long ret;
4764 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4765 uint32_t outbufsz;
4766 int free_fm = 0;
4768 assert(arg_type[0] == TYPE_PTR);
4769 assert(ie->access == IOC_RW);
4770 arg_type++;
4771 target_size_in = thunk_type_size(arg_type, 0);
4772 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4773 if (!argptr) {
4774 return -TARGET_EFAULT;
4776 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4777 unlock_user(argptr, arg, 0);
4778 fm = (struct fiemap *)buf_temp;
4779 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4780 return -TARGET_EINVAL;
4783 outbufsz = sizeof (*fm) +
4784 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4786 if (outbufsz > MAX_STRUCT_SIZE) {
4787 /* We can't fit all the extents into the fixed size buffer.
4788 * Allocate one that is large enough and use it instead.
4790 fm = g_try_malloc(outbufsz);
4791 if (!fm) {
4792 return -TARGET_ENOMEM;
4794 memcpy(fm, buf_temp, sizeof(struct fiemap));
4795 free_fm = 1;
4797 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4798 if (!is_error(ret)) {
4799 target_size_out = target_size_in;
4800 /* An extent_count of 0 means we were only counting the extents
4801 * so there are no structs to copy
4803 if (fm->fm_extent_count != 0) {
4804 target_size_out += fm->fm_mapped_extents * extent_size;
4806 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4807 if (!argptr) {
4808 ret = -TARGET_EFAULT;
4809 } else {
4810 /* Convert the struct fiemap */
4811 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4812 if (fm->fm_extent_count != 0) {
4813 p = argptr + target_size_in;
4814 /* ...and then all the struct fiemap_extents */
4815 for (i = 0; i < fm->fm_mapped_extents; i++) {
4816 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4817 THUNK_TARGET);
4818 p += extent_size;
4821 unlock_user(argptr, arg, target_size_out);
4824 if (free_fm) {
4825 g_free(fm);
4827 return ret;
4829 #endif
4831 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4832 int fd, int cmd, abi_long arg)
4834 const argtype *arg_type = ie->arg_type;
4835 int target_size;
4836 void *argptr;
4837 int ret;
4838 struct ifconf *host_ifconf;
4839 uint32_t outbufsz;
4840 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4841 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4842 int target_ifreq_size;
4843 int nb_ifreq;
4844 int free_buf = 0;
4845 int i;
4846 int target_ifc_len;
4847 abi_long target_ifc_buf;
4848 int host_ifc_len;
4849 char *host_ifc_buf;
4851 assert(arg_type[0] == TYPE_PTR);
4852 assert(ie->access == IOC_RW);
4854 arg_type++;
4855 target_size = thunk_type_size(arg_type, 0);
4857 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4858 if (!argptr)
4859 return -TARGET_EFAULT;
4860 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4861 unlock_user(argptr, arg, 0);
4863 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4864 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4865 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4867 if (target_ifc_buf != 0) {
4868 target_ifc_len = host_ifconf->ifc_len;
4869 nb_ifreq = target_ifc_len / target_ifreq_size;
4870 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4872 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4873 if (outbufsz > MAX_STRUCT_SIZE) {
4875 * We can't fit all the extents into the fixed size buffer.
4876 * Allocate one that is large enough and use it instead.
4878 host_ifconf = g_try_malloc(outbufsz);
4879 if (!host_ifconf) {
4880 return -TARGET_ENOMEM;
4882 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4883 free_buf = 1;
4885 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4887 host_ifconf->ifc_len = host_ifc_len;
4888 } else {
4889 host_ifc_buf = NULL;
4891 host_ifconf->ifc_buf = host_ifc_buf;
4893 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4894 if (!is_error(ret)) {
4895 /* convert host ifc_len to target ifc_len */
4897 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4898 target_ifc_len = nb_ifreq * target_ifreq_size;
4899 host_ifconf->ifc_len = target_ifc_len;
4901 /* restore target ifc_buf */
4903 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4905 /* copy struct ifconf to target user */
4907 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4908 if (!argptr)
4909 return -TARGET_EFAULT;
4910 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4911 unlock_user(argptr, arg, target_size);
4913 if (target_ifc_buf != 0) {
4914 /* copy ifreq[] to target user */
4915 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4916 for (i = 0; i < nb_ifreq ; i++) {
4917 thunk_convert(argptr + i * target_ifreq_size,
4918 host_ifc_buf + i * sizeof(struct ifreq),
4919 ifreq_arg_type, THUNK_TARGET);
4921 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4925 if (free_buf) {
4926 g_free(host_ifconf);
4929 return ret;
4932 #if defined(CONFIG_USBFS)
4933 #if HOST_LONG_BITS > 64
4934 #error USBDEVFS thunks do not support >64 bit hosts yet.
4935 #endif
4936 struct live_urb {
4937 uint64_t target_urb_adr;
4938 uint64_t target_buf_adr;
4939 char *target_buf_ptr;
4940 struct usbdevfs_urb host_urb;
4943 static GHashTable *usbdevfs_urb_hashtable(void)
4945 static GHashTable *urb_hashtable;
4947 if (!urb_hashtable) {
4948 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4950 return urb_hashtable;
4953 static void urb_hashtable_insert(struct live_urb *urb)
4955 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4956 g_hash_table_insert(urb_hashtable, urb, urb);
4959 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4961 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4962 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4965 static void urb_hashtable_remove(struct live_urb *urb)
4967 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4968 g_hash_table_remove(urb_hashtable, urb);
4971 static abi_long
4972 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4973 int fd, int cmd, abi_long arg)
4975 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4976 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4977 struct live_urb *lurb;
4978 void *argptr;
4979 uint64_t hurb;
4980 int target_size;
4981 uintptr_t target_urb_adr;
4982 abi_long ret;
4984 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4986 memset(buf_temp, 0, sizeof(uint64_t));
4987 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4988 if (is_error(ret)) {
4989 return ret;
4992 memcpy(&hurb, buf_temp, sizeof(uint64_t));
4993 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4994 if (!lurb->target_urb_adr) {
4995 return -TARGET_EFAULT;
4997 urb_hashtable_remove(lurb);
4998 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4999 lurb->host_urb.buffer_length);
5000 lurb->target_buf_ptr = NULL;
5002 /* restore the guest buffer pointer */
5003 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5005 /* update the guest urb struct */
5006 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5007 if (!argptr) {
5008 g_free(lurb);
5009 return -TARGET_EFAULT;
5011 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5012 unlock_user(argptr, lurb->target_urb_adr, target_size);
5014 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5015 /* write back the urb handle */
5016 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5017 if (!argptr) {
5018 g_free(lurb);
5019 return -TARGET_EFAULT;
5022 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5023 target_urb_adr = lurb->target_urb_adr;
5024 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5025 unlock_user(argptr, arg, target_size);
5027 g_free(lurb);
5028 return ret;
5031 static abi_long
5032 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5033 uint8_t *buf_temp __attribute__((unused)),
5034 int fd, int cmd, abi_long arg)
5036 struct live_urb *lurb;
5038 /* map target address back to host URB with metadata. */
5039 lurb = urb_hashtable_lookup(arg);
5040 if (!lurb) {
5041 return -TARGET_EFAULT;
5043 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5046 static abi_long
5047 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5048 int fd, int cmd, abi_long arg)
5050 const argtype *arg_type = ie->arg_type;
5051 int target_size;
5052 abi_long ret;
5053 void *argptr;
5054 int rw_dir;
5055 struct live_urb *lurb;
5058 * each submitted URB needs to map to a unique ID for the
5059 * kernel, and that unique ID needs to be a pointer to
5060 * host memory. hence, we need to malloc for each URB.
5061 * isochronous transfers have a variable length struct.
5063 arg_type++;
5064 target_size = thunk_type_size(arg_type, THUNK_TARGET);
5066 /* construct host copy of urb and metadata */
5067 lurb = g_try_new0(struct live_urb, 1);
5068 if (!lurb) {
5069 return -TARGET_ENOMEM;
5072 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5073 if (!argptr) {
5074 g_free(lurb);
5075 return -TARGET_EFAULT;
5077 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5078 unlock_user(argptr, arg, 0);
5080 lurb->target_urb_adr = arg;
5081 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5083 /* buffer space used depends on endpoint type so lock the entire buffer */
5084 /* control type urbs should check the buffer contents for true direction */
5085 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5086 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5087 lurb->host_urb.buffer_length, 1);
5088 if (lurb->target_buf_ptr == NULL) {
5089 g_free(lurb);
5090 return -TARGET_EFAULT;
5093 /* update buffer pointer in host copy */
5094 lurb->host_urb.buffer = lurb->target_buf_ptr;
5096 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5097 if (is_error(ret)) {
5098 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5099 g_free(lurb);
5100 } else {
5101 urb_hashtable_insert(lurb);
5104 return ret;
5106 #endif /* CONFIG_USBFS */
5108 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5109 int cmd, abi_long arg)
5111 void *argptr;
5112 struct dm_ioctl *host_dm;
5113 abi_long guest_data;
5114 uint32_t guest_data_size;
5115 int target_size;
5116 const argtype *arg_type = ie->arg_type;
5117 abi_long ret;
5118 void *big_buf = NULL;
5119 char *host_data;
5121 arg_type++;
5122 target_size = thunk_type_size(arg_type, 0);
5123 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5124 if (!argptr) {
5125 ret = -TARGET_EFAULT;
5126 goto out;
5128 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5129 unlock_user(argptr, arg, 0);
5131 /* buf_temp is too small, so fetch things into a bigger buffer */
5132 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5133 memcpy(big_buf, buf_temp, target_size);
5134 buf_temp = big_buf;
5135 host_dm = big_buf;
5137 guest_data = arg + host_dm->data_start;
5138 if ((guest_data - arg) < 0) {
5139 ret = -TARGET_EINVAL;
5140 goto out;
5142 guest_data_size = host_dm->data_size - host_dm->data_start;
5143 host_data = (char*)host_dm + host_dm->data_start;
5145 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5146 if (!argptr) {
5147 ret = -TARGET_EFAULT;
5148 goto out;
5151 switch (ie->host_cmd) {
5152 case DM_REMOVE_ALL:
5153 case DM_LIST_DEVICES:
5154 case DM_DEV_CREATE:
5155 case DM_DEV_REMOVE:
5156 case DM_DEV_SUSPEND:
5157 case DM_DEV_STATUS:
5158 case DM_DEV_WAIT:
5159 case DM_TABLE_STATUS:
5160 case DM_TABLE_CLEAR:
5161 case DM_TABLE_DEPS:
5162 case DM_LIST_VERSIONS:
5163 /* no input data */
5164 break;
5165 case DM_DEV_RENAME:
5166 case DM_DEV_SET_GEOMETRY:
5167 /* data contains only strings */
5168 memcpy(host_data, argptr, guest_data_size);
5169 break;
5170 case DM_TARGET_MSG:
5171 memcpy(host_data, argptr, guest_data_size);
5172 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5173 break;
5174 case DM_TABLE_LOAD:
5176 void *gspec = argptr;
5177 void *cur_data = host_data;
5178 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5179 int spec_size = thunk_type_size(arg_type, 0);
5180 int i;
5182 for (i = 0; i < host_dm->target_count; i++) {
5183 struct dm_target_spec *spec = cur_data;
5184 uint32_t next;
5185 int slen;
5187 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5188 slen = strlen((char*)gspec + spec_size) + 1;
5189 next = spec->next;
5190 spec->next = sizeof(*spec) + slen;
5191 strcpy((char*)&spec[1], gspec + spec_size);
5192 gspec += next;
5193 cur_data += spec->next;
5195 break;
5197 default:
5198 ret = -TARGET_EINVAL;
5199 unlock_user(argptr, guest_data, 0);
5200 goto out;
5202 unlock_user(argptr, guest_data, 0);
5204 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5205 if (!is_error(ret)) {
5206 guest_data = arg + host_dm->data_start;
5207 guest_data_size = host_dm->data_size - host_dm->data_start;
5208 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5209 switch (ie->host_cmd) {
5210 case DM_REMOVE_ALL:
5211 case DM_DEV_CREATE:
5212 case DM_DEV_REMOVE:
5213 case DM_DEV_RENAME:
5214 case DM_DEV_SUSPEND:
5215 case DM_DEV_STATUS:
5216 case DM_TABLE_LOAD:
5217 case DM_TABLE_CLEAR:
5218 case DM_TARGET_MSG:
5219 case DM_DEV_SET_GEOMETRY:
5220 /* no return data */
5221 break;
5222 case DM_LIST_DEVICES:
5224 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5225 uint32_t remaining_data = guest_data_size;
5226 void *cur_data = argptr;
5227 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5228 int nl_size = 12; /* can't use thunk_size due to alignment */
5230 while (1) {
5231 uint32_t next = nl->next;
5232 if (next) {
5233 nl->next = nl_size + (strlen(nl->name) + 1);
5235 if (remaining_data < nl->next) {
5236 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5237 break;
5239 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5240 strcpy(cur_data + nl_size, nl->name);
5241 cur_data += nl->next;
5242 remaining_data -= nl->next;
5243 if (!next) {
5244 break;
5246 nl = (void*)nl + next;
5248 break;
5250 case DM_DEV_WAIT:
5251 case DM_TABLE_STATUS:
5253 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5254 void *cur_data = argptr;
5255 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5256 int spec_size = thunk_type_size(arg_type, 0);
5257 int i;
5259 for (i = 0; i < host_dm->target_count; i++) {
5260 uint32_t next = spec->next;
5261 int slen = strlen((char*)&spec[1]) + 1;
5262 spec->next = (cur_data - argptr) + spec_size + slen;
5263 if (guest_data_size < spec->next) {
5264 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5265 break;
5267 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5268 strcpy(cur_data + spec_size, (char*)&spec[1]);
5269 cur_data = argptr + spec->next;
5270 spec = (void*)host_dm + host_dm->data_start + next;
5272 break;
5274 case DM_TABLE_DEPS:
5276 void *hdata = (void*)host_dm + host_dm->data_start;
5277 int count = *(uint32_t*)hdata;
5278 uint64_t *hdev = hdata + 8;
5279 uint64_t *gdev = argptr + 8;
5280 int i;
5282 *(uint32_t*)argptr = tswap32(count);
5283 for (i = 0; i < count; i++) {
5284 *gdev = tswap64(*hdev);
5285 gdev++;
5286 hdev++;
5288 break;
5290 case DM_LIST_VERSIONS:
5292 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5293 uint32_t remaining_data = guest_data_size;
5294 void *cur_data = argptr;
5295 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5296 int vers_size = thunk_type_size(arg_type, 0);
5298 while (1) {
5299 uint32_t next = vers->next;
5300 if (next) {
5301 vers->next = vers_size + (strlen(vers->name) + 1);
5303 if (remaining_data < vers->next) {
5304 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5305 break;
5307 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5308 strcpy(cur_data + vers_size, vers->name);
5309 cur_data += vers->next;
5310 remaining_data -= vers->next;
5311 if (!next) {
5312 break;
5314 vers = (void*)vers + next;
5316 break;
5318 default:
5319 unlock_user(argptr, guest_data, 0);
5320 ret = -TARGET_EINVAL;
5321 goto out;
5323 unlock_user(argptr, guest_data, guest_data_size);
5325 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5326 if (!argptr) {
5327 ret = -TARGET_EFAULT;
5328 goto out;
5330 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5331 unlock_user(argptr, arg, target_size);
5333 out:
5334 g_free(big_buf);
5335 return ret;
5338 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5339 int cmd, abi_long arg)
5341 void *argptr;
5342 int target_size;
5343 const argtype *arg_type = ie->arg_type;
5344 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5345 abi_long ret;
5347 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5348 struct blkpg_partition host_part;
5350 /* Read and convert blkpg */
5351 arg_type++;
5352 target_size = thunk_type_size(arg_type, 0);
5353 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5354 if (!argptr) {
5355 ret = -TARGET_EFAULT;
5356 goto out;
5358 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5359 unlock_user(argptr, arg, 0);
5361 switch (host_blkpg->op) {
5362 case BLKPG_ADD_PARTITION:
5363 case BLKPG_DEL_PARTITION:
5364 /* payload is struct blkpg_partition */
5365 break;
5366 default:
5367 /* Unknown opcode */
5368 ret = -TARGET_EINVAL;
5369 goto out;
5372 /* Read and convert blkpg->data */
5373 arg = (abi_long)(uintptr_t)host_blkpg->data;
5374 target_size = thunk_type_size(part_arg_type, 0);
5375 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5376 if (!argptr) {
5377 ret = -TARGET_EFAULT;
5378 goto out;
5380 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5381 unlock_user(argptr, arg, 0);
5383 /* Swizzle the data pointer to our local copy and call! */
5384 host_blkpg->data = &host_part;
5385 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5387 out:
5388 return ret;
5391 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5392 int fd, int cmd, abi_long arg)
5394 const argtype *arg_type = ie->arg_type;
5395 const StructEntry *se;
5396 const argtype *field_types;
5397 const int *dst_offsets, *src_offsets;
5398 int target_size;
5399 void *argptr;
5400 abi_ulong *target_rt_dev_ptr = NULL;
5401 unsigned long *host_rt_dev_ptr = NULL;
5402 abi_long ret;
5403 int i;
5405 assert(ie->access == IOC_W);
5406 assert(*arg_type == TYPE_PTR);
5407 arg_type++;
5408 assert(*arg_type == TYPE_STRUCT);
5409 target_size = thunk_type_size(arg_type, 0);
5410 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5411 if (!argptr) {
5412 return -TARGET_EFAULT;
5414 arg_type++;
5415 assert(*arg_type == (int)STRUCT_rtentry);
5416 se = struct_entries + *arg_type++;
5417 assert(se->convert[0] == NULL);
5418 /* convert struct here to be able to catch rt_dev string */
5419 field_types = se->field_types;
5420 dst_offsets = se->field_offsets[THUNK_HOST];
5421 src_offsets = se->field_offsets[THUNK_TARGET];
5422 for (i = 0; i < se->nb_fields; i++) {
5423 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5424 assert(*field_types == TYPE_PTRVOID);
5425 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5426 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5427 if (*target_rt_dev_ptr != 0) {
5428 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5429 tswapal(*target_rt_dev_ptr));
5430 if (!*host_rt_dev_ptr) {
5431 unlock_user(argptr, arg, 0);
5432 return -TARGET_EFAULT;
5434 } else {
5435 *host_rt_dev_ptr = 0;
5437 field_types++;
5438 continue;
5440 field_types = thunk_convert(buf_temp + dst_offsets[i],
5441 argptr + src_offsets[i],
5442 field_types, THUNK_HOST);
5444 unlock_user(argptr, arg, 0);
5446 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5448 assert(host_rt_dev_ptr != NULL);
5449 assert(target_rt_dev_ptr != NULL);
5450 if (*host_rt_dev_ptr != 0) {
5451 unlock_user((void *)*host_rt_dev_ptr,
5452 *target_rt_dev_ptr, 0);
5454 return ret;
5457 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5458 int fd, int cmd, abi_long arg)
5460 int sig = target_to_host_signal(arg);
5461 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5464 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5465 int fd, int cmd, abi_long arg)
5467 struct timeval tv;
5468 abi_long ret;
5470 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5471 if (is_error(ret)) {
5472 return ret;
5475 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5476 if (copy_to_user_timeval(arg, &tv)) {
5477 return -TARGET_EFAULT;
5479 } else {
5480 if (copy_to_user_timeval64(arg, &tv)) {
5481 return -TARGET_EFAULT;
5485 return ret;
5488 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5489 int fd, int cmd, abi_long arg)
5491 struct timespec ts;
5492 abi_long ret;
5494 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5495 if (is_error(ret)) {
5496 return ret;
5499 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5500 if (host_to_target_timespec(arg, &ts)) {
5501 return -TARGET_EFAULT;
5503 } else{
5504 if (host_to_target_timespec64(arg, &ts)) {
5505 return -TARGET_EFAULT;
5509 return ret;
5512 #ifdef TIOCGPTPEER
5513 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5514 int fd, int cmd, abi_long arg)
5516 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5517 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5519 #endif
5521 #ifdef HAVE_DRM_H
5523 static void unlock_drm_version(struct drm_version *host_ver,
5524 struct target_drm_version *target_ver,
5525 bool copy)
5527 unlock_user(host_ver->name, target_ver->name,
5528 copy ? host_ver->name_len : 0);
5529 unlock_user(host_ver->date, target_ver->date,
5530 copy ? host_ver->date_len : 0);
5531 unlock_user(host_ver->desc, target_ver->desc,
5532 copy ? host_ver->desc_len : 0);
5535 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5536 struct target_drm_version *target_ver)
5538 memset(host_ver, 0, sizeof(*host_ver));
5540 __get_user(host_ver->name_len, &target_ver->name_len);
5541 if (host_ver->name_len) {
5542 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5543 target_ver->name_len, 0);
5544 if (!host_ver->name) {
5545 return -EFAULT;
5549 __get_user(host_ver->date_len, &target_ver->date_len);
5550 if (host_ver->date_len) {
5551 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5552 target_ver->date_len, 0);
5553 if (!host_ver->date) {
5554 goto err;
5558 __get_user(host_ver->desc_len, &target_ver->desc_len);
5559 if (host_ver->desc_len) {
5560 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5561 target_ver->desc_len, 0);
5562 if (!host_ver->desc) {
5563 goto err;
5567 return 0;
5568 err:
5569 unlock_drm_version(host_ver, target_ver, false);
5570 return -EFAULT;
5573 static inline void host_to_target_drmversion(
5574 struct target_drm_version *target_ver,
5575 struct drm_version *host_ver)
5577 __put_user(host_ver->version_major, &target_ver->version_major);
5578 __put_user(host_ver->version_minor, &target_ver->version_minor);
5579 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5580 __put_user(host_ver->name_len, &target_ver->name_len);
5581 __put_user(host_ver->date_len, &target_ver->date_len);
5582 __put_user(host_ver->desc_len, &target_ver->desc_len);
5583 unlock_drm_version(host_ver, target_ver, true);
5586 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5587 int fd, int cmd, abi_long arg)
5589 struct drm_version *ver;
5590 struct target_drm_version *target_ver;
5591 abi_long ret;
5593 switch (ie->host_cmd) {
5594 case DRM_IOCTL_VERSION:
5595 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5596 return -TARGET_EFAULT;
5598 ver = (struct drm_version *)buf_temp;
5599 ret = target_to_host_drmversion(ver, target_ver);
5600 if (!is_error(ret)) {
5601 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5602 if (is_error(ret)) {
5603 unlock_drm_version(ver, target_ver, false);
5604 } else {
5605 host_to_target_drmversion(target_ver, ver);
5608 unlock_user_struct(target_ver, arg, 0);
5609 return ret;
5611 return -TARGET_ENOSYS;
5614 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5615 struct drm_i915_getparam *gparam,
5616 int fd, abi_long arg)
5618 abi_long ret;
5619 int value;
5620 struct target_drm_i915_getparam *target_gparam;
5622 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5623 return -TARGET_EFAULT;
5626 __get_user(gparam->param, &target_gparam->param);
5627 gparam->value = &value;
5628 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5629 put_user_s32(value, target_gparam->value);
5631 unlock_user_struct(target_gparam, arg, 0);
5632 return ret;
5635 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5636 int fd, int cmd, abi_long arg)
5638 switch (ie->host_cmd) {
5639 case DRM_IOCTL_I915_GETPARAM:
5640 return do_ioctl_drm_i915_getparam(ie,
5641 (struct drm_i915_getparam *)buf_temp,
5642 fd, arg);
5643 default:
5644 return -TARGET_ENOSYS;
5648 #endif
5650 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5651 int fd, int cmd, abi_long arg)
5653 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5654 struct tun_filter *target_filter;
5655 char *target_addr;
5657 assert(ie->access == IOC_W);
5659 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5660 if (!target_filter) {
5661 return -TARGET_EFAULT;
5663 filter->flags = tswap16(target_filter->flags);
5664 filter->count = tswap16(target_filter->count);
5665 unlock_user(target_filter, arg, 0);
5667 if (filter->count) {
5668 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5669 MAX_STRUCT_SIZE) {
5670 return -TARGET_EFAULT;
5673 target_addr = lock_user(VERIFY_READ,
5674 arg + offsetof(struct tun_filter, addr),
5675 filter->count * ETH_ALEN, 1);
5676 if (!target_addr) {
5677 return -TARGET_EFAULT;
5679 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5680 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5683 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5686 IOCTLEntry ioctl_entries[] = {
5687 #define IOCTL(cmd, access, ...) \
5688 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5689 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5690 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5691 #define IOCTL_IGNORE(cmd) \
5692 { TARGET_ ## cmd, 0, #cmd },
5693 #include "ioctls.h"
5694 { 0, 0, },
5697 /* ??? Implement proper locking for ioctls. */
5698 /* do_ioctl() Must return target values and target errnos. */
5699 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5701 const IOCTLEntry *ie;
5702 const argtype *arg_type;
5703 abi_long ret;
5704 uint8_t buf_temp[MAX_STRUCT_SIZE];
5705 int target_size;
5706 void *argptr;
5708 ie = ioctl_entries;
5709 for(;;) {
5710 if (ie->target_cmd == 0) {
5711 qemu_log_mask(
5712 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5713 return -TARGET_ENOSYS;
5715 if (ie->target_cmd == cmd)
5716 break;
5717 ie++;
5719 arg_type = ie->arg_type;
5720 if (ie->do_ioctl) {
5721 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5722 } else if (!ie->host_cmd) {
5723 /* Some architectures define BSD ioctls in their headers
5724 that are not implemented in Linux. */
5725 return -TARGET_ENOSYS;
5728 switch(arg_type[0]) {
5729 case TYPE_NULL:
5730 /* no argument */
5731 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5732 break;
5733 case TYPE_PTRVOID:
5734 case TYPE_INT:
5735 case TYPE_LONG:
5736 case TYPE_ULONG:
5737 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5738 break;
5739 case TYPE_PTR:
5740 arg_type++;
5741 target_size = thunk_type_size(arg_type, 0);
5742 switch(ie->access) {
5743 case IOC_R:
5744 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5745 if (!is_error(ret)) {
5746 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5747 if (!argptr)
5748 return -TARGET_EFAULT;
5749 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5750 unlock_user(argptr, arg, target_size);
5752 break;
5753 case IOC_W:
5754 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5755 if (!argptr)
5756 return -TARGET_EFAULT;
5757 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5758 unlock_user(argptr, arg, 0);
5759 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5760 break;
5761 default:
5762 case IOC_RW:
5763 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5764 if (!argptr)
5765 return -TARGET_EFAULT;
5766 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5767 unlock_user(argptr, arg, 0);
5768 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5769 if (!is_error(ret)) {
5770 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5771 if (!argptr)
5772 return -TARGET_EFAULT;
5773 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5774 unlock_user(argptr, arg, target_size);
5776 break;
5778 break;
5779 default:
5780 qemu_log_mask(LOG_UNIMP,
5781 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5782 (long)cmd, arg_type[0]);
5783 ret = -TARGET_ENOSYS;
5784 break;
5786 return ret;
5789 static const bitmask_transtbl iflag_tbl[] = {
5790 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5791 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5792 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5793 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5794 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5795 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5796 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5797 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5798 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5799 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5800 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5801 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5802 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5803 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5804 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5805 { 0, 0, 0, 0 }
5808 static const bitmask_transtbl oflag_tbl[] = {
5809 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5810 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5811 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5812 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5813 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5814 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5815 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5816 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5817 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5818 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5819 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5820 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5821 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5822 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5823 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5824 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5825 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5826 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5827 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5828 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5829 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5830 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5831 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5832 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5833 { 0, 0, 0, 0 }
5836 static const bitmask_transtbl cflag_tbl[] = {
5837 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5838 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5839 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5840 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5841 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5842 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5843 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5844 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5845 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5846 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5847 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5848 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5849 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5850 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5851 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5852 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5853 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5854 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5855 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5856 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5857 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5858 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5859 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5860 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5861 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5862 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5863 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5864 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5865 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5866 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5867 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5868 { 0, 0, 0, 0 }
5871 static const bitmask_transtbl lflag_tbl[] = {
5872 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5873 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5874 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5875 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5876 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5877 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5878 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5879 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5880 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5881 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5882 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5883 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5884 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5885 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5886 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5887 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5888 { 0, 0, 0, 0 }
5891 static void target_to_host_termios (void *dst, const void *src)
5893 struct host_termios *host = dst;
5894 const struct target_termios *target = src;
5896 host->c_iflag =
5897 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5898 host->c_oflag =
5899 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5900 host->c_cflag =
5901 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5902 host->c_lflag =
5903 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5904 host->c_line = target->c_line;
5906 memset(host->c_cc, 0, sizeof(host->c_cc));
5907 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5908 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5909 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5910 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5911 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5912 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5913 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5914 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5915 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5916 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5917 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5918 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5919 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5920 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5921 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5922 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5923 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5926 static void host_to_target_termios (void *dst, const void *src)
5928 struct target_termios *target = dst;
5929 const struct host_termios *host = src;
5931 target->c_iflag =
5932 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5933 target->c_oflag =
5934 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5935 target->c_cflag =
5936 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5937 target->c_lflag =
5938 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5939 target->c_line = host->c_line;
5941 memset(target->c_cc, 0, sizeof(target->c_cc));
5942 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5943 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5944 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5945 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5946 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5947 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5948 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5949 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5950 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5951 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5952 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5953 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5954 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5955 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5956 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5957 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5958 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5961 static const StructEntry struct_termios_def = {
5962 .convert = { host_to_target_termios, target_to_host_termios },
5963 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5964 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5965 .print = print_termios,
5968 static const bitmask_transtbl mmap_flags_tbl[] = {
5969 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5970 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5971 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5972 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5973 MAP_ANONYMOUS, MAP_ANONYMOUS },
5974 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5975 MAP_GROWSDOWN, MAP_GROWSDOWN },
5976 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5977 MAP_DENYWRITE, MAP_DENYWRITE },
5978 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5979 MAP_EXECUTABLE, MAP_EXECUTABLE },
5980 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5981 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5982 MAP_NORESERVE, MAP_NORESERVE },
5983 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5984 /* MAP_STACK had been ignored by the kernel for quite some time.
5985 Recognize it for the target insofar as we do not want to pass
5986 it through to the host. */
5987 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5988 { 0, 0, 0, 0 }
5992 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5993 * TARGET_I386 is defined if TARGET_X86_64 is defined
5995 #if defined(TARGET_I386)
5997 /* NOTE: there is really one LDT for all the threads */
5998 static uint8_t *ldt_table;
6000 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6002 int size;
6003 void *p;
6005 if (!ldt_table)
6006 return 0;
6007 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6008 if (size > bytecount)
6009 size = bytecount;
6010 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6011 if (!p)
6012 return -TARGET_EFAULT;
6013 /* ??? Should this by byteswapped? */
6014 memcpy(p, ldt_table, size);
6015 unlock_user(p, ptr, size);
6016 return size;
6019 /* XXX: add locking support */
6020 static abi_long write_ldt(CPUX86State *env,
6021 abi_ulong ptr, unsigned long bytecount, int oldmode)
6023 struct target_modify_ldt_ldt_s ldt_info;
6024 struct target_modify_ldt_ldt_s *target_ldt_info;
6025 int seg_32bit, contents, read_exec_only, limit_in_pages;
6026 int seg_not_present, useable, lm;
6027 uint32_t *lp, entry_1, entry_2;
6029 if (bytecount != sizeof(ldt_info))
6030 return -TARGET_EINVAL;
6031 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6032 return -TARGET_EFAULT;
6033 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6034 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6035 ldt_info.limit = tswap32(target_ldt_info->limit);
6036 ldt_info.flags = tswap32(target_ldt_info->flags);
6037 unlock_user_struct(target_ldt_info, ptr, 0);
6039 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6040 return -TARGET_EINVAL;
6041 seg_32bit = ldt_info.flags & 1;
6042 contents = (ldt_info.flags >> 1) & 3;
6043 read_exec_only = (ldt_info.flags >> 3) & 1;
6044 limit_in_pages = (ldt_info.flags >> 4) & 1;
6045 seg_not_present = (ldt_info.flags >> 5) & 1;
6046 useable = (ldt_info.flags >> 6) & 1;
6047 #ifdef TARGET_ABI32
6048 lm = 0;
6049 #else
6050 lm = (ldt_info.flags >> 7) & 1;
6051 #endif
6052 if (contents == 3) {
6053 if (oldmode)
6054 return -TARGET_EINVAL;
6055 if (seg_not_present == 0)
6056 return -TARGET_EINVAL;
6058 /* allocate the LDT */
6059 if (!ldt_table) {
6060 env->ldt.base = target_mmap(0,
6061 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6062 PROT_READ|PROT_WRITE,
6063 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6064 if (env->ldt.base == -1)
6065 return -TARGET_ENOMEM;
6066 memset(g2h_untagged(env->ldt.base), 0,
6067 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6068 env->ldt.limit = 0xffff;
6069 ldt_table = g2h_untagged(env->ldt.base);
6072 /* NOTE: same code as Linux kernel */
6073 /* Allow LDTs to be cleared by the user. */
6074 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6075 if (oldmode ||
6076 (contents == 0 &&
6077 read_exec_only == 1 &&
6078 seg_32bit == 0 &&
6079 limit_in_pages == 0 &&
6080 seg_not_present == 1 &&
6081 useable == 0 )) {
6082 entry_1 = 0;
6083 entry_2 = 0;
6084 goto install;
6088 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6089 (ldt_info.limit & 0x0ffff);
6090 entry_2 = (ldt_info.base_addr & 0xff000000) |
6091 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6092 (ldt_info.limit & 0xf0000) |
6093 ((read_exec_only ^ 1) << 9) |
6094 (contents << 10) |
6095 ((seg_not_present ^ 1) << 15) |
6096 (seg_32bit << 22) |
6097 (limit_in_pages << 23) |
6098 (lm << 21) |
6099 0x7000;
6100 if (!oldmode)
6101 entry_2 |= (useable << 20);
6103 /* Install the new entry ... */
6104 install:
6105 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6106 lp[0] = tswap32(entry_1);
6107 lp[1] = tswap32(entry_2);
6108 return 0;
6111 /* specific and weird i386 syscalls */
6112 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6113 unsigned long bytecount)
6115 abi_long ret;
6117 switch (func) {
6118 case 0:
6119 ret = read_ldt(ptr, bytecount);
6120 break;
6121 case 1:
6122 ret = write_ldt(env, ptr, bytecount, 1);
6123 break;
6124 case 0x11:
6125 ret = write_ldt(env, ptr, bytecount, 0);
6126 break;
6127 default:
6128 ret = -TARGET_ENOSYS;
6129 break;
6131 return ret;
6134 #if defined(TARGET_ABI32)
6135 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6137 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6138 struct target_modify_ldt_ldt_s ldt_info;
6139 struct target_modify_ldt_ldt_s *target_ldt_info;
6140 int seg_32bit, contents, read_exec_only, limit_in_pages;
6141 int seg_not_present, useable, lm;
6142 uint32_t *lp, entry_1, entry_2;
6143 int i;
6145 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6146 if (!target_ldt_info)
6147 return -TARGET_EFAULT;
6148 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6149 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6150 ldt_info.limit = tswap32(target_ldt_info->limit);
6151 ldt_info.flags = tswap32(target_ldt_info->flags);
6152 if (ldt_info.entry_number == -1) {
6153 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6154 if (gdt_table[i] == 0) {
6155 ldt_info.entry_number = i;
6156 target_ldt_info->entry_number = tswap32(i);
6157 break;
6161 unlock_user_struct(target_ldt_info, ptr, 1);
6163 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6164 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6165 return -TARGET_EINVAL;
6166 seg_32bit = ldt_info.flags & 1;
6167 contents = (ldt_info.flags >> 1) & 3;
6168 read_exec_only = (ldt_info.flags >> 3) & 1;
6169 limit_in_pages = (ldt_info.flags >> 4) & 1;
6170 seg_not_present = (ldt_info.flags >> 5) & 1;
6171 useable = (ldt_info.flags >> 6) & 1;
6172 #ifdef TARGET_ABI32
6173 lm = 0;
6174 #else
6175 lm = (ldt_info.flags >> 7) & 1;
6176 #endif
6178 if (contents == 3) {
6179 if (seg_not_present == 0)
6180 return -TARGET_EINVAL;
6183 /* NOTE: same code as Linux kernel */
6184 /* Allow LDTs to be cleared by the user. */
6185 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6186 if ((contents == 0 &&
6187 read_exec_only == 1 &&
6188 seg_32bit == 0 &&
6189 limit_in_pages == 0 &&
6190 seg_not_present == 1 &&
6191 useable == 0 )) {
6192 entry_1 = 0;
6193 entry_2 = 0;
6194 goto install;
6198 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6199 (ldt_info.limit & 0x0ffff);
6200 entry_2 = (ldt_info.base_addr & 0xff000000) |
6201 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6202 (ldt_info.limit & 0xf0000) |
6203 ((read_exec_only ^ 1) << 9) |
6204 (contents << 10) |
6205 ((seg_not_present ^ 1) << 15) |
6206 (seg_32bit << 22) |
6207 (limit_in_pages << 23) |
6208 (useable << 20) |
6209 (lm << 21) |
6210 0x7000;
6212 /* Install the new entry ... */
6213 install:
6214 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6215 lp[0] = tswap32(entry_1);
6216 lp[1] = tswap32(entry_2);
6217 return 0;
6220 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6222 struct target_modify_ldt_ldt_s *target_ldt_info;
6223 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6224 uint32_t base_addr, limit, flags;
6225 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6226 int seg_not_present, useable, lm;
6227 uint32_t *lp, entry_1, entry_2;
6229 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6230 if (!target_ldt_info)
6231 return -TARGET_EFAULT;
6232 idx = tswap32(target_ldt_info->entry_number);
6233 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6234 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6235 unlock_user_struct(target_ldt_info, ptr, 1);
6236 return -TARGET_EINVAL;
6238 lp = (uint32_t *)(gdt_table + idx);
6239 entry_1 = tswap32(lp[0]);
6240 entry_2 = tswap32(lp[1]);
6242 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6243 contents = (entry_2 >> 10) & 3;
6244 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6245 seg_32bit = (entry_2 >> 22) & 1;
6246 limit_in_pages = (entry_2 >> 23) & 1;
6247 useable = (entry_2 >> 20) & 1;
6248 #ifdef TARGET_ABI32
6249 lm = 0;
6250 #else
6251 lm = (entry_2 >> 21) & 1;
6252 #endif
6253 flags = (seg_32bit << 0) | (contents << 1) |
6254 (read_exec_only << 3) | (limit_in_pages << 4) |
6255 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6256 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6257 base_addr = (entry_1 >> 16) |
6258 (entry_2 & 0xff000000) |
6259 ((entry_2 & 0xff) << 16);
6260 target_ldt_info->base_addr = tswapal(base_addr);
6261 target_ldt_info->limit = tswap32(limit);
6262 target_ldt_info->flags = tswap32(flags);
6263 unlock_user_struct(target_ldt_info, ptr, 1);
6264 return 0;
6267 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6269 return -TARGET_ENOSYS;
6271 #else
6272 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6274 abi_long ret = 0;
6275 abi_ulong val;
6276 int idx;
6278 switch(code) {
6279 case TARGET_ARCH_SET_GS:
6280 case TARGET_ARCH_SET_FS:
6281 if (code == TARGET_ARCH_SET_GS)
6282 idx = R_GS;
6283 else
6284 idx = R_FS;
6285 cpu_x86_load_seg(env, idx, 0);
6286 env->segs[idx].base = addr;
6287 break;
6288 case TARGET_ARCH_GET_GS:
6289 case TARGET_ARCH_GET_FS:
6290 if (code == TARGET_ARCH_GET_GS)
6291 idx = R_GS;
6292 else
6293 idx = R_FS;
6294 val = env->segs[idx].base;
6295 if (put_user(val, addr, abi_ulong))
6296 ret = -TARGET_EFAULT;
6297 break;
6298 default:
6299 ret = -TARGET_EINVAL;
6300 break;
6302 return ret;
6304 #endif /* defined(TARGET_ABI32 */
6305 #endif /* defined(TARGET_I386) */
6308 * These constants are generic. Supply any that are missing from the host.
6310 #ifndef PR_SET_NAME
6311 # define PR_SET_NAME 15
6312 # define PR_GET_NAME 16
6313 #endif
6314 #ifndef PR_SET_FP_MODE
6315 # define PR_SET_FP_MODE 45
6316 # define PR_GET_FP_MODE 46
6317 # define PR_FP_MODE_FR (1 << 0)
6318 # define PR_FP_MODE_FRE (1 << 1)
6319 #endif
6320 #ifndef PR_SVE_SET_VL
6321 # define PR_SVE_SET_VL 50
6322 # define PR_SVE_GET_VL 51
6323 # define PR_SVE_VL_LEN_MASK 0xffff
6324 # define PR_SVE_VL_INHERIT (1 << 17)
6325 #endif
6326 #ifndef PR_PAC_RESET_KEYS
6327 # define PR_PAC_RESET_KEYS 54
6328 # define PR_PAC_APIAKEY (1 << 0)
6329 # define PR_PAC_APIBKEY (1 << 1)
6330 # define PR_PAC_APDAKEY (1 << 2)
6331 # define PR_PAC_APDBKEY (1 << 3)
6332 # define PR_PAC_APGAKEY (1 << 4)
6333 #endif
6334 #ifndef PR_SET_TAGGED_ADDR_CTRL
6335 # define PR_SET_TAGGED_ADDR_CTRL 55
6336 # define PR_GET_TAGGED_ADDR_CTRL 56
6337 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6338 #endif
6339 #ifndef PR_MTE_TCF_SHIFT
6340 # define PR_MTE_TCF_SHIFT 1
6341 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6342 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6343 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6344 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6345 # define PR_MTE_TAG_SHIFT 3
6346 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6347 #endif
6348 #ifndef PR_SET_IO_FLUSHER
6349 # define PR_SET_IO_FLUSHER 57
6350 # define PR_GET_IO_FLUSHER 58
6351 #endif
6352 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6353 # define PR_SET_SYSCALL_USER_DISPATCH 59
6354 #endif
6355 #ifndef PR_SME_SET_VL
6356 # define PR_SME_SET_VL 63
6357 # define PR_SME_GET_VL 64
6358 # define PR_SME_VL_LEN_MASK 0xffff
6359 # define PR_SME_VL_INHERIT (1 << 17)
6360 #endif
6362 #include "target_prctl.h"
6364 static abi_long do_prctl_inval0(CPUArchState *env)
6366 return -TARGET_EINVAL;
6369 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6371 return -TARGET_EINVAL;
6374 #ifndef do_prctl_get_fp_mode
6375 #define do_prctl_get_fp_mode do_prctl_inval0
6376 #endif
6377 #ifndef do_prctl_set_fp_mode
6378 #define do_prctl_set_fp_mode do_prctl_inval1
6379 #endif
6380 #ifndef do_prctl_sve_get_vl
6381 #define do_prctl_sve_get_vl do_prctl_inval0
6382 #endif
6383 #ifndef do_prctl_sve_set_vl
6384 #define do_prctl_sve_set_vl do_prctl_inval1
6385 #endif
6386 #ifndef do_prctl_reset_keys
6387 #define do_prctl_reset_keys do_prctl_inval1
6388 #endif
6389 #ifndef do_prctl_set_tagged_addr_ctrl
6390 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6391 #endif
6392 #ifndef do_prctl_get_tagged_addr_ctrl
6393 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6394 #endif
6395 #ifndef do_prctl_get_unalign
6396 #define do_prctl_get_unalign do_prctl_inval1
6397 #endif
6398 #ifndef do_prctl_set_unalign
6399 #define do_prctl_set_unalign do_prctl_inval1
6400 #endif
6401 #ifndef do_prctl_sme_get_vl
6402 #define do_prctl_sme_get_vl do_prctl_inval0
6403 #endif
6404 #ifndef do_prctl_sme_set_vl
6405 #define do_prctl_sme_set_vl do_prctl_inval1
6406 #endif
6408 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6409 abi_long arg3, abi_long arg4, abi_long arg5)
6411 abi_long ret;
6413 switch (option) {
6414 case PR_GET_PDEATHSIG:
6416 int deathsig;
6417 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6418 arg3, arg4, arg5));
6419 if (!is_error(ret) &&
6420 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6421 return -TARGET_EFAULT;
6423 return ret;
6425 case PR_SET_PDEATHSIG:
6426 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6427 arg3, arg4, arg5));
6428 case PR_GET_NAME:
6430 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6431 if (!name) {
6432 return -TARGET_EFAULT;
6434 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6435 arg3, arg4, arg5));
6436 unlock_user(name, arg2, 16);
6437 return ret;
6439 case PR_SET_NAME:
6441 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6442 if (!name) {
6443 return -TARGET_EFAULT;
6445 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6446 arg3, arg4, arg5));
6447 unlock_user(name, arg2, 0);
6448 return ret;
6450 case PR_GET_FP_MODE:
6451 return do_prctl_get_fp_mode(env);
6452 case PR_SET_FP_MODE:
6453 return do_prctl_set_fp_mode(env, arg2);
6454 case PR_SVE_GET_VL:
6455 return do_prctl_sve_get_vl(env);
6456 case PR_SVE_SET_VL:
6457 return do_prctl_sve_set_vl(env, arg2);
6458 case PR_SME_GET_VL:
6459 return do_prctl_sme_get_vl(env);
6460 case PR_SME_SET_VL:
6461 return do_prctl_sme_set_vl(env, arg2);
6462 case PR_PAC_RESET_KEYS:
6463 if (arg3 || arg4 || arg5) {
6464 return -TARGET_EINVAL;
6466 return do_prctl_reset_keys(env, arg2);
6467 case PR_SET_TAGGED_ADDR_CTRL:
6468 if (arg3 || arg4 || arg5) {
6469 return -TARGET_EINVAL;
6471 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6472 case PR_GET_TAGGED_ADDR_CTRL:
6473 if (arg2 || arg3 || arg4 || arg5) {
6474 return -TARGET_EINVAL;
6476 return do_prctl_get_tagged_addr_ctrl(env);
6478 case PR_GET_UNALIGN:
6479 return do_prctl_get_unalign(env, arg2);
6480 case PR_SET_UNALIGN:
6481 return do_prctl_set_unalign(env, arg2);
6483 case PR_CAP_AMBIENT:
6484 case PR_CAPBSET_READ:
6485 case PR_CAPBSET_DROP:
6486 case PR_GET_DUMPABLE:
6487 case PR_SET_DUMPABLE:
6488 case PR_GET_KEEPCAPS:
6489 case PR_SET_KEEPCAPS:
6490 case PR_GET_SECUREBITS:
6491 case PR_SET_SECUREBITS:
6492 case PR_GET_TIMING:
6493 case PR_SET_TIMING:
6494 case PR_GET_TIMERSLACK:
6495 case PR_SET_TIMERSLACK:
6496 case PR_MCE_KILL:
6497 case PR_MCE_KILL_GET:
6498 case PR_GET_NO_NEW_PRIVS:
6499 case PR_SET_NO_NEW_PRIVS:
6500 case PR_GET_IO_FLUSHER:
6501 case PR_SET_IO_FLUSHER:
6502 /* Some prctl options have no pointer arguments and we can pass on. */
6503 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6505 case PR_GET_CHILD_SUBREAPER:
6506 case PR_SET_CHILD_SUBREAPER:
6507 case PR_GET_SPECULATION_CTRL:
6508 case PR_SET_SPECULATION_CTRL:
6509 case PR_GET_TID_ADDRESS:
6510 /* TODO */
6511 return -TARGET_EINVAL;
6513 case PR_GET_FPEXC:
6514 case PR_SET_FPEXC:
6515 /* Was used for SPE on PowerPC. */
6516 return -TARGET_EINVAL;
6518 case PR_GET_ENDIAN:
6519 case PR_SET_ENDIAN:
6520 case PR_GET_FPEMU:
6521 case PR_SET_FPEMU:
6522 case PR_SET_MM:
6523 case PR_GET_SECCOMP:
6524 case PR_SET_SECCOMP:
6525 case PR_SET_SYSCALL_USER_DISPATCH:
6526 case PR_GET_THP_DISABLE:
6527 case PR_SET_THP_DISABLE:
6528 case PR_GET_TSC:
6529 case PR_SET_TSC:
6530 /* Disable to prevent the target disabling stuff we need. */
6531 return -TARGET_EINVAL;
6533 default:
6534 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6535 option);
6536 return -TARGET_EINVAL;
6540 #define NEW_STACK_SIZE 0x40000
6543 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6544 typedef struct {
6545 CPUArchState *env;
6546 pthread_mutex_t mutex;
6547 pthread_cond_t cond;
6548 pthread_t thread;
6549 uint32_t tid;
6550 abi_ulong child_tidptr;
6551 abi_ulong parent_tidptr;
6552 sigset_t sigmask;
6553 } new_thread_info;
6555 static void *clone_func(void *arg)
6557 new_thread_info *info = arg;
6558 CPUArchState *env;
6559 CPUState *cpu;
6560 TaskState *ts;
6562 rcu_register_thread();
6563 tcg_register_thread();
6564 env = info->env;
6565 cpu = env_cpu(env);
6566 thread_cpu = cpu;
6567 ts = (TaskState *)cpu->opaque;
6568 info->tid = sys_gettid();
6569 task_settid(ts);
6570 if (info->child_tidptr)
6571 put_user_u32(info->tid, info->child_tidptr);
6572 if (info->parent_tidptr)
6573 put_user_u32(info->tid, info->parent_tidptr);
6574 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6575 /* Enable signals. */
6576 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6577 /* Signal to the parent that we're ready. */
6578 pthread_mutex_lock(&info->mutex);
6579 pthread_cond_broadcast(&info->cond);
6580 pthread_mutex_unlock(&info->mutex);
6581 /* Wait until the parent has finished initializing the tls state. */
6582 pthread_mutex_lock(&clone_lock);
6583 pthread_mutex_unlock(&clone_lock);
6584 cpu_loop(env);
6585 /* never exits */
6586 return NULL;
6589 /* do_fork() Must return host values and target errnos (unlike most
6590 do_*() functions). */
6591 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6592 abi_ulong parent_tidptr, target_ulong newtls,
6593 abi_ulong child_tidptr)
6595 CPUState *cpu = env_cpu(env);
6596 int ret;
6597 TaskState *ts;
6598 CPUState *new_cpu;
6599 CPUArchState *new_env;
6600 sigset_t sigmask;
6602 flags &= ~CLONE_IGNORED_FLAGS;
6604 /* Emulate vfork() with fork() */
6605 if (flags & CLONE_VFORK)
6606 flags &= ~(CLONE_VFORK | CLONE_VM);
6608 if (flags & CLONE_VM) {
6609 TaskState *parent_ts = (TaskState *)cpu->opaque;
6610 new_thread_info info;
6611 pthread_attr_t attr;
6613 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6614 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6615 return -TARGET_EINVAL;
6618 ts = g_new0(TaskState, 1);
6619 init_task_state(ts);
6621 /* Grab a mutex so that thread setup appears atomic. */
6622 pthread_mutex_lock(&clone_lock);
6625 * If this is our first additional thread, we need to ensure we
6626 * generate code for parallel execution and flush old translations.
6627 * Do this now so that the copy gets CF_PARALLEL too.
6629 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6630 cpu->tcg_cflags |= CF_PARALLEL;
6631 tb_flush(cpu);
6634 /* we create a new CPU instance. */
6635 new_env = cpu_copy(env);
6636 /* Init regs that differ from the parent. */
6637 cpu_clone_regs_child(new_env, newsp, flags);
6638 cpu_clone_regs_parent(env, flags);
6639 new_cpu = env_cpu(new_env);
6640 new_cpu->opaque = ts;
6641 ts->bprm = parent_ts->bprm;
6642 ts->info = parent_ts->info;
6643 ts->signal_mask = parent_ts->signal_mask;
6645 if (flags & CLONE_CHILD_CLEARTID) {
6646 ts->child_tidptr = child_tidptr;
6649 if (flags & CLONE_SETTLS) {
6650 cpu_set_tls (new_env, newtls);
6653 memset(&info, 0, sizeof(info));
6654 pthread_mutex_init(&info.mutex, NULL);
6655 pthread_mutex_lock(&info.mutex);
6656 pthread_cond_init(&info.cond, NULL);
6657 info.env = new_env;
6658 if (flags & CLONE_CHILD_SETTID) {
6659 info.child_tidptr = child_tidptr;
6661 if (flags & CLONE_PARENT_SETTID) {
6662 info.parent_tidptr = parent_tidptr;
6665 ret = pthread_attr_init(&attr);
6666 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6667 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6668 /* It is not safe to deliver signals until the child has finished
6669 initializing, so temporarily block all signals. */
6670 sigfillset(&sigmask);
6671 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6672 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6674 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6675 /* TODO: Free new CPU state if thread creation failed. */
6677 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6678 pthread_attr_destroy(&attr);
6679 if (ret == 0) {
6680 /* Wait for the child to initialize. */
6681 pthread_cond_wait(&info.cond, &info.mutex);
6682 ret = info.tid;
6683 } else {
6684 ret = -1;
6686 pthread_mutex_unlock(&info.mutex);
6687 pthread_cond_destroy(&info.cond);
6688 pthread_mutex_destroy(&info.mutex);
6689 pthread_mutex_unlock(&clone_lock);
6690 } else {
6691 /* if no CLONE_VM, we consider it is a fork */
6692 if (flags & CLONE_INVALID_FORK_FLAGS) {
6693 return -TARGET_EINVAL;
6696 /* We can't support custom termination signals */
6697 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6698 return -TARGET_EINVAL;
6701 if (block_signals()) {
6702 return -QEMU_ERESTARTSYS;
6705 fork_start();
6706 ret = fork();
6707 if (ret == 0) {
6708 /* Child Process. */
6709 cpu_clone_regs_child(env, newsp, flags);
6710 fork_end(1);
6711 /* There is a race condition here. The parent process could
6712 theoretically read the TID in the child process before the child
6713 tid is set. This would require using either ptrace
6714 (not implemented) or having *_tidptr to point at a shared memory
6715 mapping. We can't repeat the spinlock hack used above because
6716 the child process gets its own copy of the lock. */
6717 if (flags & CLONE_CHILD_SETTID)
6718 put_user_u32(sys_gettid(), child_tidptr);
6719 if (flags & CLONE_PARENT_SETTID)
6720 put_user_u32(sys_gettid(), parent_tidptr);
6721 ts = (TaskState *)cpu->opaque;
6722 if (flags & CLONE_SETTLS)
6723 cpu_set_tls (env, newtls);
6724 if (flags & CLONE_CHILD_CLEARTID)
6725 ts->child_tidptr = child_tidptr;
6726 } else {
6727 cpu_clone_regs_parent(env, flags);
6728 fork_end(0);
6731 return ret;
6734 /* warning : doesn't handle linux specific flags... */
6735 static int target_to_host_fcntl_cmd(int cmd)
6737 int ret;
6739 switch(cmd) {
6740 case TARGET_F_DUPFD:
6741 case TARGET_F_GETFD:
6742 case TARGET_F_SETFD:
6743 case TARGET_F_GETFL:
6744 case TARGET_F_SETFL:
6745 case TARGET_F_OFD_GETLK:
6746 case TARGET_F_OFD_SETLK:
6747 case TARGET_F_OFD_SETLKW:
6748 ret = cmd;
6749 break;
6750 case TARGET_F_GETLK:
6751 ret = F_GETLK64;
6752 break;
6753 case TARGET_F_SETLK:
6754 ret = F_SETLK64;
6755 break;
6756 case TARGET_F_SETLKW:
6757 ret = F_SETLKW64;
6758 break;
6759 case TARGET_F_GETOWN:
6760 ret = F_GETOWN;
6761 break;
6762 case TARGET_F_SETOWN:
6763 ret = F_SETOWN;
6764 break;
6765 case TARGET_F_GETSIG:
6766 ret = F_GETSIG;
6767 break;
6768 case TARGET_F_SETSIG:
6769 ret = F_SETSIG;
6770 break;
6771 #if TARGET_ABI_BITS == 32
6772 case TARGET_F_GETLK64:
6773 ret = F_GETLK64;
6774 break;
6775 case TARGET_F_SETLK64:
6776 ret = F_SETLK64;
6777 break;
6778 case TARGET_F_SETLKW64:
6779 ret = F_SETLKW64;
6780 break;
6781 #endif
6782 case TARGET_F_SETLEASE:
6783 ret = F_SETLEASE;
6784 break;
6785 case TARGET_F_GETLEASE:
6786 ret = F_GETLEASE;
6787 break;
6788 #ifdef F_DUPFD_CLOEXEC
6789 case TARGET_F_DUPFD_CLOEXEC:
6790 ret = F_DUPFD_CLOEXEC;
6791 break;
6792 #endif
6793 case TARGET_F_NOTIFY:
6794 ret = F_NOTIFY;
6795 break;
6796 #ifdef F_GETOWN_EX
6797 case TARGET_F_GETOWN_EX:
6798 ret = F_GETOWN_EX;
6799 break;
6800 #endif
6801 #ifdef F_SETOWN_EX
6802 case TARGET_F_SETOWN_EX:
6803 ret = F_SETOWN_EX;
6804 break;
6805 #endif
6806 #ifdef F_SETPIPE_SZ
6807 case TARGET_F_SETPIPE_SZ:
6808 ret = F_SETPIPE_SZ;
6809 break;
6810 case TARGET_F_GETPIPE_SZ:
6811 ret = F_GETPIPE_SZ;
6812 break;
6813 #endif
6814 #ifdef F_ADD_SEALS
6815 case TARGET_F_ADD_SEALS:
6816 ret = F_ADD_SEALS;
6817 break;
6818 case TARGET_F_GET_SEALS:
6819 ret = F_GET_SEALS;
6820 break;
6821 #endif
6822 default:
6823 ret = -TARGET_EINVAL;
6824 break;
6827 #if defined(__powerpc64__)
6828 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6829 * is not supported by kernel. The glibc fcntl call actually adjusts
6830 * them to 5, 6 and 7 before making the syscall(). Since we make the
6831 * syscall directly, adjust to what is supported by the kernel.
6833 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6834 ret -= F_GETLK64 - 5;
6836 #endif
6838 return ret;
6841 #define FLOCK_TRANSTBL \
6842 switch (type) { \
6843 TRANSTBL_CONVERT(F_RDLCK); \
6844 TRANSTBL_CONVERT(F_WRLCK); \
6845 TRANSTBL_CONVERT(F_UNLCK); \
6848 static int target_to_host_flock(int type)
6850 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6851 FLOCK_TRANSTBL
6852 #undef TRANSTBL_CONVERT
6853 return -TARGET_EINVAL;
6856 static int host_to_target_flock(int type)
6858 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6859 FLOCK_TRANSTBL
6860 #undef TRANSTBL_CONVERT
6861 /* if we don't know how to convert the value coming
6862 * from the host we copy to the target field as-is
6864 return type;
6867 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6868 abi_ulong target_flock_addr)
6870 struct target_flock *target_fl;
6871 int l_type;
6873 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6874 return -TARGET_EFAULT;
6877 __get_user(l_type, &target_fl->l_type);
6878 l_type = target_to_host_flock(l_type);
6879 if (l_type < 0) {
6880 return l_type;
6882 fl->l_type = l_type;
6883 __get_user(fl->l_whence, &target_fl->l_whence);
6884 __get_user(fl->l_start, &target_fl->l_start);
6885 __get_user(fl->l_len, &target_fl->l_len);
6886 __get_user(fl->l_pid, &target_fl->l_pid);
6887 unlock_user_struct(target_fl, target_flock_addr, 0);
6888 return 0;
6891 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6892 const struct flock64 *fl)
6894 struct target_flock *target_fl;
6895 short l_type;
6897 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6898 return -TARGET_EFAULT;
6901 l_type = host_to_target_flock(fl->l_type);
6902 __put_user(l_type, &target_fl->l_type);
6903 __put_user(fl->l_whence, &target_fl->l_whence);
6904 __put_user(fl->l_start, &target_fl->l_start);
6905 __put_user(fl->l_len, &target_fl->l_len);
6906 __put_user(fl->l_pid, &target_fl->l_pid);
6907 unlock_user_struct(target_fl, target_flock_addr, 1);
6908 return 0;
6911 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6912 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6914 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6915 struct target_oabi_flock64 {
6916 abi_short l_type;
6917 abi_short l_whence;
6918 abi_llong l_start;
6919 abi_llong l_len;
6920 abi_int l_pid;
6921 } QEMU_PACKED;
6923 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6924 abi_ulong target_flock_addr)
6926 struct target_oabi_flock64 *target_fl;
6927 int l_type;
6929 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6930 return -TARGET_EFAULT;
6933 __get_user(l_type, &target_fl->l_type);
6934 l_type = target_to_host_flock(l_type);
6935 if (l_type < 0) {
6936 return l_type;
6938 fl->l_type = l_type;
6939 __get_user(fl->l_whence, &target_fl->l_whence);
6940 __get_user(fl->l_start, &target_fl->l_start);
6941 __get_user(fl->l_len, &target_fl->l_len);
6942 __get_user(fl->l_pid, &target_fl->l_pid);
6943 unlock_user_struct(target_fl, target_flock_addr, 0);
6944 return 0;
6947 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6948 const struct flock64 *fl)
6950 struct target_oabi_flock64 *target_fl;
6951 short l_type;
6953 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6954 return -TARGET_EFAULT;
6957 l_type = host_to_target_flock(fl->l_type);
6958 __put_user(l_type, &target_fl->l_type);
6959 __put_user(fl->l_whence, &target_fl->l_whence);
6960 __put_user(fl->l_start, &target_fl->l_start);
6961 __put_user(fl->l_len, &target_fl->l_len);
6962 __put_user(fl->l_pid, &target_fl->l_pid);
6963 unlock_user_struct(target_fl, target_flock_addr, 1);
6964 return 0;
6966 #endif
6968 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6969 abi_ulong target_flock_addr)
6971 struct target_flock64 *target_fl;
6972 int l_type;
6974 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6975 return -TARGET_EFAULT;
6978 __get_user(l_type, &target_fl->l_type);
6979 l_type = target_to_host_flock(l_type);
6980 if (l_type < 0) {
6981 return l_type;
6983 fl->l_type = l_type;
6984 __get_user(fl->l_whence, &target_fl->l_whence);
6985 __get_user(fl->l_start, &target_fl->l_start);
6986 __get_user(fl->l_len, &target_fl->l_len);
6987 __get_user(fl->l_pid, &target_fl->l_pid);
6988 unlock_user_struct(target_fl, target_flock_addr, 0);
6989 return 0;
6992 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6993 const struct flock64 *fl)
6995 struct target_flock64 *target_fl;
6996 short l_type;
6998 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6999 return -TARGET_EFAULT;
7002 l_type = host_to_target_flock(fl->l_type);
7003 __put_user(l_type, &target_fl->l_type);
7004 __put_user(fl->l_whence, &target_fl->l_whence);
7005 __put_user(fl->l_start, &target_fl->l_start);
7006 __put_user(fl->l_len, &target_fl->l_len);
7007 __put_user(fl->l_pid, &target_fl->l_pid);
7008 unlock_user_struct(target_fl, target_flock_addr, 1);
7009 return 0;
7012 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7014 struct flock64 fl64;
7015 #ifdef F_GETOWN_EX
7016 struct f_owner_ex fox;
7017 struct target_f_owner_ex *target_fox;
7018 #endif
7019 abi_long ret;
7020 int host_cmd = target_to_host_fcntl_cmd(cmd);
7022 if (host_cmd == -TARGET_EINVAL)
7023 return host_cmd;
7025 switch(cmd) {
7026 case TARGET_F_GETLK:
7027 ret = copy_from_user_flock(&fl64, arg);
7028 if (ret) {
7029 return ret;
7031 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7032 if (ret == 0) {
7033 ret = copy_to_user_flock(arg, &fl64);
7035 break;
7037 case TARGET_F_SETLK:
7038 case TARGET_F_SETLKW:
7039 ret = copy_from_user_flock(&fl64, arg);
7040 if (ret) {
7041 return ret;
7043 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7044 break;
7046 case TARGET_F_GETLK64:
7047 case TARGET_F_OFD_GETLK:
7048 ret = copy_from_user_flock64(&fl64, arg);
7049 if (ret) {
7050 return ret;
7052 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7053 if (ret == 0) {
7054 ret = copy_to_user_flock64(arg, &fl64);
7056 break;
7057 case TARGET_F_SETLK64:
7058 case TARGET_F_SETLKW64:
7059 case TARGET_F_OFD_SETLK:
7060 case TARGET_F_OFD_SETLKW:
7061 ret = copy_from_user_flock64(&fl64, arg);
7062 if (ret) {
7063 return ret;
7065 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7066 break;
7068 case TARGET_F_GETFL:
7069 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7070 if (ret >= 0) {
7071 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7073 break;
7075 case TARGET_F_SETFL:
7076 ret = get_errno(safe_fcntl(fd, host_cmd,
7077 target_to_host_bitmask(arg,
7078 fcntl_flags_tbl)));
7079 break;
7081 #ifdef F_GETOWN_EX
7082 case TARGET_F_GETOWN_EX:
7083 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7084 if (ret >= 0) {
7085 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7086 return -TARGET_EFAULT;
7087 target_fox->type = tswap32(fox.type);
7088 target_fox->pid = tswap32(fox.pid);
7089 unlock_user_struct(target_fox, arg, 1);
7091 break;
7092 #endif
7094 #ifdef F_SETOWN_EX
7095 case TARGET_F_SETOWN_EX:
7096 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7097 return -TARGET_EFAULT;
7098 fox.type = tswap32(target_fox->type);
7099 fox.pid = tswap32(target_fox->pid);
7100 unlock_user_struct(target_fox, arg, 0);
7101 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7102 break;
7103 #endif
7105 case TARGET_F_SETSIG:
7106 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7107 break;
7109 case TARGET_F_GETSIG:
7110 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7111 break;
7113 case TARGET_F_SETOWN:
7114 case TARGET_F_GETOWN:
7115 case TARGET_F_SETLEASE:
7116 case TARGET_F_GETLEASE:
7117 case TARGET_F_SETPIPE_SZ:
7118 case TARGET_F_GETPIPE_SZ:
7119 case TARGET_F_ADD_SEALS:
7120 case TARGET_F_GET_SEALS:
7121 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7122 break;
7124 default:
7125 ret = get_errno(safe_fcntl(fd, cmd, arg));
7126 break;
7128 return ret;
7131 #ifdef USE_UID16
7133 static inline int high2lowuid(int uid)
7135 if (uid > 65535)
7136 return 65534;
7137 else
7138 return uid;
7141 static inline int high2lowgid(int gid)
7143 if (gid > 65535)
7144 return 65534;
7145 else
7146 return gid;
7149 static inline int low2highuid(int uid)
7151 if ((int16_t)uid == -1)
7152 return -1;
7153 else
7154 return uid;
7157 static inline int low2highgid(int gid)
7159 if ((int16_t)gid == -1)
7160 return -1;
7161 else
7162 return gid;
7164 static inline int tswapid(int id)
7166 return tswap16(id);
7169 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7171 #else /* !USE_UID16 */
7172 static inline int high2lowuid(int uid)
7174 return uid;
7176 static inline int high2lowgid(int gid)
7178 return gid;
7180 static inline int low2highuid(int uid)
7182 return uid;
7184 static inline int low2highgid(int gid)
7186 return gid;
7188 static inline int tswapid(int id)
7190 return tswap32(id);
7193 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7195 #endif /* USE_UID16 */
7197 /* We must do direct syscalls for setting UID/GID, because we want to
7198 * implement the Linux system call semantics of "change only for this thread",
7199 * not the libc/POSIX semantics of "change for all threads in process".
7200 * (See http://ewontfix.com/17/ for more details.)
7201 * We use the 32-bit version of the syscalls if present; if it is not
7202 * then either the host architecture supports 32-bit UIDs natively with
7203 * the standard syscall, or the 16-bit UID is the best we can do.
7205 #ifdef __NR_setuid32
7206 #define __NR_sys_setuid __NR_setuid32
7207 #else
7208 #define __NR_sys_setuid __NR_setuid
7209 #endif
7210 #ifdef __NR_setgid32
7211 #define __NR_sys_setgid __NR_setgid32
7212 #else
7213 #define __NR_sys_setgid __NR_setgid
7214 #endif
7215 #ifdef __NR_setresuid32
7216 #define __NR_sys_setresuid __NR_setresuid32
7217 #else
7218 #define __NR_sys_setresuid __NR_setresuid
7219 #endif
7220 #ifdef __NR_setresgid32
7221 #define __NR_sys_setresgid __NR_setresgid32
7222 #else
7223 #define __NR_sys_setresgid __NR_setresgid
7224 #endif
7226 _syscall1(int, sys_setuid, uid_t, uid)
7227 _syscall1(int, sys_setgid, gid_t, gid)
7228 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7229 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7231 void syscall_init(void)
7233 IOCTLEntry *ie;
7234 const argtype *arg_type;
7235 int size;
7237 thunk_init(STRUCT_MAX);
7239 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7240 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7241 #include "syscall_types.h"
7242 #undef STRUCT
7243 #undef STRUCT_SPECIAL
7245 /* we patch the ioctl size if necessary. We rely on the fact that
7246 no ioctl has all the bits at '1' in the size field */
7247 ie = ioctl_entries;
7248 while (ie->target_cmd != 0) {
7249 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7250 TARGET_IOC_SIZEMASK) {
7251 arg_type = ie->arg_type;
7252 if (arg_type[0] != TYPE_PTR) {
7253 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7254 ie->target_cmd);
7255 exit(1);
7257 arg_type++;
7258 size = thunk_type_size(arg_type, 0);
7259 ie->target_cmd = (ie->target_cmd &
7260 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7261 (size << TARGET_IOC_SIZESHIFT);
7264 /* automatic consistency check if same arch */
7265 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7266 (defined(__x86_64__) && defined(TARGET_X86_64))
7267 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7268 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7269 ie->name, ie->target_cmd, ie->host_cmd);
7271 #endif
7272 ie++;
7276 #ifdef TARGET_NR_truncate64
7277 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7278 abi_long arg2,
7279 abi_long arg3,
7280 abi_long arg4)
7282 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7283 arg2 = arg3;
7284 arg3 = arg4;
7286 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7288 #endif
7290 #ifdef TARGET_NR_ftruncate64
7291 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7292 abi_long arg2,
7293 abi_long arg3,
7294 abi_long arg4)
7296 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7297 arg2 = arg3;
7298 arg3 = arg4;
7300 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7302 #endif
7304 #if defined(TARGET_NR_timer_settime) || \
7305 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7306 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7307 abi_ulong target_addr)
7309 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7310 offsetof(struct target_itimerspec,
7311 it_interval)) ||
7312 target_to_host_timespec(&host_its->it_value, target_addr +
7313 offsetof(struct target_itimerspec,
7314 it_value))) {
7315 return -TARGET_EFAULT;
7318 return 0;
7320 #endif
7322 #if defined(TARGET_NR_timer_settime64) || \
7323 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7324 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7325 abi_ulong target_addr)
7327 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7328 offsetof(struct target__kernel_itimerspec,
7329 it_interval)) ||
7330 target_to_host_timespec64(&host_its->it_value, target_addr +
7331 offsetof(struct target__kernel_itimerspec,
7332 it_value))) {
7333 return -TARGET_EFAULT;
7336 return 0;
7338 #endif
7340 #if ((defined(TARGET_NR_timerfd_gettime) || \
7341 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7342 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7343 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7344 struct itimerspec *host_its)
7346 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7347 it_interval),
7348 &host_its->it_interval) ||
7349 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7350 it_value),
7351 &host_its->it_value)) {
7352 return -TARGET_EFAULT;
7354 return 0;
7356 #endif
7358 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7359 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7360 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7361 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7362 struct itimerspec *host_its)
7364 if (host_to_target_timespec64(target_addr +
7365 offsetof(struct target__kernel_itimerspec,
7366 it_interval),
7367 &host_its->it_interval) ||
7368 host_to_target_timespec64(target_addr +
7369 offsetof(struct target__kernel_itimerspec,
7370 it_value),
7371 &host_its->it_value)) {
7372 return -TARGET_EFAULT;
7374 return 0;
7376 #endif
7378 #if defined(TARGET_NR_adjtimex) || \
7379 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7380 static inline abi_long target_to_host_timex(struct timex *host_tx,
7381 abi_long target_addr)
7383 struct target_timex *target_tx;
7385 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7386 return -TARGET_EFAULT;
7389 __get_user(host_tx->modes, &target_tx->modes);
7390 __get_user(host_tx->offset, &target_tx->offset);
7391 __get_user(host_tx->freq, &target_tx->freq);
7392 __get_user(host_tx->maxerror, &target_tx->maxerror);
7393 __get_user(host_tx->esterror, &target_tx->esterror);
7394 __get_user(host_tx->status, &target_tx->status);
7395 __get_user(host_tx->constant, &target_tx->constant);
7396 __get_user(host_tx->precision, &target_tx->precision);
7397 __get_user(host_tx->tolerance, &target_tx->tolerance);
7398 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7399 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7400 __get_user(host_tx->tick, &target_tx->tick);
7401 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7402 __get_user(host_tx->jitter, &target_tx->jitter);
7403 __get_user(host_tx->shift, &target_tx->shift);
7404 __get_user(host_tx->stabil, &target_tx->stabil);
7405 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7406 __get_user(host_tx->calcnt, &target_tx->calcnt);
7407 __get_user(host_tx->errcnt, &target_tx->errcnt);
7408 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7409 __get_user(host_tx->tai, &target_tx->tai);
7411 unlock_user_struct(target_tx, target_addr, 0);
7412 return 0;
7415 static inline abi_long host_to_target_timex(abi_long target_addr,
7416 struct timex *host_tx)
7418 struct target_timex *target_tx;
7420 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7421 return -TARGET_EFAULT;
7424 __put_user(host_tx->modes, &target_tx->modes);
7425 __put_user(host_tx->offset, &target_tx->offset);
7426 __put_user(host_tx->freq, &target_tx->freq);
7427 __put_user(host_tx->maxerror, &target_tx->maxerror);
7428 __put_user(host_tx->esterror, &target_tx->esterror);
7429 __put_user(host_tx->status, &target_tx->status);
7430 __put_user(host_tx->constant, &target_tx->constant);
7431 __put_user(host_tx->precision, &target_tx->precision);
7432 __put_user(host_tx->tolerance, &target_tx->tolerance);
7433 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7434 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7435 __put_user(host_tx->tick, &target_tx->tick);
7436 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7437 __put_user(host_tx->jitter, &target_tx->jitter);
7438 __put_user(host_tx->shift, &target_tx->shift);
7439 __put_user(host_tx->stabil, &target_tx->stabil);
7440 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7441 __put_user(host_tx->calcnt, &target_tx->calcnt);
7442 __put_user(host_tx->errcnt, &target_tx->errcnt);
7443 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7444 __put_user(host_tx->tai, &target_tx->tai);
7446 unlock_user_struct(target_tx, target_addr, 1);
7447 return 0;
7449 #endif
7452 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7453 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7454 abi_long target_addr)
7456 struct target__kernel_timex *target_tx;
7458 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7459 offsetof(struct target__kernel_timex,
7460 time))) {
7461 return -TARGET_EFAULT;
7464 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7465 return -TARGET_EFAULT;
7468 __get_user(host_tx->modes, &target_tx->modes);
7469 __get_user(host_tx->offset, &target_tx->offset);
7470 __get_user(host_tx->freq, &target_tx->freq);
7471 __get_user(host_tx->maxerror, &target_tx->maxerror);
7472 __get_user(host_tx->esterror, &target_tx->esterror);
7473 __get_user(host_tx->status, &target_tx->status);
7474 __get_user(host_tx->constant, &target_tx->constant);
7475 __get_user(host_tx->precision, &target_tx->precision);
7476 __get_user(host_tx->tolerance, &target_tx->tolerance);
7477 __get_user(host_tx->tick, &target_tx->tick);
7478 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7479 __get_user(host_tx->jitter, &target_tx->jitter);
7480 __get_user(host_tx->shift, &target_tx->shift);
7481 __get_user(host_tx->stabil, &target_tx->stabil);
7482 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7483 __get_user(host_tx->calcnt, &target_tx->calcnt);
7484 __get_user(host_tx->errcnt, &target_tx->errcnt);
7485 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7486 __get_user(host_tx->tai, &target_tx->tai);
7488 unlock_user_struct(target_tx, target_addr, 0);
7489 return 0;
7492 static inline abi_long host_to_target_timex64(abi_long target_addr,
7493 struct timex *host_tx)
7495 struct target__kernel_timex *target_tx;
7497 if (copy_to_user_timeval64(target_addr +
7498 offsetof(struct target__kernel_timex, time),
7499 &host_tx->time)) {
7500 return -TARGET_EFAULT;
7503 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7504 return -TARGET_EFAULT;
7507 __put_user(host_tx->modes, &target_tx->modes);
7508 __put_user(host_tx->offset, &target_tx->offset);
7509 __put_user(host_tx->freq, &target_tx->freq);
7510 __put_user(host_tx->maxerror, &target_tx->maxerror);
7511 __put_user(host_tx->esterror, &target_tx->esterror);
7512 __put_user(host_tx->status, &target_tx->status);
7513 __put_user(host_tx->constant, &target_tx->constant);
7514 __put_user(host_tx->precision, &target_tx->precision);
7515 __put_user(host_tx->tolerance, &target_tx->tolerance);
7516 __put_user(host_tx->tick, &target_tx->tick);
7517 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7518 __put_user(host_tx->jitter, &target_tx->jitter);
7519 __put_user(host_tx->shift, &target_tx->shift);
7520 __put_user(host_tx->stabil, &target_tx->stabil);
7521 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7522 __put_user(host_tx->calcnt, &target_tx->calcnt);
7523 __put_user(host_tx->errcnt, &target_tx->errcnt);
7524 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7525 __put_user(host_tx->tai, &target_tx->tai);
7527 unlock_user_struct(target_tx, target_addr, 1);
7528 return 0;
7530 #endif
7532 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7533 #define sigev_notify_thread_id _sigev_un._tid
7534 #endif
7536 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7537 abi_ulong target_addr)
7539 struct target_sigevent *target_sevp;
7541 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7542 return -TARGET_EFAULT;
7545 /* This union is awkward on 64 bit systems because it has a 32 bit
7546 * integer and a pointer in it; we follow the conversion approach
7547 * used for handling sigval types in signal.c so the guest should get
7548 * the correct value back even if we did a 64 bit byteswap and it's
7549 * using the 32 bit integer.
7551 host_sevp->sigev_value.sival_ptr =
7552 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7553 host_sevp->sigev_signo =
7554 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7555 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7556 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7558 unlock_user_struct(target_sevp, target_addr, 1);
7559 return 0;
7562 #if defined(TARGET_NR_mlockall)
7563 static inline int target_to_host_mlockall_arg(int arg)
7565 int result = 0;
7567 if (arg & TARGET_MCL_CURRENT) {
7568 result |= MCL_CURRENT;
7570 if (arg & TARGET_MCL_FUTURE) {
7571 result |= MCL_FUTURE;
7573 #ifdef MCL_ONFAULT
7574 if (arg & TARGET_MCL_ONFAULT) {
7575 result |= MCL_ONFAULT;
7577 #endif
7579 return result;
7581 #endif
7583 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7584 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7585 defined(TARGET_NR_newfstatat))
7586 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7587 abi_ulong target_addr,
7588 struct stat *host_st)
7590 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7591 if (cpu_env->eabi) {
7592 struct target_eabi_stat64 *target_st;
7594 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7595 return -TARGET_EFAULT;
7596 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7597 __put_user(host_st->st_dev, &target_st->st_dev);
7598 __put_user(host_st->st_ino, &target_st->st_ino);
7599 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7600 __put_user(host_st->st_ino, &target_st->__st_ino);
7601 #endif
7602 __put_user(host_st->st_mode, &target_st->st_mode);
7603 __put_user(host_st->st_nlink, &target_st->st_nlink);
7604 __put_user(host_st->st_uid, &target_st->st_uid);
7605 __put_user(host_st->st_gid, &target_st->st_gid);
7606 __put_user(host_st->st_rdev, &target_st->st_rdev);
7607 __put_user(host_st->st_size, &target_st->st_size);
7608 __put_user(host_st->st_blksize, &target_st->st_blksize);
7609 __put_user(host_st->st_blocks, &target_st->st_blocks);
7610 __put_user(host_st->st_atime, &target_st->target_st_atime);
7611 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7612 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7613 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7614 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7615 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7616 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7617 #endif
7618 unlock_user_struct(target_st, target_addr, 1);
7619 } else
7620 #endif
7622 #if defined(TARGET_HAS_STRUCT_STAT64)
7623 struct target_stat64 *target_st;
7624 #else
7625 struct target_stat *target_st;
7626 #endif
7628 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7629 return -TARGET_EFAULT;
7630 memset(target_st, 0, sizeof(*target_st));
7631 __put_user(host_st->st_dev, &target_st->st_dev);
7632 __put_user(host_st->st_ino, &target_st->st_ino);
7633 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7634 __put_user(host_st->st_ino, &target_st->__st_ino);
7635 #endif
7636 __put_user(host_st->st_mode, &target_st->st_mode);
7637 __put_user(host_st->st_nlink, &target_st->st_nlink);
7638 __put_user(host_st->st_uid, &target_st->st_uid);
7639 __put_user(host_st->st_gid, &target_st->st_gid);
7640 __put_user(host_st->st_rdev, &target_st->st_rdev);
7641 /* XXX: better use of kernel struct */
7642 __put_user(host_st->st_size, &target_st->st_size);
7643 __put_user(host_st->st_blksize, &target_st->st_blksize);
7644 __put_user(host_st->st_blocks, &target_st->st_blocks);
7645 __put_user(host_st->st_atime, &target_st->target_st_atime);
7646 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7647 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7648 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7649 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7650 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7651 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7652 #endif
7653 unlock_user_struct(target_st, target_addr, 1);
7656 return 0;
7658 #endif
7660 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7661 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7662 abi_ulong target_addr)
7664 struct target_statx *target_stx;
7666 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7667 return -TARGET_EFAULT;
7669 memset(target_stx, 0, sizeof(*target_stx));
7671 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7672 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7673 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7674 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7675 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7676 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7677 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7678 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7679 __put_user(host_stx->stx_size, &target_stx->stx_size);
7680 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7681 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7682 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7683 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7684 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7685 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7686 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7687 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7688 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7689 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7690 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7691 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7692 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7693 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7695 unlock_user_struct(target_stx, target_addr, 1);
7697 return 0;
7699 #endif
7701 static int do_sys_futex(int *uaddr, int op, int val,
7702 const struct timespec *timeout, int *uaddr2,
7703 int val3)
7705 #if HOST_LONG_BITS == 64
7706 #if defined(__NR_futex)
7707 /* always a 64-bit time_t, it doesn't define _time64 version */
7708 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7710 #endif
7711 #else /* HOST_LONG_BITS == 64 */
7712 #if defined(__NR_futex_time64)
7713 if (sizeof(timeout->tv_sec) == 8) {
7714 /* _time64 function on 32bit arch */
7715 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7717 #endif
7718 #if defined(__NR_futex)
7719 /* old function on 32bit arch */
7720 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7721 #endif
7722 #endif /* HOST_LONG_BITS == 64 */
7723 g_assert_not_reached();
7726 static int do_safe_futex(int *uaddr, int op, int val,
7727 const struct timespec *timeout, int *uaddr2,
7728 int val3)
7730 #if HOST_LONG_BITS == 64
7731 #if defined(__NR_futex)
7732 /* always a 64-bit time_t, it doesn't define _time64 version */
7733 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7734 #endif
7735 #else /* HOST_LONG_BITS == 64 */
7736 #if defined(__NR_futex_time64)
7737 if (sizeof(timeout->tv_sec) == 8) {
7738 /* _time64 function on 32bit arch */
7739 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7740 val3));
7742 #endif
7743 #if defined(__NR_futex)
7744 /* old function on 32bit arch */
7745 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7746 #endif
7747 #endif /* HOST_LONG_BITS == 64 */
7748 return -TARGET_ENOSYS;
7751 /* ??? Using host futex calls even when target atomic operations
7752 are not really atomic probably breaks things. However implementing
7753 futexes locally would make futexes shared between multiple processes
7754 tricky. However they're probably useless because guest atomic
7755 operations won't work either. */
7756 #if defined(TARGET_NR_futex)
7757 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7758 target_ulong timeout, target_ulong uaddr2, int val3)
7760 struct timespec ts, *pts;
7761 int base_op;
7763 /* ??? We assume FUTEX_* constants are the same on both host
7764 and target. */
7765 #ifdef FUTEX_CMD_MASK
7766 base_op = op & FUTEX_CMD_MASK;
7767 #else
7768 base_op = op;
7769 #endif
7770 switch (base_op) {
7771 case FUTEX_WAIT:
7772 case FUTEX_WAIT_BITSET:
7773 if (timeout) {
7774 pts = &ts;
7775 target_to_host_timespec(pts, timeout);
7776 } else {
7777 pts = NULL;
7779 return do_safe_futex(g2h(cpu, uaddr),
7780 op, tswap32(val), pts, NULL, val3);
7781 case FUTEX_WAKE:
7782 return do_safe_futex(g2h(cpu, uaddr),
7783 op, val, NULL, NULL, 0);
7784 case FUTEX_FD:
7785 return do_safe_futex(g2h(cpu, uaddr),
7786 op, val, NULL, NULL, 0);
7787 case FUTEX_REQUEUE:
7788 case FUTEX_CMP_REQUEUE:
7789 case FUTEX_WAKE_OP:
7790 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7791 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7792 But the prototype takes a `struct timespec *'; insert casts
7793 to satisfy the compiler. We do not need to tswap TIMEOUT
7794 since it's not compared to guest memory. */
7795 pts = (struct timespec *)(uintptr_t) timeout;
7796 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7797 (base_op == FUTEX_CMP_REQUEUE
7798 ? tswap32(val3) : val3));
7799 default:
7800 return -TARGET_ENOSYS;
7803 #endif
7805 #if defined(TARGET_NR_futex_time64)
7806 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7807 int val, target_ulong timeout,
7808 target_ulong uaddr2, int val3)
7810 struct timespec ts, *pts;
7811 int base_op;
7813 /* ??? We assume FUTEX_* constants are the same on both host
7814 and target. */
7815 #ifdef FUTEX_CMD_MASK
7816 base_op = op & FUTEX_CMD_MASK;
7817 #else
7818 base_op = op;
7819 #endif
7820 switch (base_op) {
7821 case FUTEX_WAIT:
7822 case FUTEX_WAIT_BITSET:
7823 if (timeout) {
7824 pts = &ts;
7825 if (target_to_host_timespec64(pts, timeout)) {
7826 return -TARGET_EFAULT;
7828 } else {
7829 pts = NULL;
7831 return do_safe_futex(g2h(cpu, uaddr), op,
7832 tswap32(val), pts, NULL, val3);
7833 case FUTEX_WAKE:
7834 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7835 case FUTEX_FD:
7836 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7837 case FUTEX_REQUEUE:
7838 case FUTEX_CMP_REQUEUE:
7839 case FUTEX_WAKE_OP:
7840 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7841 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7842 But the prototype takes a `struct timespec *'; insert casts
7843 to satisfy the compiler. We do not need to tswap TIMEOUT
7844 since it's not compared to guest memory. */
7845 pts = (struct timespec *)(uintptr_t) timeout;
7846 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7847 (base_op == FUTEX_CMP_REQUEUE
7848 ? tswap32(val3) : val3));
7849 default:
7850 return -TARGET_ENOSYS;
7853 #endif
7855 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7856 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7857 abi_long handle, abi_long mount_id,
7858 abi_long flags)
7860 struct file_handle *target_fh;
7861 struct file_handle *fh;
7862 int mid = 0;
7863 abi_long ret;
7864 char *name;
7865 unsigned int size, total_size;
7867 if (get_user_s32(size, handle)) {
7868 return -TARGET_EFAULT;
7871 name = lock_user_string(pathname);
7872 if (!name) {
7873 return -TARGET_EFAULT;
7876 total_size = sizeof(struct file_handle) + size;
7877 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7878 if (!target_fh) {
7879 unlock_user(name, pathname, 0);
7880 return -TARGET_EFAULT;
7883 fh = g_malloc0(total_size);
7884 fh->handle_bytes = size;
7886 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7887 unlock_user(name, pathname, 0);
7889 /* man name_to_handle_at(2):
7890 * Other than the use of the handle_bytes field, the caller should treat
7891 * the file_handle structure as an opaque data type
7894 memcpy(target_fh, fh, total_size);
7895 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7896 target_fh->handle_type = tswap32(fh->handle_type);
7897 g_free(fh);
7898 unlock_user(target_fh, handle, total_size);
7900 if (put_user_s32(mid, mount_id)) {
7901 return -TARGET_EFAULT;
7904 return ret;
7907 #endif
7909 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7910 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7911 abi_long flags)
7913 struct file_handle *target_fh;
7914 struct file_handle *fh;
7915 unsigned int size, total_size;
7916 abi_long ret;
7918 if (get_user_s32(size, handle)) {
7919 return -TARGET_EFAULT;
7922 total_size = sizeof(struct file_handle) + size;
7923 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7924 if (!target_fh) {
7925 return -TARGET_EFAULT;
7928 fh = g_memdup(target_fh, total_size);
7929 fh->handle_bytes = size;
7930 fh->handle_type = tswap32(target_fh->handle_type);
7932 ret = get_errno(open_by_handle_at(mount_fd, fh,
7933 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7935 g_free(fh);
7937 unlock_user(target_fh, handle, total_size);
7939 return ret;
7941 #endif
7943 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7945 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7947 int host_flags;
7948 target_sigset_t *target_mask;
7949 sigset_t host_mask;
7950 abi_long ret;
7952 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7953 return -TARGET_EINVAL;
7955 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7956 return -TARGET_EFAULT;
7959 target_to_host_sigset(&host_mask, target_mask);
7961 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7963 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7964 if (ret >= 0) {
7965 fd_trans_register(ret, &target_signalfd_trans);
7968 unlock_user_struct(target_mask, mask, 0);
7970 return ret;
7972 #endif
7974 /* Map host to target signal numbers for the wait family of syscalls.
7975 Assume all other status bits are the same. */
7976 int host_to_target_waitstatus(int status)
7978 if (WIFSIGNALED(status)) {
7979 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7981 if (WIFSTOPPED(status)) {
7982 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7983 | (status & 0xff);
7985 return status;
7988 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7990 CPUState *cpu = env_cpu(cpu_env);
7991 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7992 int i;
7994 for (i = 0; i < bprm->argc; i++) {
7995 size_t len = strlen(bprm->argv[i]) + 1;
7997 if (write(fd, bprm->argv[i], len) != len) {
7998 return -1;
8002 return 0;
8005 static int open_self_maps(CPUArchState *cpu_env, int fd)
8007 CPUState *cpu = env_cpu(cpu_env);
8008 TaskState *ts = cpu->opaque;
8009 GSList *map_info = read_self_maps();
8010 GSList *s;
8011 int count;
8013 for (s = map_info; s; s = g_slist_next(s)) {
8014 MapInfo *e = (MapInfo *) s->data;
8016 if (h2g_valid(e->start)) {
8017 unsigned long min = e->start;
8018 unsigned long max = e->end;
8019 int flags = page_get_flags(h2g(min));
8020 const char *path;
8022 max = h2g_valid(max - 1) ?
8023 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8025 if (page_check_range(h2g(min), max - min, flags) == -1) {
8026 continue;
8029 if (h2g(min) == ts->info->stack_limit) {
8030 path = "[stack]";
8031 } else {
8032 path = e->path;
8035 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8036 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8037 h2g(min), h2g(max - 1) + 1,
8038 (flags & PAGE_READ) ? 'r' : '-',
8039 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8040 (flags & PAGE_EXEC) ? 'x' : '-',
8041 e->is_priv ? 'p' : 's',
8042 (uint64_t) e->offset, e->dev, e->inode);
8043 if (path) {
8044 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8045 } else {
8046 dprintf(fd, "\n");
8051 free_self_maps(map_info);
8053 #ifdef TARGET_VSYSCALL_PAGE
8055 * We only support execution from the vsyscall page.
8056 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8058 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8059 " --xp 00000000 00:00 0",
8060 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8061 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8062 #endif
8064 return 0;
8067 static int open_self_stat(CPUArchState *cpu_env, int fd)
8069 CPUState *cpu = env_cpu(cpu_env);
8070 TaskState *ts = cpu->opaque;
8071 g_autoptr(GString) buf = g_string_new(NULL);
8072 int i;
8074 for (i = 0; i < 44; i++) {
8075 if (i == 0) {
8076 /* pid */
8077 g_string_printf(buf, FMT_pid " ", getpid());
8078 } else if (i == 1) {
8079 /* app name */
8080 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8081 bin = bin ? bin + 1 : ts->bprm->argv[0];
8082 g_string_printf(buf, "(%.15s) ", bin);
8083 } else if (i == 3) {
8084 /* ppid */
8085 g_string_printf(buf, FMT_pid " ", getppid());
8086 } else if (i == 21) {
8087 /* starttime */
8088 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8089 } else if (i == 27) {
8090 /* stack bottom */
8091 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8092 } else {
8093 /* for the rest, there is MasterCard */
8094 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8097 if (write(fd, buf->str, buf->len) != buf->len) {
8098 return -1;
8102 return 0;
8105 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8107 CPUState *cpu = env_cpu(cpu_env);
8108 TaskState *ts = cpu->opaque;
8109 abi_ulong auxv = ts->info->saved_auxv;
8110 abi_ulong len = ts->info->auxv_len;
8111 char *ptr;
8114 * Auxiliary vector is stored in target process stack.
8115 * read in whole auxv vector and copy it to file
8117 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8118 if (ptr != NULL) {
8119 while (len > 0) {
8120 ssize_t r;
8121 r = write(fd, ptr, len);
8122 if (r <= 0) {
8123 break;
8125 len -= r;
8126 ptr += r;
8128 lseek(fd, 0, SEEK_SET);
8129 unlock_user(ptr, auxv, len);
8132 return 0;
8135 static int is_proc_myself(const char *filename, const char *entry)
8137 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8138 filename += strlen("/proc/");
8139 if (!strncmp(filename, "self/", strlen("self/"))) {
8140 filename += strlen("self/");
8141 } else if (*filename >= '1' && *filename <= '9') {
8142 char myself[80];
8143 snprintf(myself, sizeof(myself), "%d/", getpid());
8144 if (!strncmp(filename, myself, strlen(myself))) {
8145 filename += strlen(myself);
8146 } else {
8147 return 0;
8149 } else {
8150 return 0;
8152 if (!strcmp(filename, entry)) {
8153 return 1;
8156 return 0;
8159 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8160 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8161 static int is_proc(const char *filename, const char *entry)
8163 return strcmp(filename, entry) == 0;
8165 #endif
8167 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8168 static int open_net_route(CPUArchState *cpu_env, int fd)
8170 FILE *fp;
8171 char *line = NULL;
8172 size_t len = 0;
8173 ssize_t read;
8175 fp = fopen("/proc/net/route", "r");
8176 if (fp == NULL) {
8177 return -1;
8180 /* read header */
8182 read = getline(&line, &len, fp);
8183 dprintf(fd, "%s", line);
8185 /* read routes */
8187 while ((read = getline(&line, &len, fp)) != -1) {
8188 char iface[16];
8189 uint32_t dest, gw, mask;
8190 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8191 int fields;
8193 fields = sscanf(line,
8194 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8195 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8196 &mask, &mtu, &window, &irtt);
8197 if (fields != 11) {
8198 continue;
8200 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8201 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8202 metric, tswap32(mask), mtu, window, irtt);
8205 free(line);
8206 fclose(fp);
8208 return 0;
8210 #endif
8212 #if defined(TARGET_SPARC)
8213 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8215 dprintf(fd, "type\t\t: sun4u\n");
8216 return 0;
8218 #endif
8220 #if defined(TARGET_HPPA)
8221 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8223 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8224 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8225 dprintf(fd, "capabilities\t: os32\n");
8226 dprintf(fd, "model\t\t: 9000/778/B160L\n");
8227 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8228 return 0;
8230 #endif
8232 #if defined(TARGET_M68K)
8233 static int open_hardware(CPUArchState *cpu_env, int fd)
8235 dprintf(fd, "Model:\t\tqemu-m68k\n");
8236 return 0;
8238 #endif
8240 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8242 struct fake_open {
8243 const char *filename;
8244 int (*fill)(CPUArchState *cpu_env, int fd);
8245 int (*cmp)(const char *s1, const char *s2);
8247 const struct fake_open *fake_open;
8248 static const struct fake_open fakes[] = {
8249 { "maps", open_self_maps, is_proc_myself },
8250 { "stat", open_self_stat, is_proc_myself },
8251 { "auxv", open_self_auxv, is_proc_myself },
8252 { "cmdline", open_self_cmdline, is_proc_myself },
8253 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8254 { "/proc/net/route", open_net_route, is_proc },
8255 #endif
8256 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8257 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8258 #endif
8259 #if defined(TARGET_M68K)
8260 { "/proc/hardware", open_hardware, is_proc },
8261 #endif
8262 { NULL, NULL, NULL }
8265 if (is_proc_myself(pathname, "exe")) {
8266 int execfd = qemu_getauxval(AT_EXECFD);
8267 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8270 for (fake_open = fakes; fake_open->filename; fake_open++) {
8271 if (fake_open->cmp(pathname, fake_open->filename)) {
8272 break;
8276 if (fake_open->filename) {
8277 const char *tmpdir;
8278 char filename[PATH_MAX];
8279 int fd, r;
8281 fd = memfd_create("qemu-open", 0);
8282 if (fd < 0) {
8283 if (errno != ENOSYS) {
8284 return fd;
8286 /* create temporary file to map stat to */
8287 tmpdir = getenv("TMPDIR");
8288 if (!tmpdir)
8289 tmpdir = "/tmp";
8290 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8291 fd = mkstemp(filename);
8292 if (fd < 0) {
8293 return fd;
8295 unlink(filename);
8298 if ((r = fake_open->fill(cpu_env, fd))) {
8299 int e = errno;
8300 close(fd);
8301 errno = e;
8302 return r;
8304 lseek(fd, 0, SEEK_SET);
8306 return fd;
8309 return safe_openat(dirfd, path(pathname), flags, mode);
8312 #define TIMER_MAGIC 0x0caf0000
8313 #define TIMER_MAGIC_MASK 0xffff0000
8315 /* Convert QEMU provided timer ID back to internal 16bit index format */
8316 static target_timer_t get_timer_id(abi_long arg)
8318 target_timer_t timerid = arg;
8320 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8321 return -TARGET_EINVAL;
8324 timerid &= 0xffff;
8326 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8327 return -TARGET_EINVAL;
8330 return timerid;
8333 static int target_to_host_cpu_mask(unsigned long *host_mask,
8334 size_t host_size,
8335 abi_ulong target_addr,
8336 size_t target_size)
8338 unsigned target_bits = sizeof(abi_ulong) * 8;
8339 unsigned host_bits = sizeof(*host_mask) * 8;
8340 abi_ulong *target_mask;
8341 unsigned i, j;
8343 assert(host_size >= target_size);
8345 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8346 if (!target_mask) {
8347 return -TARGET_EFAULT;
8349 memset(host_mask, 0, host_size);
8351 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8352 unsigned bit = i * target_bits;
8353 abi_ulong val;
8355 __get_user(val, &target_mask[i]);
8356 for (j = 0; j < target_bits; j++, bit++) {
8357 if (val & (1UL << j)) {
8358 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8363 unlock_user(target_mask, target_addr, 0);
8364 return 0;
8367 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8368 size_t host_size,
8369 abi_ulong target_addr,
8370 size_t target_size)
8372 unsigned target_bits = sizeof(abi_ulong) * 8;
8373 unsigned host_bits = sizeof(*host_mask) * 8;
8374 abi_ulong *target_mask;
8375 unsigned i, j;
8377 assert(host_size >= target_size);
8379 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8380 if (!target_mask) {
8381 return -TARGET_EFAULT;
8384 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8385 unsigned bit = i * target_bits;
8386 abi_ulong val = 0;
8388 for (j = 0; j < target_bits; j++, bit++) {
8389 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8390 val |= 1UL << j;
8393 __put_user(val, &target_mask[i]);
8396 unlock_user(target_mask, target_addr, target_size);
8397 return 0;
8400 #ifdef TARGET_NR_getdents
8401 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8403 g_autofree void *hdirp = NULL;
8404 void *tdirp;
8405 int hlen, hoff, toff;
8406 int hreclen, treclen;
8407 off64_t prev_diroff = 0;
8409 hdirp = g_try_malloc(count);
8410 if (!hdirp) {
8411 return -TARGET_ENOMEM;
8414 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8415 hlen = sys_getdents(dirfd, hdirp, count);
8416 #else
8417 hlen = sys_getdents64(dirfd, hdirp, count);
8418 #endif
8420 hlen = get_errno(hlen);
8421 if (is_error(hlen)) {
8422 return hlen;
8425 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8426 if (!tdirp) {
8427 return -TARGET_EFAULT;
8430 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8431 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8432 struct linux_dirent *hde = hdirp + hoff;
8433 #else
8434 struct linux_dirent64 *hde = hdirp + hoff;
8435 #endif
8436 struct target_dirent *tde = tdirp + toff;
8437 int namelen;
8438 uint8_t type;
8440 namelen = strlen(hde->d_name);
8441 hreclen = hde->d_reclen;
8442 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8443 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8445 if (toff + treclen > count) {
8447 * If the host struct is smaller than the target struct, or
8448 * requires less alignment and thus packs into less space,
8449 * then the host can return more entries than we can pass
8450 * on to the guest.
8452 if (toff == 0) {
8453 toff = -TARGET_EINVAL; /* result buffer is too small */
8454 break;
8457 * Return what we have, resetting the file pointer to the
8458 * location of the first record not returned.
8460 lseek64(dirfd, prev_diroff, SEEK_SET);
8461 break;
8464 prev_diroff = hde->d_off;
8465 tde->d_ino = tswapal(hde->d_ino);
8466 tde->d_off = tswapal(hde->d_off);
8467 tde->d_reclen = tswap16(treclen);
8468 memcpy(tde->d_name, hde->d_name, namelen + 1);
8471 * The getdents type is in what was formerly a padding byte at the
8472 * end of the structure.
8474 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8475 type = *((uint8_t *)hde + hreclen - 1);
8476 #else
8477 type = hde->d_type;
8478 #endif
8479 *((uint8_t *)tde + treclen - 1) = type;
8482 unlock_user(tdirp, arg2, toff);
8483 return toff;
8485 #endif /* TARGET_NR_getdents */
8487 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8488 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8490 g_autofree void *hdirp = NULL;
8491 void *tdirp;
8492 int hlen, hoff, toff;
8493 int hreclen, treclen;
8494 off64_t prev_diroff = 0;
8496 hdirp = g_try_malloc(count);
8497 if (!hdirp) {
8498 return -TARGET_ENOMEM;
8501 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8502 if (is_error(hlen)) {
8503 return hlen;
8506 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8507 if (!tdirp) {
8508 return -TARGET_EFAULT;
8511 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8512 struct linux_dirent64 *hde = hdirp + hoff;
8513 struct target_dirent64 *tde = tdirp + toff;
8514 int namelen;
8516 namelen = strlen(hde->d_name) + 1;
8517 hreclen = hde->d_reclen;
8518 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8519 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8521 if (toff + treclen > count) {
8523 * If the host struct is smaller than the target struct, or
8524 * requires less alignment and thus packs into less space,
8525 * then the host can return more entries than we can pass
8526 * on to the guest.
8528 if (toff == 0) {
8529 toff = -TARGET_EINVAL; /* result buffer is too small */
8530 break;
8533 * Return what we have, resetting the file pointer to the
8534 * location of the first record not returned.
8536 lseek64(dirfd, prev_diroff, SEEK_SET);
8537 break;
8540 prev_diroff = hde->d_off;
8541 tde->d_ino = tswap64(hde->d_ino);
8542 tde->d_off = tswap64(hde->d_off);
8543 tde->d_reclen = tswap16(treclen);
8544 tde->d_type = hde->d_type;
8545 memcpy(tde->d_name, hde->d_name, namelen);
8548 unlock_user(tdirp, arg2, toff);
8549 return toff;
8551 #endif /* TARGET_NR_getdents64 */
8553 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8554 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8555 #endif
8557 /* This is an internal helper for do_syscall so that it is easier
8558 * to have a single return point, so that actions, such as logging
8559 * of syscall results, can be performed.
8560 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8562 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8563 abi_long arg2, abi_long arg3, abi_long arg4,
8564 abi_long arg5, abi_long arg6, abi_long arg7,
8565 abi_long arg8)
8567 CPUState *cpu = env_cpu(cpu_env);
8568 abi_long ret;
8569 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8570 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8571 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8572 || defined(TARGET_NR_statx)
8573 struct stat st;
8574 #endif
8575 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8576 || defined(TARGET_NR_fstatfs)
8577 struct statfs stfs;
8578 #endif
8579 void *p;
8581 switch(num) {
8582 case TARGET_NR_exit:
8583 /* In old applications this may be used to implement _exit(2).
8584 However in threaded applications it is used for thread termination,
8585 and _exit_group is used for application termination.
8586 Do thread termination if we have more then one thread. */
8588 if (block_signals()) {
8589 return -QEMU_ERESTARTSYS;
8592 pthread_mutex_lock(&clone_lock);
8594 if (CPU_NEXT(first_cpu)) {
8595 TaskState *ts = cpu->opaque;
8597 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8598 object_unref(OBJECT(cpu));
8600 * At this point the CPU should be unrealized and removed
8601 * from cpu lists. We can clean-up the rest of the thread
8602 * data without the lock held.
8605 pthread_mutex_unlock(&clone_lock);
8607 if (ts->child_tidptr) {
8608 put_user_u32(0, ts->child_tidptr);
8609 do_sys_futex(g2h(cpu, ts->child_tidptr),
8610 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8612 thread_cpu = NULL;
8613 g_free(ts);
8614 rcu_unregister_thread();
8615 pthread_exit(NULL);
8618 pthread_mutex_unlock(&clone_lock);
8619 preexit_cleanup(cpu_env, arg1);
8620 _exit(arg1);
8621 return 0; /* avoid warning */
8622 case TARGET_NR_read:
8623 if (arg2 == 0 && arg3 == 0) {
8624 return get_errno(safe_read(arg1, 0, 0));
8625 } else {
8626 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8627 return -TARGET_EFAULT;
8628 ret = get_errno(safe_read(arg1, p, arg3));
8629 if (ret >= 0 &&
8630 fd_trans_host_to_target_data(arg1)) {
8631 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8633 unlock_user(p, arg2, ret);
8635 return ret;
8636 case TARGET_NR_write:
8637 if (arg2 == 0 && arg3 == 0) {
8638 return get_errno(safe_write(arg1, 0, 0));
8640 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8641 return -TARGET_EFAULT;
8642 if (fd_trans_target_to_host_data(arg1)) {
8643 void *copy = g_malloc(arg3);
8644 memcpy(copy, p, arg3);
8645 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8646 if (ret >= 0) {
8647 ret = get_errno(safe_write(arg1, copy, ret));
8649 g_free(copy);
8650 } else {
8651 ret = get_errno(safe_write(arg1, p, arg3));
8653 unlock_user(p, arg2, 0);
8654 return ret;
8656 #ifdef TARGET_NR_open
8657 case TARGET_NR_open:
8658 if (!(p = lock_user_string(arg1)))
8659 return -TARGET_EFAULT;
8660 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8661 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8662 arg3));
8663 fd_trans_unregister(ret);
8664 unlock_user(p, arg1, 0);
8665 return ret;
8666 #endif
8667 case TARGET_NR_openat:
8668 if (!(p = lock_user_string(arg2)))
8669 return -TARGET_EFAULT;
8670 ret = get_errno(do_openat(cpu_env, arg1, p,
8671 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8672 arg4));
8673 fd_trans_unregister(ret);
8674 unlock_user(p, arg2, 0);
8675 return ret;
8676 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8677 case TARGET_NR_name_to_handle_at:
8678 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8679 return ret;
8680 #endif
8681 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8682 case TARGET_NR_open_by_handle_at:
8683 ret = do_open_by_handle_at(arg1, arg2, arg3);
8684 fd_trans_unregister(ret);
8685 return ret;
8686 #endif
8687 case TARGET_NR_close:
8688 fd_trans_unregister(arg1);
8689 return get_errno(close(arg1));
8691 case TARGET_NR_brk:
8692 return do_brk(arg1);
8693 #ifdef TARGET_NR_fork
8694 case TARGET_NR_fork:
8695 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8696 #endif
8697 #ifdef TARGET_NR_waitpid
8698 case TARGET_NR_waitpid:
8700 int status;
8701 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8702 if (!is_error(ret) && arg2 && ret
8703 && put_user_s32(host_to_target_waitstatus(status), arg2))
8704 return -TARGET_EFAULT;
8706 return ret;
8707 #endif
8708 #ifdef TARGET_NR_waitid
8709 case TARGET_NR_waitid:
8711 siginfo_t info;
8712 info.si_pid = 0;
8713 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8714 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8715 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8716 return -TARGET_EFAULT;
8717 host_to_target_siginfo(p, &info);
8718 unlock_user(p, arg3, sizeof(target_siginfo_t));
8721 return ret;
8722 #endif
8723 #ifdef TARGET_NR_creat /* not on alpha */
8724 case TARGET_NR_creat:
8725 if (!(p = lock_user_string(arg1)))
8726 return -TARGET_EFAULT;
8727 ret = get_errno(creat(p, arg2));
8728 fd_trans_unregister(ret);
8729 unlock_user(p, arg1, 0);
8730 return ret;
8731 #endif
8732 #ifdef TARGET_NR_link
8733 case TARGET_NR_link:
8735 void * p2;
8736 p = lock_user_string(arg1);
8737 p2 = lock_user_string(arg2);
8738 if (!p || !p2)
8739 ret = -TARGET_EFAULT;
8740 else
8741 ret = get_errno(link(p, p2));
8742 unlock_user(p2, arg2, 0);
8743 unlock_user(p, arg1, 0);
8745 return ret;
8746 #endif
8747 #if defined(TARGET_NR_linkat)
8748 case TARGET_NR_linkat:
8750 void * p2 = NULL;
8751 if (!arg2 || !arg4)
8752 return -TARGET_EFAULT;
8753 p = lock_user_string(arg2);
8754 p2 = lock_user_string(arg4);
8755 if (!p || !p2)
8756 ret = -TARGET_EFAULT;
8757 else
8758 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8759 unlock_user(p, arg2, 0);
8760 unlock_user(p2, arg4, 0);
8762 return ret;
8763 #endif
8764 #ifdef TARGET_NR_unlink
8765 case TARGET_NR_unlink:
8766 if (!(p = lock_user_string(arg1)))
8767 return -TARGET_EFAULT;
8768 ret = get_errno(unlink(p));
8769 unlock_user(p, arg1, 0);
8770 return ret;
8771 #endif
8772 #if defined(TARGET_NR_unlinkat)
8773 case TARGET_NR_unlinkat:
8774 if (!(p = lock_user_string(arg2)))
8775 return -TARGET_EFAULT;
8776 ret = get_errno(unlinkat(arg1, p, arg3));
8777 unlock_user(p, arg2, 0);
8778 return ret;
8779 #endif
8780 case TARGET_NR_execve:
8782 char **argp, **envp;
8783 int argc, envc;
8784 abi_ulong gp;
8785 abi_ulong guest_argp;
8786 abi_ulong guest_envp;
8787 abi_ulong addr;
8788 char **q;
8790 argc = 0;
8791 guest_argp = arg2;
8792 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8793 if (get_user_ual(addr, gp))
8794 return -TARGET_EFAULT;
8795 if (!addr)
8796 break;
8797 argc++;
8799 envc = 0;
8800 guest_envp = arg3;
8801 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8802 if (get_user_ual(addr, gp))
8803 return -TARGET_EFAULT;
8804 if (!addr)
8805 break;
8806 envc++;
8809 argp = g_new0(char *, argc + 1);
8810 envp = g_new0(char *, envc + 1);
8812 for (gp = guest_argp, q = argp; gp;
8813 gp += sizeof(abi_ulong), q++) {
8814 if (get_user_ual(addr, gp))
8815 goto execve_efault;
8816 if (!addr)
8817 break;
8818 if (!(*q = lock_user_string(addr)))
8819 goto execve_efault;
8821 *q = NULL;
8823 for (gp = guest_envp, q = envp; gp;
8824 gp += sizeof(abi_ulong), q++) {
8825 if (get_user_ual(addr, gp))
8826 goto execve_efault;
8827 if (!addr)
8828 break;
8829 if (!(*q = lock_user_string(addr)))
8830 goto execve_efault;
8832 *q = NULL;
8834 if (!(p = lock_user_string(arg1)))
8835 goto execve_efault;
8836 /* Although execve() is not an interruptible syscall it is
8837 * a special case where we must use the safe_syscall wrapper:
8838 * if we allow a signal to happen before we make the host
8839 * syscall then we will 'lose' it, because at the point of
8840 * execve the process leaves QEMU's control. So we use the
8841 * safe syscall wrapper to ensure that we either take the
8842 * signal as a guest signal, or else it does not happen
8843 * before the execve completes and makes it the other
8844 * program's problem.
8846 ret = get_errno(safe_execve(p, argp, envp));
8847 unlock_user(p, arg1, 0);
8849 goto execve_end;
8851 execve_efault:
8852 ret = -TARGET_EFAULT;
8854 execve_end:
8855 for (gp = guest_argp, q = argp; *q;
8856 gp += sizeof(abi_ulong), q++) {
8857 if (get_user_ual(addr, gp)
8858 || !addr)
8859 break;
8860 unlock_user(*q, addr, 0);
8862 for (gp = guest_envp, q = envp; *q;
8863 gp += sizeof(abi_ulong), q++) {
8864 if (get_user_ual(addr, gp)
8865 || !addr)
8866 break;
8867 unlock_user(*q, addr, 0);
8870 g_free(argp);
8871 g_free(envp);
8873 return ret;
8874 case TARGET_NR_chdir:
8875 if (!(p = lock_user_string(arg1)))
8876 return -TARGET_EFAULT;
8877 ret = get_errno(chdir(p));
8878 unlock_user(p, arg1, 0);
8879 return ret;
8880 #ifdef TARGET_NR_time
8881 case TARGET_NR_time:
8883 time_t host_time;
8884 ret = get_errno(time(&host_time));
8885 if (!is_error(ret)
8886 && arg1
8887 && put_user_sal(host_time, arg1))
8888 return -TARGET_EFAULT;
8890 return ret;
8891 #endif
8892 #ifdef TARGET_NR_mknod
8893 case TARGET_NR_mknod:
8894 if (!(p = lock_user_string(arg1)))
8895 return -TARGET_EFAULT;
8896 ret = get_errno(mknod(p, arg2, arg3));
8897 unlock_user(p, arg1, 0);
8898 return ret;
8899 #endif
8900 #if defined(TARGET_NR_mknodat)
8901 case TARGET_NR_mknodat:
8902 if (!(p = lock_user_string(arg2)))
8903 return -TARGET_EFAULT;
8904 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8905 unlock_user(p, arg2, 0);
8906 return ret;
8907 #endif
8908 #ifdef TARGET_NR_chmod
8909 case TARGET_NR_chmod:
8910 if (!(p = lock_user_string(arg1)))
8911 return -TARGET_EFAULT;
8912 ret = get_errno(chmod(p, arg2));
8913 unlock_user(p, arg1, 0);
8914 return ret;
8915 #endif
8916 #ifdef TARGET_NR_lseek
8917 case TARGET_NR_lseek:
8918 return get_errno(lseek(arg1, arg2, arg3));
8919 #endif
8920 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8921 /* Alpha specific */
8922 case TARGET_NR_getxpid:
8923 cpu_env->ir[IR_A4] = getppid();
8924 return get_errno(getpid());
8925 #endif
8926 #ifdef TARGET_NR_getpid
8927 case TARGET_NR_getpid:
8928 return get_errno(getpid());
8929 #endif
8930 case TARGET_NR_mount:
8932 /* need to look at the data field */
8933 void *p2, *p3;
8935 if (arg1) {
8936 p = lock_user_string(arg1);
8937 if (!p) {
8938 return -TARGET_EFAULT;
8940 } else {
8941 p = NULL;
8944 p2 = lock_user_string(arg2);
8945 if (!p2) {
8946 if (arg1) {
8947 unlock_user(p, arg1, 0);
8949 return -TARGET_EFAULT;
8952 if (arg3) {
8953 p3 = lock_user_string(arg3);
8954 if (!p3) {
8955 if (arg1) {
8956 unlock_user(p, arg1, 0);
8958 unlock_user(p2, arg2, 0);
8959 return -TARGET_EFAULT;
8961 } else {
8962 p3 = NULL;
8965 /* FIXME - arg5 should be locked, but it isn't clear how to
8966 * do that since it's not guaranteed to be a NULL-terminated
8967 * string.
8969 if (!arg5) {
8970 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8971 } else {
8972 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8974 ret = get_errno(ret);
8976 if (arg1) {
8977 unlock_user(p, arg1, 0);
8979 unlock_user(p2, arg2, 0);
8980 if (arg3) {
8981 unlock_user(p3, arg3, 0);
8984 return ret;
8985 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8986 #if defined(TARGET_NR_umount)
8987 case TARGET_NR_umount:
8988 #endif
8989 #if defined(TARGET_NR_oldumount)
8990 case TARGET_NR_oldumount:
8991 #endif
8992 if (!(p = lock_user_string(arg1)))
8993 return -TARGET_EFAULT;
8994 ret = get_errno(umount(p));
8995 unlock_user(p, arg1, 0);
8996 return ret;
8997 #endif
8998 #ifdef TARGET_NR_stime /* not on alpha */
8999 case TARGET_NR_stime:
9001 struct timespec ts;
9002 ts.tv_nsec = 0;
9003 if (get_user_sal(ts.tv_sec, arg1)) {
9004 return -TARGET_EFAULT;
9006 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9008 #endif
9009 #ifdef TARGET_NR_alarm /* not on alpha */
9010 case TARGET_NR_alarm:
9011 return alarm(arg1);
9012 #endif
9013 #ifdef TARGET_NR_pause /* not on alpha */
9014 case TARGET_NR_pause:
9015 if (!block_signals()) {
9016 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9018 return -TARGET_EINTR;
9019 #endif
9020 #ifdef TARGET_NR_utime
9021 case TARGET_NR_utime:
9023 struct utimbuf tbuf, *host_tbuf;
9024 struct target_utimbuf *target_tbuf;
9025 if (arg2) {
9026 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9027 return -TARGET_EFAULT;
9028 tbuf.actime = tswapal(target_tbuf->actime);
9029 tbuf.modtime = tswapal(target_tbuf->modtime);
9030 unlock_user_struct(target_tbuf, arg2, 0);
9031 host_tbuf = &tbuf;
9032 } else {
9033 host_tbuf = NULL;
9035 if (!(p = lock_user_string(arg1)))
9036 return -TARGET_EFAULT;
9037 ret = get_errno(utime(p, host_tbuf));
9038 unlock_user(p, arg1, 0);
9040 return ret;
9041 #endif
9042 #ifdef TARGET_NR_utimes
9043 case TARGET_NR_utimes:
9045 struct timeval *tvp, tv[2];
9046 if (arg2) {
9047 if (copy_from_user_timeval(&tv[0], arg2)
9048 || copy_from_user_timeval(&tv[1],
9049 arg2 + sizeof(struct target_timeval)))
9050 return -TARGET_EFAULT;
9051 tvp = tv;
9052 } else {
9053 tvp = NULL;
9055 if (!(p = lock_user_string(arg1)))
9056 return -TARGET_EFAULT;
9057 ret = get_errno(utimes(p, tvp));
9058 unlock_user(p, arg1, 0);
9060 return ret;
9061 #endif
9062 #if defined(TARGET_NR_futimesat)
9063 case TARGET_NR_futimesat:
9065 struct timeval *tvp, tv[2];
9066 if (arg3) {
9067 if (copy_from_user_timeval(&tv[0], arg3)
9068 || copy_from_user_timeval(&tv[1],
9069 arg3 + sizeof(struct target_timeval)))
9070 return -TARGET_EFAULT;
9071 tvp = tv;
9072 } else {
9073 tvp = NULL;
9075 if (!(p = lock_user_string(arg2))) {
9076 return -TARGET_EFAULT;
9078 ret = get_errno(futimesat(arg1, path(p), tvp));
9079 unlock_user(p, arg2, 0);
9081 return ret;
9082 #endif
9083 #ifdef TARGET_NR_access
9084 case TARGET_NR_access:
9085 if (!(p = lock_user_string(arg1))) {
9086 return -TARGET_EFAULT;
9088 ret = get_errno(access(path(p), arg2));
9089 unlock_user(p, arg1, 0);
9090 return ret;
9091 #endif
9092 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9093 case TARGET_NR_faccessat:
9094 if (!(p = lock_user_string(arg2))) {
9095 return -TARGET_EFAULT;
9097 ret = get_errno(faccessat(arg1, p, arg3, 0));
9098 unlock_user(p, arg2, 0);
9099 return ret;
9100 #endif
9101 #ifdef TARGET_NR_nice /* not on alpha */
9102 case TARGET_NR_nice:
9103 return get_errno(nice(arg1));
9104 #endif
9105 case TARGET_NR_sync:
9106 sync();
9107 return 0;
9108 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9109 case TARGET_NR_syncfs:
9110 return get_errno(syncfs(arg1));
9111 #endif
9112 case TARGET_NR_kill:
9113 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9114 #ifdef TARGET_NR_rename
9115 case TARGET_NR_rename:
9117 void *p2;
9118 p = lock_user_string(arg1);
9119 p2 = lock_user_string(arg2);
9120 if (!p || !p2)
9121 ret = -TARGET_EFAULT;
9122 else
9123 ret = get_errno(rename(p, p2));
9124 unlock_user(p2, arg2, 0);
9125 unlock_user(p, arg1, 0);
9127 return ret;
9128 #endif
9129 #if defined(TARGET_NR_renameat)
9130 case TARGET_NR_renameat:
9132 void *p2;
9133 p = lock_user_string(arg2);
9134 p2 = lock_user_string(arg4);
9135 if (!p || !p2)
9136 ret = -TARGET_EFAULT;
9137 else
9138 ret = get_errno(renameat(arg1, p, arg3, p2));
9139 unlock_user(p2, arg4, 0);
9140 unlock_user(p, arg2, 0);
9142 return ret;
9143 #endif
9144 #if defined(TARGET_NR_renameat2)
9145 case TARGET_NR_renameat2:
9147 void *p2;
9148 p = lock_user_string(arg2);
9149 p2 = lock_user_string(arg4);
9150 if (!p || !p2) {
9151 ret = -TARGET_EFAULT;
9152 } else {
9153 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9155 unlock_user(p2, arg4, 0);
9156 unlock_user(p, arg2, 0);
9158 return ret;
9159 #endif
9160 #ifdef TARGET_NR_mkdir
9161 case TARGET_NR_mkdir:
9162 if (!(p = lock_user_string(arg1)))
9163 return -TARGET_EFAULT;
9164 ret = get_errno(mkdir(p, arg2));
9165 unlock_user(p, arg1, 0);
9166 return ret;
9167 #endif
9168 #if defined(TARGET_NR_mkdirat)
9169 case TARGET_NR_mkdirat:
9170 if (!(p = lock_user_string(arg2)))
9171 return -TARGET_EFAULT;
9172 ret = get_errno(mkdirat(arg1, p, arg3));
9173 unlock_user(p, arg2, 0);
9174 return ret;
9175 #endif
9176 #ifdef TARGET_NR_rmdir
9177 case TARGET_NR_rmdir:
9178 if (!(p = lock_user_string(arg1)))
9179 return -TARGET_EFAULT;
9180 ret = get_errno(rmdir(p));
9181 unlock_user(p, arg1, 0);
9182 return ret;
9183 #endif
9184 case TARGET_NR_dup:
9185 ret = get_errno(dup(arg1));
9186 if (ret >= 0) {
9187 fd_trans_dup(arg1, ret);
9189 return ret;
9190 #ifdef TARGET_NR_pipe
9191 case TARGET_NR_pipe:
9192 return do_pipe(cpu_env, arg1, 0, 0);
9193 #endif
9194 #ifdef TARGET_NR_pipe2
9195 case TARGET_NR_pipe2:
9196 return do_pipe(cpu_env, arg1,
9197 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9198 #endif
9199 case TARGET_NR_times:
9201 struct target_tms *tmsp;
9202 struct tms tms;
9203 ret = get_errno(times(&tms));
9204 if (arg1) {
9205 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9206 if (!tmsp)
9207 return -TARGET_EFAULT;
9208 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9209 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9210 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9211 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9213 if (!is_error(ret))
9214 ret = host_to_target_clock_t(ret);
9216 return ret;
9217 case TARGET_NR_acct:
9218 if (arg1 == 0) {
9219 ret = get_errno(acct(NULL));
9220 } else {
9221 if (!(p = lock_user_string(arg1))) {
9222 return -TARGET_EFAULT;
9224 ret = get_errno(acct(path(p)));
9225 unlock_user(p, arg1, 0);
9227 return ret;
9228 #ifdef TARGET_NR_umount2
9229 case TARGET_NR_umount2:
9230 if (!(p = lock_user_string(arg1)))
9231 return -TARGET_EFAULT;
9232 ret = get_errno(umount2(p, arg2));
9233 unlock_user(p, arg1, 0);
9234 return ret;
9235 #endif
9236 case TARGET_NR_ioctl:
9237 return do_ioctl(arg1, arg2, arg3);
9238 #ifdef TARGET_NR_fcntl
9239 case TARGET_NR_fcntl:
9240 return do_fcntl(arg1, arg2, arg3);
9241 #endif
9242 case TARGET_NR_setpgid:
9243 return get_errno(setpgid(arg1, arg2));
9244 case TARGET_NR_umask:
9245 return get_errno(umask(arg1));
9246 case TARGET_NR_chroot:
9247 if (!(p = lock_user_string(arg1)))
9248 return -TARGET_EFAULT;
9249 ret = get_errno(chroot(p));
9250 unlock_user(p, arg1, 0);
9251 return ret;
9252 #ifdef TARGET_NR_dup2
9253 case TARGET_NR_dup2:
9254 ret = get_errno(dup2(arg1, arg2));
9255 if (ret >= 0) {
9256 fd_trans_dup(arg1, arg2);
9258 return ret;
9259 #endif
9260 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9261 case TARGET_NR_dup3:
9263 int host_flags;
9265 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9266 return -EINVAL;
9268 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9269 ret = get_errno(dup3(arg1, arg2, host_flags));
9270 if (ret >= 0) {
9271 fd_trans_dup(arg1, arg2);
9273 return ret;
9275 #endif
9276 #ifdef TARGET_NR_getppid /* not on alpha */
9277 case TARGET_NR_getppid:
9278 return get_errno(getppid());
9279 #endif
9280 #ifdef TARGET_NR_getpgrp
9281 case TARGET_NR_getpgrp:
9282 return get_errno(getpgrp());
9283 #endif
9284 case TARGET_NR_setsid:
9285 return get_errno(setsid());
9286 #ifdef TARGET_NR_sigaction
9287 case TARGET_NR_sigaction:
9289 #if defined(TARGET_MIPS)
9290 struct target_sigaction act, oact, *pact, *old_act;
9292 if (arg2) {
9293 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9294 return -TARGET_EFAULT;
9295 act._sa_handler = old_act->_sa_handler;
9296 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9297 act.sa_flags = old_act->sa_flags;
9298 unlock_user_struct(old_act, arg2, 0);
9299 pact = &act;
9300 } else {
9301 pact = NULL;
9304 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9306 if (!is_error(ret) && arg3) {
9307 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9308 return -TARGET_EFAULT;
9309 old_act->_sa_handler = oact._sa_handler;
9310 old_act->sa_flags = oact.sa_flags;
9311 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9312 old_act->sa_mask.sig[1] = 0;
9313 old_act->sa_mask.sig[2] = 0;
9314 old_act->sa_mask.sig[3] = 0;
9315 unlock_user_struct(old_act, arg3, 1);
9317 #else
9318 struct target_old_sigaction *old_act;
9319 struct target_sigaction act, oact, *pact;
9320 if (arg2) {
9321 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9322 return -TARGET_EFAULT;
9323 act._sa_handler = old_act->_sa_handler;
9324 target_siginitset(&act.sa_mask, old_act->sa_mask);
9325 act.sa_flags = old_act->sa_flags;
9326 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9327 act.sa_restorer = old_act->sa_restorer;
9328 #endif
9329 unlock_user_struct(old_act, arg2, 0);
9330 pact = &act;
9331 } else {
9332 pact = NULL;
9334 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9335 if (!is_error(ret) && arg3) {
9336 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9337 return -TARGET_EFAULT;
9338 old_act->_sa_handler = oact._sa_handler;
9339 old_act->sa_mask = oact.sa_mask.sig[0];
9340 old_act->sa_flags = oact.sa_flags;
9341 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9342 old_act->sa_restorer = oact.sa_restorer;
9343 #endif
9344 unlock_user_struct(old_act, arg3, 1);
9346 #endif
9348 return ret;
9349 #endif
9350 case TARGET_NR_rt_sigaction:
9353 * For Alpha and SPARC this is a 5 argument syscall, with
9354 * a 'restorer' parameter which must be copied into the
9355 * sa_restorer field of the sigaction struct.
9356 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9357 * and arg5 is the sigsetsize.
9359 #if defined(TARGET_ALPHA)
9360 target_ulong sigsetsize = arg4;
9361 target_ulong restorer = arg5;
9362 #elif defined(TARGET_SPARC)
9363 target_ulong restorer = arg4;
9364 target_ulong sigsetsize = arg5;
9365 #else
9366 target_ulong sigsetsize = arg4;
9367 target_ulong restorer = 0;
9368 #endif
9369 struct target_sigaction *act = NULL;
9370 struct target_sigaction *oact = NULL;
9372 if (sigsetsize != sizeof(target_sigset_t)) {
9373 return -TARGET_EINVAL;
9375 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9376 return -TARGET_EFAULT;
9378 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9379 ret = -TARGET_EFAULT;
9380 } else {
9381 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9382 if (oact) {
9383 unlock_user_struct(oact, arg3, 1);
9386 if (act) {
9387 unlock_user_struct(act, arg2, 0);
9390 return ret;
9391 #ifdef TARGET_NR_sgetmask /* not on alpha */
9392 case TARGET_NR_sgetmask:
9394 sigset_t cur_set;
9395 abi_ulong target_set;
9396 ret = do_sigprocmask(0, NULL, &cur_set);
9397 if (!ret) {
9398 host_to_target_old_sigset(&target_set, &cur_set);
9399 ret = target_set;
9402 return ret;
9403 #endif
9404 #ifdef TARGET_NR_ssetmask /* not on alpha */
9405 case TARGET_NR_ssetmask:
9407 sigset_t set, oset;
9408 abi_ulong target_set = arg1;
9409 target_to_host_old_sigset(&set, &target_set);
9410 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9411 if (!ret) {
9412 host_to_target_old_sigset(&target_set, &oset);
9413 ret = target_set;
9416 return ret;
9417 #endif
9418 #ifdef TARGET_NR_sigprocmask
9419 case TARGET_NR_sigprocmask:
9421 #if defined(TARGET_ALPHA)
9422 sigset_t set, oldset;
9423 abi_ulong mask;
9424 int how;
9426 switch (arg1) {
9427 case TARGET_SIG_BLOCK:
9428 how = SIG_BLOCK;
9429 break;
9430 case TARGET_SIG_UNBLOCK:
9431 how = SIG_UNBLOCK;
9432 break;
9433 case TARGET_SIG_SETMASK:
9434 how = SIG_SETMASK;
9435 break;
9436 default:
9437 return -TARGET_EINVAL;
9439 mask = arg2;
9440 target_to_host_old_sigset(&set, &mask);
9442 ret = do_sigprocmask(how, &set, &oldset);
9443 if (!is_error(ret)) {
9444 host_to_target_old_sigset(&mask, &oldset);
9445 ret = mask;
9446 cpu_env->ir[IR_V0] = 0; /* force no error */
9448 #else
9449 sigset_t set, oldset, *set_ptr;
9450 int how;
9452 if (arg2) {
9453 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9454 if (!p) {
9455 return -TARGET_EFAULT;
9457 target_to_host_old_sigset(&set, p);
9458 unlock_user(p, arg2, 0);
9459 set_ptr = &set;
9460 switch (arg1) {
9461 case TARGET_SIG_BLOCK:
9462 how = SIG_BLOCK;
9463 break;
9464 case TARGET_SIG_UNBLOCK:
9465 how = SIG_UNBLOCK;
9466 break;
9467 case TARGET_SIG_SETMASK:
9468 how = SIG_SETMASK;
9469 break;
9470 default:
9471 return -TARGET_EINVAL;
9473 } else {
9474 how = 0;
9475 set_ptr = NULL;
9477 ret = do_sigprocmask(how, set_ptr, &oldset);
9478 if (!is_error(ret) && arg3) {
9479 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9480 return -TARGET_EFAULT;
9481 host_to_target_old_sigset(p, &oldset);
9482 unlock_user(p, arg3, sizeof(target_sigset_t));
9484 #endif
9486 return ret;
9487 #endif
9488 case TARGET_NR_rt_sigprocmask:
9490 int how = arg1;
9491 sigset_t set, oldset, *set_ptr;
9493 if (arg4 != sizeof(target_sigset_t)) {
9494 return -TARGET_EINVAL;
9497 if (arg2) {
9498 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9499 if (!p) {
9500 return -TARGET_EFAULT;
9502 target_to_host_sigset(&set, p);
9503 unlock_user(p, arg2, 0);
9504 set_ptr = &set;
9505 switch(how) {
9506 case TARGET_SIG_BLOCK:
9507 how = SIG_BLOCK;
9508 break;
9509 case TARGET_SIG_UNBLOCK:
9510 how = SIG_UNBLOCK;
9511 break;
9512 case TARGET_SIG_SETMASK:
9513 how = SIG_SETMASK;
9514 break;
9515 default:
9516 return -TARGET_EINVAL;
9518 } else {
9519 how = 0;
9520 set_ptr = NULL;
9522 ret = do_sigprocmask(how, set_ptr, &oldset);
9523 if (!is_error(ret) && arg3) {
9524 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9525 return -TARGET_EFAULT;
9526 host_to_target_sigset(p, &oldset);
9527 unlock_user(p, arg3, sizeof(target_sigset_t));
9530 return ret;
9531 #ifdef TARGET_NR_sigpending
9532 case TARGET_NR_sigpending:
9534 sigset_t set;
9535 ret = get_errno(sigpending(&set));
9536 if (!is_error(ret)) {
9537 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9538 return -TARGET_EFAULT;
9539 host_to_target_old_sigset(p, &set);
9540 unlock_user(p, arg1, sizeof(target_sigset_t));
9543 return ret;
9544 #endif
9545 case TARGET_NR_rt_sigpending:
9547 sigset_t set;
9549 /* Yes, this check is >, not != like most. We follow the kernel's
9550 * logic and it does it like this because it implements
9551 * NR_sigpending through the same code path, and in that case
9552 * the old_sigset_t is smaller in size.
9554 if (arg2 > sizeof(target_sigset_t)) {
9555 return -TARGET_EINVAL;
9558 ret = get_errno(sigpending(&set));
9559 if (!is_error(ret)) {
9560 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9561 return -TARGET_EFAULT;
9562 host_to_target_sigset(p, &set);
9563 unlock_user(p, arg1, sizeof(target_sigset_t));
9566 return ret;
9567 #ifdef TARGET_NR_sigsuspend
9568 case TARGET_NR_sigsuspend:
9570 sigset_t *set;
9572 #if defined(TARGET_ALPHA)
9573 TaskState *ts = cpu->opaque;
9574 /* target_to_host_old_sigset will bswap back */
9575 abi_ulong mask = tswapal(arg1);
9576 set = &ts->sigsuspend_mask;
9577 target_to_host_old_sigset(set, &mask);
9578 #else
9579 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9580 if (ret != 0) {
9581 return ret;
9583 #endif
9584 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9585 finish_sigsuspend_mask(ret);
9587 return ret;
9588 #endif
9589 case TARGET_NR_rt_sigsuspend:
9591 sigset_t *set;
9593 ret = process_sigsuspend_mask(&set, arg1, arg2);
9594 if (ret != 0) {
9595 return ret;
9597 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9598 finish_sigsuspend_mask(ret);
9600 return ret;
9601 #ifdef TARGET_NR_rt_sigtimedwait
9602 case TARGET_NR_rt_sigtimedwait:
9604 sigset_t set;
9605 struct timespec uts, *puts;
9606 siginfo_t uinfo;
9608 if (arg4 != sizeof(target_sigset_t)) {
9609 return -TARGET_EINVAL;
9612 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9613 return -TARGET_EFAULT;
9614 target_to_host_sigset(&set, p);
9615 unlock_user(p, arg1, 0);
9616 if (arg3) {
9617 puts = &uts;
9618 if (target_to_host_timespec(puts, arg3)) {
9619 return -TARGET_EFAULT;
9621 } else {
9622 puts = NULL;
9624 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9625 SIGSET_T_SIZE));
9626 if (!is_error(ret)) {
9627 if (arg2) {
9628 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9630 if (!p) {
9631 return -TARGET_EFAULT;
9633 host_to_target_siginfo(p, &uinfo);
9634 unlock_user(p, arg2, sizeof(target_siginfo_t));
9636 ret = host_to_target_signal(ret);
9639 return ret;
9640 #endif
9641 #ifdef TARGET_NR_rt_sigtimedwait_time64
9642 case TARGET_NR_rt_sigtimedwait_time64:
9644 sigset_t set;
9645 struct timespec uts, *puts;
9646 siginfo_t uinfo;
9648 if (arg4 != sizeof(target_sigset_t)) {
9649 return -TARGET_EINVAL;
9652 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9653 if (!p) {
9654 return -TARGET_EFAULT;
9656 target_to_host_sigset(&set, p);
9657 unlock_user(p, arg1, 0);
9658 if (arg3) {
9659 puts = &uts;
9660 if (target_to_host_timespec64(puts, arg3)) {
9661 return -TARGET_EFAULT;
9663 } else {
9664 puts = NULL;
9666 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9667 SIGSET_T_SIZE));
9668 if (!is_error(ret)) {
9669 if (arg2) {
9670 p = lock_user(VERIFY_WRITE, arg2,
9671 sizeof(target_siginfo_t), 0);
9672 if (!p) {
9673 return -TARGET_EFAULT;
9675 host_to_target_siginfo(p, &uinfo);
9676 unlock_user(p, arg2, sizeof(target_siginfo_t));
9678 ret = host_to_target_signal(ret);
9681 return ret;
9682 #endif
9683 case TARGET_NR_rt_sigqueueinfo:
9685 siginfo_t uinfo;
9687 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9688 if (!p) {
9689 return -TARGET_EFAULT;
9691 target_to_host_siginfo(&uinfo, p);
9692 unlock_user(p, arg3, 0);
9693 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9695 return ret;
9696 case TARGET_NR_rt_tgsigqueueinfo:
9698 siginfo_t uinfo;
9700 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9701 if (!p) {
9702 return -TARGET_EFAULT;
9704 target_to_host_siginfo(&uinfo, p);
9705 unlock_user(p, arg4, 0);
9706 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9708 return ret;
9709 #ifdef TARGET_NR_sigreturn
9710 case TARGET_NR_sigreturn:
9711 if (block_signals()) {
9712 return -QEMU_ERESTARTSYS;
9714 return do_sigreturn(cpu_env);
9715 #endif
9716 case TARGET_NR_rt_sigreturn:
9717 if (block_signals()) {
9718 return -QEMU_ERESTARTSYS;
9720 return do_rt_sigreturn(cpu_env);
9721 case TARGET_NR_sethostname:
9722 if (!(p = lock_user_string(arg1)))
9723 return -TARGET_EFAULT;
9724 ret = get_errno(sethostname(p, arg2));
9725 unlock_user(p, arg1, 0);
9726 return ret;
9727 #ifdef TARGET_NR_setrlimit
9728 case TARGET_NR_setrlimit:
9730 int resource = target_to_host_resource(arg1);
9731 struct target_rlimit *target_rlim;
9732 struct rlimit rlim;
9733 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9734 return -TARGET_EFAULT;
9735 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9736 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9737 unlock_user_struct(target_rlim, arg2, 0);
9739 * If we just passed through resource limit settings for memory then
9740 * they would also apply to QEMU's own allocations, and QEMU will
9741 * crash or hang or die if its allocations fail. Ideally we would
9742 * track the guest allocations in QEMU and apply the limits ourselves.
9743 * For now, just tell the guest the call succeeded but don't actually
9744 * limit anything.
9746 if (resource != RLIMIT_AS &&
9747 resource != RLIMIT_DATA &&
9748 resource != RLIMIT_STACK) {
9749 return get_errno(setrlimit(resource, &rlim));
9750 } else {
9751 return 0;
9754 #endif
9755 #ifdef TARGET_NR_getrlimit
9756 case TARGET_NR_getrlimit:
9758 int resource = target_to_host_resource(arg1);
9759 struct target_rlimit *target_rlim;
9760 struct rlimit rlim;
9762 ret = get_errno(getrlimit(resource, &rlim));
9763 if (!is_error(ret)) {
9764 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9765 return -TARGET_EFAULT;
9766 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9767 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9768 unlock_user_struct(target_rlim, arg2, 1);
9771 return ret;
9772 #endif
9773 case TARGET_NR_getrusage:
9775 struct rusage rusage;
9776 ret = get_errno(getrusage(arg1, &rusage));
9777 if (!is_error(ret)) {
9778 ret = host_to_target_rusage(arg2, &rusage);
9781 return ret;
9782 #if defined(TARGET_NR_gettimeofday)
9783 case TARGET_NR_gettimeofday:
9785 struct timeval tv;
9786 struct timezone tz;
9788 ret = get_errno(gettimeofday(&tv, &tz));
9789 if (!is_error(ret)) {
9790 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9791 return -TARGET_EFAULT;
9793 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9794 return -TARGET_EFAULT;
9798 return ret;
9799 #endif
9800 #if defined(TARGET_NR_settimeofday)
9801 case TARGET_NR_settimeofday:
9803 struct timeval tv, *ptv = NULL;
9804 struct timezone tz, *ptz = NULL;
9806 if (arg1) {
9807 if (copy_from_user_timeval(&tv, arg1)) {
9808 return -TARGET_EFAULT;
9810 ptv = &tv;
9813 if (arg2) {
9814 if (copy_from_user_timezone(&tz, arg2)) {
9815 return -TARGET_EFAULT;
9817 ptz = &tz;
9820 return get_errno(settimeofday(ptv, ptz));
9822 #endif
9823 #if defined(TARGET_NR_select)
9824 case TARGET_NR_select:
9825 #if defined(TARGET_WANT_NI_OLD_SELECT)
9826 /* some architectures used to have old_select here
9827 * but now ENOSYS it.
9829 ret = -TARGET_ENOSYS;
9830 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9831 ret = do_old_select(arg1);
9832 #else
9833 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9834 #endif
9835 return ret;
9836 #endif
9837 #ifdef TARGET_NR_pselect6
9838 case TARGET_NR_pselect6:
9839 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9840 #endif
9841 #ifdef TARGET_NR_pselect6_time64
9842 case TARGET_NR_pselect6_time64:
9843 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9844 #endif
9845 #ifdef TARGET_NR_symlink
9846 case TARGET_NR_symlink:
9848 void *p2;
9849 p = lock_user_string(arg1);
9850 p2 = lock_user_string(arg2);
9851 if (!p || !p2)
9852 ret = -TARGET_EFAULT;
9853 else
9854 ret = get_errno(symlink(p, p2));
9855 unlock_user(p2, arg2, 0);
9856 unlock_user(p, arg1, 0);
9858 return ret;
9859 #endif
9860 #if defined(TARGET_NR_symlinkat)
9861 case TARGET_NR_symlinkat:
9863 void *p2;
9864 p = lock_user_string(arg1);
9865 p2 = lock_user_string(arg3);
9866 if (!p || !p2)
9867 ret = -TARGET_EFAULT;
9868 else
9869 ret = get_errno(symlinkat(p, arg2, p2));
9870 unlock_user(p2, arg3, 0);
9871 unlock_user(p, arg1, 0);
9873 return ret;
9874 #endif
9875 #ifdef TARGET_NR_readlink
9876 case TARGET_NR_readlink:
9878 void *p2;
9879 p = lock_user_string(arg1);
9880 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9881 if (!p || !p2) {
9882 ret = -TARGET_EFAULT;
9883 } else if (!arg3) {
9884 /* Short circuit this for the magic exe check. */
9885 ret = -TARGET_EINVAL;
9886 } else if (is_proc_myself((const char *)p, "exe")) {
9887 char real[PATH_MAX], *temp;
9888 temp = realpath(exec_path, real);
9889 /* Return value is # of bytes that we wrote to the buffer. */
9890 if (temp == NULL) {
9891 ret = get_errno(-1);
9892 } else {
9893 /* Don't worry about sign mismatch as earlier mapping
9894 * logic would have thrown a bad address error. */
9895 ret = MIN(strlen(real), arg3);
9896 /* We cannot NUL terminate the string. */
9897 memcpy(p2, real, ret);
9899 } else {
9900 ret = get_errno(readlink(path(p), p2, arg3));
9902 unlock_user(p2, arg2, ret);
9903 unlock_user(p, arg1, 0);
9905 return ret;
9906 #endif
9907 #if defined(TARGET_NR_readlinkat)
9908 case TARGET_NR_readlinkat:
9910 void *p2;
9911 p = lock_user_string(arg2);
9912 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9913 if (!p || !p2) {
9914 ret = -TARGET_EFAULT;
9915 } else if (is_proc_myself((const char *)p, "exe")) {
9916 char real[PATH_MAX], *temp;
9917 temp = realpath(exec_path, real);
9918 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9919 snprintf((char *)p2, arg4, "%s", real);
9920 } else {
9921 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9923 unlock_user(p2, arg3, ret);
9924 unlock_user(p, arg2, 0);
9926 return ret;
9927 #endif
9928 #ifdef TARGET_NR_swapon
9929 case TARGET_NR_swapon:
9930 if (!(p = lock_user_string(arg1)))
9931 return -TARGET_EFAULT;
9932 ret = get_errno(swapon(p, arg2));
9933 unlock_user(p, arg1, 0);
9934 return ret;
9935 #endif
9936 case TARGET_NR_reboot:
9937 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9938 /* arg4 must be ignored in all other cases */
9939 p = lock_user_string(arg4);
9940 if (!p) {
9941 return -TARGET_EFAULT;
9943 ret = get_errno(reboot(arg1, arg2, arg3, p));
9944 unlock_user(p, arg4, 0);
9945 } else {
9946 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9948 return ret;
9949 #ifdef TARGET_NR_mmap
9950 case TARGET_NR_mmap:
9951 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9952 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9953 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9954 || defined(TARGET_S390X)
9956 abi_ulong *v;
9957 abi_ulong v1, v2, v3, v4, v5, v6;
9958 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9959 return -TARGET_EFAULT;
9960 v1 = tswapal(v[0]);
9961 v2 = tswapal(v[1]);
9962 v3 = tswapal(v[2]);
9963 v4 = tswapal(v[3]);
9964 v5 = tswapal(v[4]);
9965 v6 = tswapal(v[5]);
9966 unlock_user(v, arg1, 0);
9967 ret = get_errno(target_mmap(v1, v2, v3,
9968 target_to_host_bitmask(v4, mmap_flags_tbl),
9969 v5, v6));
9971 #else
9972 /* mmap pointers are always untagged */
9973 ret = get_errno(target_mmap(arg1, arg2, arg3,
9974 target_to_host_bitmask(arg4, mmap_flags_tbl),
9975 arg5,
9976 arg6));
9977 #endif
9978 return ret;
9979 #endif
9980 #ifdef TARGET_NR_mmap2
9981 case TARGET_NR_mmap2:
9982 #ifndef MMAP_SHIFT
9983 #define MMAP_SHIFT 12
9984 #endif
9985 ret = target_mmap(arg1, arg2, arg3,
9986 target_to_host_bitmask(arg4, mmap_flags_tbl),
9987 arg5, arg6 << MMAP_SHIFT);
9988 return get_errno(ret);
9989 #endif
9990 case TARGET_NR_munmap:
9991 arg1 = cpu_untagged_addr(cpu, arg1);
9992 return get_errno(target_munmap(arg1, arg2));
9993 case TARGET_NR_mprotect:
9994 arg1 = cpu_untagged_addr(cpu, arg1);
9996 TaskState *ts = cpu->opaque;
9997 /* Special hack to detect libc making the stack executable. */
9998 if ((arg3 & PROT_GROWSDOWN)
9999 && arg1 >= ts->info->stack_limit
10000 && arg1 <= ts->info->start_stack) {
10001 arg3 &= ~PROT_GROWSDOWN;
10002 arg2 = arg2 + arg1 - ts->info->stack_limit;
10003 arg1 = ts->info->stack_limit;
10006 return get_errno(target_mprotect(arg1, arg2, arg3));
10007 #ifdef TARGET_NR_mremap
10008 case TARGET_NR_mremap:
10009 arg1 = cpu_untagged_addr(cpu, arg1);
10010 /* mremap new_addr (arg5) is always untagged */
10011 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10012 #endif
10013 /* ??? msync/mlock/munlock are broken for softmmu. */
10014 #ifdef TARGET_NR_msync
10015 case TARGET_NR_msync:
10016 return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10017 #endif
10018 #ifdef TARGET_NR_mlock
10019 case TARGET_NR_mlock:
10020 return get_errno(mlock(g2h(cpu, arg1), arg2));
10021 #endif
10022 #ifdef TARGET_NR_munlock
10023 case TARGET_NR_munlock:
10024 return get_errno(munlock(g2h(cpu, arg1), arg2));
10025 #endif
10026 #ifdef TARGET_NR_mlockall
10027 case TARGET_NR_mlockall:
10028 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10029 #endif
10030 #ifdef TARGET_NR_munlockall
10031 case TARGET_NR_munlockall:
10032 return get_errno(munlockall());
10033 #endif
10034 #ifdef TARGET_NR_truncate
10035 case TARGET_NR_truncate:
10036 if (!(p = lock_user_string(arg1)))
10037 return -TARGET_EFAULT;
10038 ret = get_errno(truncate(p, arg2));
10039 unlock_user(p, arg1, 0);
10040 return ret;
10041 #endif
10042 #ifdef TARGET_NR_ftruncate
10043 case TARGET_NR_ftruncate:
10044 return get_errno(ftruncate(arg1, arg2));
10045 #endif
10046 case TARGET_NR_fchmod:
10047 return get_errno(fchmod(arg1, arg2));
10048 #if defined(TARGET_NR_fchmodat)
10049 case TARGET_NR_fchmodat:
10050 if (!(p = lock_user_string(arg2)))
10051 return -TARGET_EFAULT;
10052 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10053 unlock_user(p, arg2, 0);
10054 return ret;
10055 #endif
10056 case TARGET_NR_getpriority:
10057 /* Note that negative values are valid for getpriority, so we must
10058 differentiate based on errno settings. */
10059 errno = 0;
10060 ret = getpriority(arg1, arg2);
10061 if (ret == -1 && errno != 0) {
10062 return -host_to_target_errno(errno);
10064 #ifdef TARGET_ALPHA
10065 /* Return value is the unbiased priority. Signal no error. */
10066 cpu_env->ir[IR_V0] = 0;
10067 #else
10068 /* Return value is a biased priority to avoid negative numbers. */
10069 ret = 20 - ret;
10070 #endif
10071 return ret;
10072 case TARGET_NR_setpriority:
10073 return get_errno(setpriority(arg1, arg2, arg3));
10074 #ifdef TARGET_NR_statfs
10075 case TARGET_NR_statfs:
10076 if (!(p = lock_user_string(arg1))) {
10077 return -TARGET_EFAULT;
10079 ret = get_errno(statfs(path(p), &stfs));
10080 unlock_user(p, arg1, 0);
10081 convert_statfs:
10082 if (!is_error(ret)) {
10083 struct target_statfs *target_stfs;
10085 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10086 return -TARGET_EFAULT;
10087 __put_user(stfs.f_type, &target_stfs->f_type);
10088 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10089 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10090 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10091 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10092 __put_user(stfs.f_files, &target_stfs->f_files);
10093 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10094 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10095 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10096 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10097 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10098 #ifdef _STATFS_F_FLAGS
10099 __put_user(stfs.f_flags, &target_stfs->f_flags);
10100 #else
10101 __put_user(0, &target_stfs->f_flags);
10102 #endif
10103 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10104 unlock_user_struct(target_stfs, arg2, 1);
10106 return ret;
10107 #endif
10108 #ifdef TARGET_NR_fstatfs
10109 case TARGET_NR_fstatfs:
10110 ret = get_errno(fstatfs(arg1, &stfs));
10111 goto convert_statfs;
10112 #endif
10113 #ifdef TARGET_NR_statfs64
10114 case TARGET_NR_statfs64:
10115 if (!(p = lock_user_string(arg1))) {
10116 return -TARGET_EFAULT;
10118 ret = get_errno(statfs(path(p), &stfs));
10119 unlock_user(p, arg1, 0);
10120 convert_statfs64:
10121 if (!is_error(ret)) {
10122 struct target_statfs64 *target_stfs;
10124 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10125 return -TARGET_EFAULT;
10126 __put_user(stfs.f_type, &target_stfs->f_type);
10127 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10128 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10129 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10130 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10131 __put_user(stfs.f_files, &target_stfs->f_files);
10132 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10133 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10134 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10135 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10136 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10137 #ifdef _STATFS_F_FLAGS
10138 __put_user(stfs.f_flags, &target_stfs->f_flags);
10139 #else
10140 __put_user(0, &target_stfs->f_flags);
10141 #endif
10142 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10143 unlock_user_struct(target_stfs, arg3, 1);
10145 return ret;
10146 case TARGET_NR_fstatfs64:
10147 ret = get_errno(fstatfs(arg1, &stfs));
10148 goto convert_statfs64;
10149 #endif
10150 #ifdef TARGET_NR_socketcall
10151 case TARGET_NR_socketcall:
10152 return do_socketcall(arg1, arg2);
10153 #endif
10154 #ifdef TARGET_NR_accept
10155 case TARGET_NR_accept:
10156 return do_accept4(arg1, arg2, arg3, 0);
10157 #endif
10158 #ifdef TARGET_NR_accept4
10159 case TARGET_NR_accept4:
10160 return do_accept4(arg1, arg2, arg3, arg4);
10161 #endif
10162 #ifdef TARGET_NR_bind
10163 case TARGET_NR_bind:
10164 return do_bind(arg1, arg2, arg3);
10165 #endif
10166 #ifdef TARGET_NR_connect
10167 case TARGET_NR_connect:
10168 return do_connect(arg1, arg2, arg3);
10169 #endif
10170 #ifdef TARGET_NR_getpeername
10171 case TARGET_NR_getpeername:
10172 return do_getpeername(arg1, arg2, arg3);
10173 #endif
10174 #ifdef TARGET_NR_getsockname
10175 case TARGET_NR_getsockname:
10176 return do_getsockname(arg1, arg2, arg3);
10177 #endif
10178 #ifdef TARGET_NR_getsockopt
10179 case TARGET_NR_getsockopt:
10180 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10181 #endif
10182 #ifdef TARGET_NR_listen
10183 case TARGET_NR_listen:
10184 return get_errno(listen(arg1, arg2));
10185 #endif
10186 #ifdef TARGET_NR_recv
10187 case TARGET_NR_recv:
10188 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10189 #endif
10190 #ifdef TARGET_NR_recvfrom
10191 case TARGET_NR_recvfrom:
10192 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10193 #endif
10194 #ifdef TARGET_NR_recvmsg
10195 case TARGET_NR_recvmsg:
10196 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10197 #endif
10198 #ifdef TARGET_NR_send
10199 case TARGET_NR_send:
10200 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10201 #endif
10202 #ifdef TARGET_NR_sendmsg
10203 case TARGET_NR_sendmsg:
10204 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10205 #endif
10206 #ifdef TARGET_NR_sendmmsg
10207 case TARGET_NR_sendmmsg:
10208 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10209 #endif
10210 #ifdef TARGET_NR_recvmmsg
10211 case TARGET_NR_recvmmsg:
10212 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10213 #endif
10214 #ifdef TARGET_NR_sendto
10215 case TARGET_NR_sendto:
10216 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10217 #endif
10218 #ifdef TARGET_NR_shutdown
10219 case TARGET_NR_shutdown:
10220 return get_errno(shutdown(arg1, arg2));
10221 #endif
10222 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10223 case TARGET_NR_getrandom:
10224 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10225 if (!p) {
10226 return -TARGET_EFAULT;
10228 ret = get_errno(getrandom(p, arg2, arg3));
10229 unlock_user(p, arg1, ret);
10230 return ret;
10231 #endif
10232 #ifdef TARGET_NR_socket
10233 case TARGET_NR_socket:
10234 return do_socket(arg1, arg2, arg3);
10235 #endif
10236 #ifdef TARGET_NR_socketpair
10237 case TARGET_NR_socketpair:
10238 return do_socketpair(arg1, arg2, arg3, arg4);
10239 #endif
10240 #ifdef TARGET_NR_setsockopt
10241 case TARGET_NR_setsockopt:
10242 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10243 #endif
10244 #if defined(TARGET_NR_syslog)
10245 case TARGET_NR_syslog:
10247 int len = arg2;
10249 switch (arg1) {
10250 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10251 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10252 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10253 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10254 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10255 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10256 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10257 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10258 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10259 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10260 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10261 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10263 if (len < 0) {
10264 return -TARGET_EINVAL;
10266 if (len == 0) {
10267 return 0;
10269 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10270 if (!p) {
10271 return -TARGET_EFAULT;
10273 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10274 unlock_user(p, arg2, arg3);
10276 return ret;
10277 default:
10278 return -TARGET_EINVAL;
10281 break;
10282 #endif
10283 case TARGET_NR_setitimer:
10285 struct itimerval value, ovalue, *pvalue;
10287 if (arg2) {
10288 pvalue = &value;
10289 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10290 || copy_from_user_timeval(&pvalue->it_value,
10291 arg2 + sizeof(struct target_timeval)))
10292 return -TARGET_EFAULT;
10293 } else {
10294 pvalue = NULL;
10296 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10297 if (!is_error(ret) && arg3) {
10298 if (copy_to_user_timeval(arg3,
10299 &ovalue.it_interval)
10300 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10301 &ovalue.it_value))
10302 return -TARGET_EFAULT;
10305 return ret;
10306 case TARGET_NR_getitimer:
10308 struct itimerval value;
10310 ret = get_errno(getitimer(arg1, &value));
10311 if (!is_error(ret) && arg2) {
10312 if (copy_to_user_timeval(arg2,
10313 &value.it_interval)
10314 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10315 &value.it_value))
10316 return -TARGET_EFAULT;
10319 return ret;
10320 #ifdef TARGET_NR_stat
10321 case TARGET_NR_stat:
10322 if (!(p = lock_user_string(arg1))) {
10323 return -TARGET_EFAULT;
10325 ret = get_errno(stat(path(p), &st));
10326 unlock_user(p, arg1, 0);
10327 goto do_stat;
10328 #endif
10329 #ifdef TARGET_NR_lstat
10330 case TARGET_NR_lstat:
10331 if (!(p = lock_user_string(arg1))) {
10332 return -TARGET_EFAULT;
10334 ret = get_errno(lstat(path(p), &st));
10335 unlock_user(p, arg1, 0);
10336 goto do_stat;
10337 #endif
10338 #ifdef TARGET_NR_fstat
10339 case TARGET_NR_fstat:
10341 ret = get_errno(fstat(arg1, &st));
10342 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10343 do_stat:
10344 #endif
10345 if (!is_error(ret)) {
10346 struct target_stat *target_st;
10348 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10349 return -TARGET_EFAULT;
10350 memset(target_st, 0, sizeof(*target_st));
10351 __put_user(st.st_dev, &target_st->st_dev);
10352 __put_user(st.st_ino, &target_st->st_ino);
10353 __put_user(st.st_mode, &target_st->st_mode);
10354 __put_user(st.st_uid, &target_st->st_uid);
10355 __put_user(st.st_gid, &target_st->st_gid);
10356 __put_user(st.st_nlink, &target_st->st_nlink);
10357 __put_user(st.st_rdev, &target_st->st_rdev);
10358 __put_user(st.st_size, &target_st->st_size);
10359 __put_user(st.st_blksize, &target_st->st_blksize);
10360 __put_user(st.st_blocks, &target_st->st_blocks);
10361 __put_user(st.st_atime, &target_st->target_st_atime);
10362 __put_user(st.st_mtime, &target_st->target_st_mtime);
10363 __put_user(st.st_ctime, &target_st->target_st_ctime);
10364 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10365 __put_user(st.st_atim.tv_nsec,
10366 &target_st->target_st_atime_nsec);
10367 __put_user(st.st_mtim.tv_nsec,
10368 &target_st->target_st_mtime_nsec);
10369 __put_user(st.st_ctim.tv_nsec,
10370 &target_st->target_st_ctime_nsec);
10371 #endif
10372 unlock_user_struct(target_st, arg2, 1);
10375 return ret;
10376 #endif
10377 case TARGET_NR_vhangup:
10378 return get_errno(vhangup());
10379 #ifdef TARGET_NR_syscall
10380 case TARGET_NR_syscall:
10381 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10382 arg6, arg7, arg8, 0);
10383 #endif
10384 #if defined(TARGET_NR_wait4)
10385 case TARGET_NR_wait4:
10387 int status;
10388 abi_long status_ptr = arg2;
10389 struct rusage rusage, *rusage_ptr;
10390 abi_ulong target_rusage = arg4;
10391 abi_long rusage_err;
10392 if (target_rusage)
10393 rusage_ptr = &rusage;
10394 else
10395 rusage_ptr = NULL;
10396 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10397 if (!is_error(ret)) {
10398 if (status_ptr && ret) {
10399 status = host_to_target_waitstatus(status);
10400 if (put_user_s32(status, status_ptr))
10401 return -TARGET_EFAULT;
10403 if (target_rusage) {
10404 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10405 if (rusage_err) {
10406 ret = rusage_err;
10411 return ret;
10412 #endif
10413 #ifdef TARGET_NR_swapoff
10414 case TARGET_NR_swapoff:
10415 if (!(p = lock_user_string(arg1)))
10416 return -TARGET_EFAULT;
10417 ret = get_errno(swapoff(p));
10418 unlock_user(p, arg1, 0);
10419 return ret;
10420 #endif
10421 case TARGET_NR_sysinfo:
10423 struct target_sysinfo *target_value;
10424 struct sysinfo value;
10425 ret = get_errno(sysinfo(&value));
10426 if (!is_error(ret) && arg1)
10428 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10429 return -TARGET_EFAULT;
10430 __put_user(value.uptime, &target_value->uptime);
10431 __put_user(value.loads[0], &target_value->loads[0]);
10432 __put_user(value.loads[1], &target_value->loads[1]);
10433 __put_user(value.loads[2], &target_value->loads[2]);
10434 __put_user(value.totalram, &target_value->totalram);
10435 __put_user(value.freeram, &target_value->freeram);
10436 __put_user(value.sharedram, &target_value->sharedram);
10437 __put_user(value.bufferram, &target_value->bufferram);
10438 __put_user(value.totalswap, &target_value->totalswap);
10439 __put_user(value.freeswap, &target_value->freeswap);
10440 __put_user(value.procs, &target_value->procs);
10441 __put_user(value.totalhigh, &target_value->totalhigh);
10442 __put_user(value.freehigh, &target_value->freehigh);
10443 __put_user(value.mem_unit, &target_value->mem_unit);
10444 unlock_user_struct(target_value, arg1, 1);
10447 return ret;
10448 #ifdef TARGET_NR_ipc
10449 case TARGET_NR_ipc:
10450 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10451 #endif
10452 #ifdef TARGET_NR_semget
10453 case TARGET_NR_semget:
10454 return get_errno(semget(arg1, arg2, arg3));
10455 #endif
10456 #ifdef TARGET_NR_semop
10457 case TARGET_NR_semop:
10458 return do_semtimedop(arg1, arg2, arg3, 0, false);
10459 #endif
10460 #ifdef TARGET_NR_semtimedop
10461 case TARGET_NR_semtimedop:
10462 return do_semtimedop(arg1, arg2, arg3, arg4, false);
10463 #endif
10464 #ifdef TARGET_NR_semtimedop_time64
10465 case TARGET_NR_semtimedop_time64:
10466 return do_semtimedop(arg1, arg2, arg3, arg4, true);
10467 #endif
10468 #ifdef TARGET_NR_semctl
10469 case TARGET_NR_semctl:
10470 return do_semctl(arg1, arg2, arg3, arg4);
10471 #endif
10472 #ifdef TARGET_NR_msgctl
10473 case TARGET_NR_msgctl:
10474 return do_msgctl(arg1, arg2, arg3);
10475 #endif
10476 #ifdef TARGET_NR_msgget
10477 case TARGET_NR_msgget:
10478 return get_errno(msgget(arg1, arg2));
10479 #endif
10480 #ifdef TARGET_NR_msgrcv
10481 case TARGET_NR_msgrcv:
10482 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10483 #endif
10484 #ifdef TARGET_NR_msgsnd
10485 case TARGET_NR_msgsnd:
10486 return do_msgsnd(arg1, arg2, arg3, arg4);
10487 #endif
10488 #ifdef TARGET_NR_shmget
10489 case TARGET_NR_shmget:
10490 return get_errno(shmget(arg1, arg2, arg3));
10491 #endif
10492 #ifdef TARGET_NR_shmctl
10493 case TARGET_NR_shmctl:
10494 return do_shmctl(arg1, arg2, arg3);
10495 #endif
10496 #ifdef TARGET_NR_shmat
10497 case TARGET_NR_shmat:
10498 return do_shmat(cpu_env, arg1, arg2, arg3);
10499 #endif
10500 #ifdef TARGET_NR_shmdt
10501 case TARGET_NR_shmdt:
10502 return do_shmdt(arg1);
10503 #endif
10504 case TARGET_NR_fsync:
10505 return get_errno(fsync(arg1));
10506 case TARGET_NR_clone:
10507 /* Linux manages to have three different orderings for its
10508 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10509 * match the kernel's CONFIG_CLONE_* settings.
10510 * Microblaze is further special in that it uses a sixth
10511 * implicit argument to clone for the TLS pointer.
10513 #if defined(TARGET_MICROBLAZE)
10514 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10515 #elif defined(TARGET_CLONE_BACKWARDS)
10516 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10517 #elif defined(TARGET_CLONE_BACKWARDS2)
10518 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10519 #else
10520 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10521 #endif
10522 return ret;
10523 #ifdef __NR_exit_group
10524 /* new thread calls */
10525 case TARGET_NR_exit_group:
10526 preexit_cleanup(cpu_env, arg1);
10527 return get_errno(exit_group(arg1));
10528 #endif
10529 case TARGET_NR_setdomainname:
10530 if (!(p = lock_user_string(arg1)))
10531 return -TARGET_EFAULT;
10532 ret = get_errno(setdomainname(p, arg2));
10533 unlock_user(p, arg1, 0);
10534 return ret;
10535 case TARGET_NR_uname:
10536 /* no need to transcode because we use the linux syscall */
10538 struct new_utsname * buf;
10540 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10541 return -TARGET_EFAULT;
10542 ret = get_errno(sys_uname(buf));
10543 if (!is_error(ret)) {
10544 /* Overwrite the native machine name with whatever is being
10545 emulated. */
10546 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10547 sizeof(buf->machine));
10548 /* Allow the user to override the reported release. */
10549 if (qemu_uname_release && *qemu_uname_release) {
10550 g_strlcpy(buf->release, qemu_uname_release,
10551 sizeof(buf->release));
10554 unlock_user_struct(buf, arg1, 1);
10556 return ret;
10557 #ifdef TARGET_I386
10558 case TARGET_NR_modify_ldt:
10559 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10560 #if !defined(TARGET_X86_64)
10561 case TARGET_NR_vm86:
10562 return do_vm86(cpu_env, arg1, arg2);
10563 #endif
10564 #endif
10565 #if defined(TARGET_NR_adjtimex)
10566 case TARGET_NR_adjtimex:
10568 struct timex host_buf;
10570 if (target_to_host_timex(&host_buf, arg1) != 0) {
10571 return -TARGET_EFAULT;
10573 ret = get_errno(adjtimex(&host_buf));
10574 if (!is_error(ret)) {
10575 if (host_to_target_timex(arg1, &host_buf) != 0) {
10576 return -TARGET_EFAULT;
10580 return ret;
10581 #endif
10582 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10583 case TARGET_NR_clock_adjtime:
10585 struct timex htx, *phtx = &htx;
10587 if (target_to_host_timex(phtx, arg2) != 0) {
10588 return -TARGET_EFAULT;
10590 ret = get_errno(clock_adjtime(arg1, phtx));
10591 if (!is_error(ret) && phtx) {
10592 if (host_to_target_timex(arg2, phtx) != 0) {
10593 return -TARGET_EFAULT;
10597 return ret;
10598 #endif
10599 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10600 case TARGET_NR_clock_adjtime64:
10602 struct timex htx;
10604 if (target_to_host_timex64(&htx, arg2) != 0) {
10605 return -TARGET_EFAULT;
10607 ret = get_errno(clock_adjtime(arg1, &htx));
10608 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10609 return -TARGET_EFAULT;
10612 return ret;
10613 #endif
10614 case TARGET_NR_getpgid:
10615 return get_errno(getpgid(arg1));
10616 case TARGET_NR_fchdir:
10617 return get_errno(fchdir(arg1));
10618 case TARGET_NR_personality:
10619 return get_errno(personality(arg1));
10620 #ifdef TARGET_NR__llseek /* Not on alpha */
10621 case TARGET_NR__llseek:
10623 int64_t res;
10624 #if !defined(__NR_llseek)
10625 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10626 if (res == -1) {
10627 ret = get_errno(res);
10628 } else {
10629 ret = 0;
10631 #else
10632 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10633 #endif
10634 if ((ret == 0) && put_user_s64(res, arg4)) {
10635 return -TARGET_EFAULT;
10638 return ret;
10639 #endif
10640 #ifdef TARGET_NR_getdents
10641 case TARGET_NR_getdents:
10642 return do_getdents(arg1, arg2, arg3);
10643 #endif /* TARGET_NR_getdents */
10644 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10645 case TARGET_NR_getdents64:
10646 return do_getdents64(arg1, arg2, arg3);
10647 #endif /* TARGET_NR_getdents64 */
10648 #if defined(TARGET_NR__newselect)
10649 case TARGET_NR__newselect:
10650 return do_select(arg1, arg2, arg3, arg4, arg5);
10651 #endif
10652 #ifdef TARGET_NR_poll
10653 case TARGET_NR_poll:
10654 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10655 #endif
10656 #ifdef TARGET_NR_ppoll
10657 case TARGET_NR_ppoll:
10658 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10659 #endif
10660 #ifdef TARGET_NR_ppoll_time64
10661 case TARGET_NR_ppoll_time64:
10662 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10663 #endif
10664 case TARGET_NR_flock:
10665 /* NOTE: the flock constant seems to be the same for every
10666 Linux platform */
10667 return get_errno(safe_flock(arg1, arg2));
10668 case TARGET_NR_readv:
10670 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10671 if (vec != NULL) {
10672 ret = get_errno(safe_readv(arg1, vec, arg3));
10673 unlock_iovec(vec, arg2, arg3, 1);
10674 } else {
10675 ret = -host_to_target_errno(errno);
10678 return ret;
10679 case TARGET_NR_writev:
10681 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10682 if (vec != NULL) {
10683 ret = get_errno(safe_writev(arg1, vec, arg3));
10684 unlock_iovec(vec, arg2, arg3, 0);
10685 } else {
10686 ret = -host_to_target_errno(errno);
10689 return ret;
10690 #if defined(TARGET_NR_preadv)
10691 case TARGET_NR_preadv:
10693 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10694 if (vec != NULL) {
10695 unsigned long low, high;
10697 target_to_host_low_high(arg4, arg5, &low, &high);
10698 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10699 unlock_iovec(vec, arg2, arg3, 1);
10700 } else {
10701 ret = -host_to_target_errno(errno);
10704 return ret;
10705 #endif
10706 #if defined(TARGET_NR_pwritev)
10707 case TARGET_NR_pwritev:
10709 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10710 if (vec != NULL) {
10711 unsigned long low, high;
10713 target_to_host_low_high(arg4, arg5, &low, &high);
10714 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10715 unlock_iovec(vec, arg2, arg3, 0);
10716 } else {
10717 ret = -host_to_target_errno(errno);
10720 return ret;
10721 #endif
10722 case TARGET_NR_getsid:
10723 return get_errno(getsid(arg1));
10724 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10725 case TARGET_NR_fdatasync:
10726 return get_errno(fdatasync(arg1));
10727 #endif
10728 case TARGET_NR_sched_getaffinity:
10730 unsigned int mask_size;
10731 unsigned long *mask;
10734 * sched_getaffinity needs multiples of ulong, so need to take
10735 * care of mismatches between target ulong and host ulong sizes.
10737 if (arg2 & (sizeof(abi_ulong) - 1)) {
10738 return -TARGET_EINVAL;
10740 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10742 mask = alloca(mask_size);
10743 memset(mask, 0, mask_size);
10744 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10746 if (!is_error(ret)) {
10747 if (ret > arg2) {
10748 /* More data returned than the caller's buffer will fit.
10749 * This only happens if sizeof(abi_long) < sizeof(long)
10750 * and the caller passed us a buffer holding an odd number
10751 * of abi_longs. If the host kernel is actually using the
10752 * extra 4 bytes then fail EINVAL; otherwise we can just
10753 * ignore them and only copy the interesting part.
10755 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10756 if (numcpus > arg2 * 8) {
10757 return -TARGET_EINVAL;
10759 ret = arg2;
10762 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10763 return -TARGET_EFAULT;
10767 return ret;
10768 case TARGET_NR_sched_setaffinity:
10770 unsigned int mask_size;
10771 unsigned long *mask;
10774 * sched_setaffinity needs multiples of ulong, so need to take
10775 * care of mismatches between target ulong and host ulong sizes.
10777 if (arg2 & (sizeof(abi_ulong) - 1)) {
10778 return -TARGET_EINVAL;
10780 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10781 mask = alloca(mask_size);
10783 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10784 if (ret) {
10785 return ret;
10788 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10790 case TARGET_NR_getcpu:
10792 unsigned cpu, node;
10793 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10794 arg2 ? &node : NULL,
10795 NULL));
10796 if (is_error(ret)) {
10797 return ret;
10799 if (arg1 && put_user_u32(cpu, arg1)) {
10800 return -TARGET_EFAULT;
10802 if (arg2 && put_user_u32(node, arg2)) {
10803 return -TARGET_EFAULT;
10806 return ret;
10807 case TARGET_NR_sched_setparam:
10809 struct target_sched_param *target_schp;
10810 struct sched_param schp;
10812 if (arg2 == 0) {
10813 return -TARGET_EINVAL;
10815 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10816 return -TARGET_EFAULT;
10818 schp.sched_priority = tswap32(target_schp->sched_priority);
10819 unlock_user_struct(target_schp, arg2, 0);
10820 return get_errno(sys_sched_setparam(arg1, &schp));
10822 case TARGET_NR_sched_getparam:
10824 struct target_sched_param *target_schp;
10825 struct sched_param schp;
10827 if (arg2 == 0) {
10828 return -TARGET_EINVAL;
10830 ret = get_errno(sys_sched_getparam(arg1, &schp));
10831 if (!is_error(ret)) {
10832 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10833 return -TARGET_EFAULT;
10835 target_schp->sched_priority = tswap32(schp.sched_priority);
10836 unlock_user_struct(target_schp, arg2, 1);
10839 return ret;
10840 case TARGET_NR_sched_setscheduler:
10842 struct target_sched_param *target_schp;
10843 struct sched_param schp;
10844 if (arg3 == 0) {
10845 return -TARGET_EINVAL;
10847 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10848 return -TARGET_EFAULT;
10850 schp.sched_priority = tswap32(target_schp->sched_priority);
10851 unlock_user_struct(target_schp, arg3, 0);
10852 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10854 case TARGET_NR_sched_getscheduler:
10855 return get_errno(sys_sched_getscheduler(arg1));
10856 case TARGET_NR_sched_getattr:
10858 struct target_sched_attr *target_scha;
10859 struct sched_attr scha;
10860 if (arg2 == 0) {
10861 return -TARGET_EINVAL;
10863 if (arg3 > sizeof(scha)) {
10864 arg3 = sizeof(scha);
10866 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10867 if (!is_error(ret)) {
10868 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10869 if (!target_scha) {
10870 return -TARGET_EFAULT;
10872 target_scha->size = tswap32(scha.size);
10873 target_scha->sched_policy = tswap32(scha.sched_policy);
10874 target_scha->sched_flags = tswap64(scha.sched_flags);
10875 target_scha->sched_nice = tswap32(scha.sched_nice);
10876 target_scha->sched_priority = tswap32(scha.sched_priority);
10877 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10878 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10879 target_scha->sched_period = tswap64(scha.sched_period);
10880 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10881 target_scha->sched_util_min = tswap32(scha.sched_util_min);
10882 target_scha->sched_util_max = tswap32(scha.sched_util_max);
10884 unlock_user(target_scha, arg2, arg3);
10886 return ret;
10888 case TARGET_NR_sched_setattr:
10890 struct target_sched_attr *target_scha;
10891 struct sched_attr scha;
10892 uint32_t size;
10893 int zeroed;
10894 if (arg2 == 0) {
10895 return -TARGET_EINVAL;
10897 if (get_user_u32(size, arg2)) {
10898 return -TARGET_EFAULT;
10900 if (!size) {
10901 size = offsetof(struct target_sched_attr, sched_util_min);
10903 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10904 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10905 return -TARGET_EFAULT;
10907 return -TARGET_E2BIG;
10910 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10911 if (zeroed < 0) {
10912 return zeroed;
10913 } else if (zeroed == 0) {
10914 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10915 return -TARGET_EFAULT;
10917 return -TARGET_E2BIG;
10919 if (size > sizeof(struct target_sched_attr)) {
10920 size = sizeof(struct target_sched_attr);
10923 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10924 if (!target_scha) {
10925 return -TARGET_EFAULT;
10927 scha.size = size;
10928 scha.sched_policy = tswap32(target_scha->sched_policy);
10929 scha.sched_flags = tswap64(target_scha->sched_flags);
10930 scha.sched_nice = tswap32(target_scha->sched_nice);
10931 scha.sched_priority = tswap32(target_scha->sched_priority);
10932 scha.sched_runtime = tswap64(target_scha->sched_runtime);
10933 scha.sched_deadline = tswap64(target_scha->sched_deadline);
10934 scha.sched_period = tswap64(target_scha->sched_period);
10935 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10936 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10937 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10939 unlock_user(target_scha, arg2, 0);
10940 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10942 case TARGET_NR_sched_yield:
10943 return get_errno(sched_yield());
10944 case TARGET_NR_sched_get_priority_max:
10945 return get_errno(sched_get_priority_max(arg1));
10946 case TARGET_NR_sched_get_priority_min:
10947 return get_errno(sched_get_priority_min(arg1));
10948 #ifdef TARGET_NR_sched_rr_get_interval
10949 case TARGET_NR_sched_rr_get_interval:
10951 struct timespec ts;
10952 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10953 if (!is_error(ret)) {
10954 ret = host_to_target_timespec(arg2, &ts);
10957 return ret;
10958 #endif
10959 #ifdef TARGET_NR_sched_rr_get_interval_time64
10960 case TARGET_NR_sched_rr_get_interval_time64:
10962 struct timespec ts;
10963 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10964 if (!is_error(ret)) {
10965 ret = host_to_target_timespec64(arg2, &ts);
10968 return ret;
10969 #endif
10970 #if defined(TARGET_NR_nanosleep)
10971 case TARGET_NR_nanosleep:
10973 struct timespec req, rem;
10974 target_to_host_timespec(&req, arg1);
10975 ret = get_errno(safe_nanosleep(&req, &rem));
10976 if (is_error(ret) && arg2) {
10977 host_to_target_timespec(arg2, &rem);
10980 return ret;
10981 #endif
10982 case TARGET_NR_prctl:
10983 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10984 break;
10985 #ifdef TARGET_NR_arch_prctl
10986 case TARGET_NR_arch_prctl:
10987 return do_arch_prctl(cpu_env, arg1, arg2);
10988 #endif
10989 #ifdef TARGET_NR_pread64
10990 case TARGET_NR_pread64:
10991 if (regpairs_aligned(cpu_env, num)) {
10992 arg4 = arg5;
10993 arg5 = arg6;
10995 if (arg2 == 0 && arg3 == 0) {
10996 /* Special-case NULL buffer and zero length, which should succeed */
10997 p = 0;
10998 } else {
10999 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11000 if (!p) {
11001 return -TARGET_EFAULT;
11004 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11005 unlock_user(p, arg2, ret);
11006 return ret;
11007 case TARGET_NR_pwrite64:
11008 if (regpairs_aligned(cpu_env, num)) {
11009 arg4 = arg5;
11010 arg5 = arg6;
11012 if (arg2 == 0 && arg3 == 0) {
11013 /* Special-case NULL buffer and zero length, which should succeed */
11014 p = 0;
11015 } else {
11016 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11017 if (!p) {
11018 return -TARGET_EFAULT;
11021 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11022 unlock_user(p, arg2, 0);
11023 return ret;
11024 #endif
11025 case TARGET_NR_getcwd:
11026 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11027 return -TARGET_EFAULT;
11028 ret = get_errno(sys_getcwd1(p, arg2));
11029 unlock_user(p, arg1, ret);
11030 return ret;
11031 case TARGET_NR_capget:
11032 case TARGET_NR_capset:
11034 struct target_user_cap_header *target_header;
11035 struct target_user_cap_data *target_data = NULL;
11036 struct __user_cap_header_struct header;
11037 struct __user_cap_data_struct data[2];
11038 struct __user_cap_data_struct *dataptr = NULL;
11039 int i, target_datalen;
11040 int data_items = 1;
11042 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11043 return -TARGET_EFAULT;
11045 header.version = tswap32(target_header->version);
11046 header.pid = tswap32(target_header->pid);
11048 if (header.version != _LINUX_CAPABILITY_VERSION) {
11049 /* Version 2 and up takes pointer to two user_data structs */
11050 data_items = 2;
11053 target_datalen = sizeof(*target_data) * data_items;
11055 if (arg2) {
11056 if (num == TARGET_NR_capget) {
11057 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11058 } else {
11059 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11061 if (!target_data) {
11062 unlock_user_struct(target_header, arg1, 0);
11063 return -TARGET_EFAULT;
11066 if (num == TARGET_NR_capset) {
11067 for (i = 0; i < data_items; i++) {
11068 data[i].effective = tswap32(target_data[i].effective);
11069 data[i].permitted = tswap32(target_data[i].permitted);
11070 data[i].inheritable = tswap32(target_data[i].inheritable);
11074 dataptr = data;
11077 if (num == TARGET_NR_capget) {
11078 ret = get_errno(capget(&header, dataptr));
11079 } else {
11080 ret = get_errno(capset(&header, dataptr));
11083 /* The kernel always updates version for both capget and capset */
11084 target_header->version = tswap32(header.version);
11085 unlock_user_struct(target_header, arg1, 1);
11087 if (arg2) {
11088 if (num == TARGET_NR_capget) {
11089 for (i = 0; i < data_items; i++) {
11090 target_data[i].effective = tswap32(data[i].effective);
11091 target_data[i].permitted = tswap32(data[i].permitted);
11092 target_data[i].inheritable = tswap32(data[i].inheritable);
11094 unlock_user(target_data, arg2, target_datalen);
11095 } else {
11096 unlock_user(target_data, arg2, 0);
11099 return ret;
11101 case TARGET_NR_sigaltstack:
11102 return do_sigaltstack(arg1, arg2, cpu_env);
11104 #ifdef CONFIG_SENDFILE
11105 #ifdef TARGET_NR_sendfile
11106 case TARGET_NR_sendfile:
11108 off_t *offp = NULL;
11109 off_t off;
11110 if (arg3) {
11111 ret = get_user_sal(off, arg3);
11112 if (is_error(ret)) {
11113 return ret;
11115 offp = &off;
11117 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11118 if (!is_error(ret) && arg3) {
11119 abi_long ret2 = put_user_sal(off, arg3);
11120 if (is_error(ret2)) {
11121 ret = ret2;
11124 return ret;
11126 #endif
11127 #ifdef TARGET_NR_sendfile64
11128 case TARGET_NR_sendfile64:
11130 off_t *offp = NULL;
11131 off_t off;
11132 if (arg3) {
11133 ret = get_user_s64(off, arg3);
11134 if (is_error(ret)) {
11135 return ret;
11137 offp = &off;
11139 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11140 if (!is_error(ret) && arg3) {
11141 abi_long ret2 = put_user_s64(off, arg3);
11142 if (is_error(ret2)) {
11143 ret = ret2;
11146 return ret;
11148 #endif
11149 #endif
11150 #ifdef TARGET_NR_vfork
11151 case TARGET_NR_vfork:
11152 return get_errno(do_fork(cpu_env,
11153 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11154 0, 0, 0, 0));
11155 #endif
11156 #ifdef TARGET_NR_ugetrlimit
11157 case TARGET_NR_ugetrlimit:
11159 struct rlimit rlim;
11160 int resource = target_to_host_resource(arg1);
11161 ret = get_errno(getrlimit(resource, &rlim));
11162 if (!is_error(ret)) {
11163 struct target_rlimit *target_rlim;
11164 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11165 return -TARGET_EFAULT;
11166 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11167 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11168 unlock_user_struct(target_rlim, arg2, 1);
11170 return ret;
11172 #endif
11173 #ifdef TARGET_NR_truncate64
11174 case TARGET_NR_truncate64:
11175 if (!(p = lock_user_string(arg1)))
11176 return -TARGET_EFAULT;
11177 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11178 unlock_user(p, arg1, 0);
11179 return ret;
11180 #endif
11181 #ifdef TARGET_NR_ftruncate64
11182 case TARGET_NR_ftruncate64:
11183 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11184 #endif
11185 #ifdef TARGET_NR_stat64
11186 case TARGET_NR_stat64:
11187 if (!(p = lock_user_string(arg1))) {
11188 return -TARGET_EFAULT;
11190 ret = get_errno(stat(path(p), &st));
11191 unlock_user(p, arg1, 0);
11192 if (!is_error(ret))
11193 ret = host_to_target_stat64(cpu_env, arg2, &st);
11194 return ret;
11195 #endif
11196 #ifdef TARGET_NR_lstat64
11197 case TARGET_NR_lstat64:
11198 if (!(p = lock_user_string(arg1))) {
11199 return -TARGET_EFAULT;
11201 ret = get_errno(lstat(path(p), &st));
11202 unlock_user(p, arg1, 0);
11203 if (!is_error(ret))
11204 ret = host_to_target_stat64(cpu_env, arg2, &st);
11205 return ret;
11206 #endif
11207 #ifdef TARGET_NR_fstat64
11208 case TARGET_NR_fstat64:
11209 ret = get_errno(fstat(arg1, &st));
11210 if (!is_error(ret))
11211 ret = host_to_target_stat64(cpu_env, arg2, &st);
11212 return ret;
11213 #endif
11214 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11215 #ifdef TARGET_NR_fstatat64
11216 case TARGET_NR_fstatat64:
11217 #endif
11218 #ifdef TARGET_NR_newfstatat
11219 case TARGET_NR_newfstatat:
11220 #endif
11221 if (!(p = lock_user_string(arg2))) {
11222 return -TARGET_EFAULT;
11224 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11225 unlock_user(p, arg2, 0);
11226 if (!is_error(ret))
11227 ret = host_to_target_stat64(cpu_env, arg3, &st);
11228 return ret;
11229 #endif
11230 #if defined(TARGET_NR_statx)
11231 case TARGET_NR_statx:
11233 struct target_statx *target_stx;
11234 int dirfd = arg1;
11235 int flags = arg3;
11237 p = lock_user_string(arg2);
11238 if (p == NULL) {
11239 return -TARGET_EFAULT;
11241 #if defined(__NR_statx)
11244 * It is assumed that struct statx is architecture independent.
11246 struct target_statx host_stx;
11247 int mask = arg4;
11249 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11250 if (!is_error(ret)) {
11251 if (host_to_target_statx(&host_stx, arg5) != 0) {
11252 unlock_user(p, arg2, 0);
11253 return -TARGET_EFAULT;
11257 if (ret != -TARGET_ENOSYS) {
11258 unlock_user(p, arg2, 0);
11259 return ret;
11262 #endif
11263 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11264 unlock_user(p, arg2, 0);
11266 if (!is_error(ret)) {
11267 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11268 return -TARGET_EFAULT;
11270 memset(target_stx, 0, sizeof(*target_stx));
11271 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11272 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11273 __put_user(st.st_ino, &target_stx->stx_ino);
11274 __put_user(st.st_mode, &target_stx->stx_mode);
11275 __put_user(st.st_uid, &target_stx->stx_uid);
11276 __put_user(st.st_gid, &target_stx->stx_gid);
11277 __put_user(st.st_nlink, &target_stx->stx_nlink);
11278 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11279 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11280 __put_user(st.st_size, &target_stx->stx_size);
11281 __put_user(st.st_blksize, &target_stx->stx_blksize);
11282 __put_user(st.st_blocks, &target_stx->stx_blocks);
11283 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11284 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11285 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11286 unlock_user_struct(target_stx, arg5, 1);
11289 return ret;
11290 #endif
11291 #ifdef TARGET_NR_lchown
11292 case TARGET_NR_lchown:
11293 if (!(p = lock_user_string(arg1)))
11294 return -TARGET_EFAULT;
11295 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11296 unlock_user(p, arg1, 0);
11297 return ret;
11298 #endif
11299 #ifdef TARGET_NR_getuid
11300 case TARGET_NR_getuid:
11301 return get_errno(high2lowuid(getuid()));
11302 #endif
11303 #ifdef TARGET_NR_getgid
11304 case TARGET_NR_getgid:
11305 return get_errno(high2lowgid(getgid()));
11306 #endif
11307 #ifdef TARGET_NR_geteuid
11308 case TARGET_NR_geteuid:
11309 return get_errno(high2lowuid(geteuid()));
11310 #endif
11311 #ifdef TARGET_NR_getegid
11312 case TARGET_NR_getegid:
11313 return get_errno(high2lowgid(getegid()));
11314 #endif
11315 case TARGET_NR_setreuid:
11316 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11317 case TARGET_NR_setregid:
11318 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11319 case TARGET_NR_getgroups:
11321 int gidsetsize = arg1;
11322 target_id *target_grouplist;
11323 gid_t *grouplist;
11324 int i;
11326 grouplist = alloca(gidsetsize * sizeof(gid_t));
11327 ret = get_errno(getgroups(gidsetsize, grouplist));
11328 if (gidsetsize == 0)
11329 return ret;
11330 if (!is_error(ret)) {
11331 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11332 if (!target_grouplist)
11333 return -TARGET_EFAULT;
11334 for(i = 0;i < ret; i++)
11335 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11336 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11339 return ret;
11340 case TARGET_NR_setgroups:
11342 int gidsetsize = arg1;
11343 target_id *target_grouplist;
11344 gid_t *grouplist = NULL;
11345 int i;
11346 if (gidsetsize) {
11347 grouplist = alloca(gidsetsize * sizeof(gid_t));
11348 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11349 if (!target_grouplist) {
11350 return -TARGET_EFAULT;
11352 for (i = 0; i < gidsetsize; i++) {
11353 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11355 unlock_user(target_grouplist, arg2, 0);
11357 return get_errno(setgroups(gidsetsize, grouplist));
11359 case TARGET_NR_fchown:
11360 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11361 #if defined(TARGET_NR_fchownat)
11362 case TARGET_NR_fchownat:
11363 if (!(p = lock_user_string(arg2)))
11364 return -TARGET_EFAULT;
11365 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11366 low2highgid(arg4), arg5));
11367 unlock_user(p, arg2, 0);
11368 return ret;
11369 #endif
11370 #ifdef TARGET_NR_setresuid
11371 case TARGET_NR_setresuid:
11372 return get_errno(sys_setresuid(low2highuid(arg1),
11373 low2highuid(arg2),
11374 low2highuid(arg3)));
11375 #endif
11376 #ifdef TARGET_NR_getresuid
11377 case TARGET_NR_getresuid:
11379 uid_t ruid, euid, suid;
11380 ret = get_errno(getresuid(&ruid, &euid, &suid));
11381 if (!is_error(ret)) {
11382 if (put_user_id(high2lowuid(ruid), arg1)
11383 || put_user_id(high2lowuid(euid), arg2)
11384 || put_user_id(high2lowuid(suid), arg3))
11385 return -TARGET_EFAULT;
11388 return ret;
11389 #endif
11390 #ifdef TARGET_NR_getresgid
11391 case TARGET_NR_setresgid:
11392 return get_errno(sys_setresgid(low2highgid(arg1),
11393 low2highgid(arg2),
11394 low2highgid(arg3)));
11395 #endif
11396 #ifdef TARGET_NR_getresgid
11397 case TARGET_NR_getresgid:
11399 gid_t rgid, egid, sgid;
11400 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11401 if (!is_error(ret)) {
11402 if (put_user_id(high2lowgid(rgid), arg1)
11403 || put_user_id(high2lowgid(egid), arg2)
11404 || put_user_id(high2lowgid(sgid), arg3))
11405 return -TARGET_EFAULT;
11408 return ret;
11409 #endif
11410 #ifdef TARGET_NR_chown
11411 case TARGET_NR_chown:
11412 if (!(p = lock_user_string(arg1)))
11413 return -TARGET_EFAULT;
11414 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11415 unlock_user(p, arg1, 0);
11416 return ret;
11417 #endif
11418 case TARGET_NR_setuid:
11419 return get_errno(sys_setuid(low2highuid(arg1)));
11420 case TARGET_NR_setgid:
11421 return get_errno(sys_setgid(low2highgid(arg1)));
11422 case TARGET_NR_setfsuid:
11423 return get_errno(setfsuid(arg1));
11424 case TARGET_NR_setfsgid:
11425 return get_errno(setfsgid(arg1));
11427 #ifdef TARGET_NR_lchown32
11428 case TARGET_NR_lchown32:
11429 if (!(p = lock_user_string(arg1)))
11430 return -TARGET_EFAULT;
11431 ret = get_errno(lchown(p, arg2, arg3));
11432 unlock_user(p, arg1, 0);
11433 return ret;
11434 #endif
11435 #ifdef TARGET_NR_getuid32
11436 case TARGET_NR_getuid32:
11437 return get_errno(getuid());
11438 #endif
11440 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11441 /* Alpha specific */
11442 case TARGET_NR_getxuid:
11444 uid_t euid;
11445 euid=geteuid();
11446 cpu_env->ir[IR_A4]=euid;
11448 return get_errno(getuid());
11449 #endif
11450 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11451 /* Alpha specific */
11452 case TARGET_NR_getxgid:
11454 uid_t egid;
11455 egid=getegid();
11456 cpu_env->ir[IR_A4]=egid;
11458 return get_errno(getgid());
11459 #endif
11460 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11461 /* Alpha specific */
11462 case TARGET_NR_osf_getsysinfo:
11463 ret = -TARGET_EOPNOTSUPP;
11464 switch (arg1) {
11465 case TARGET_GSI_IEEE_FP_CONTROL:
11467 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11468 uint64_t swcr = cpu_env->swcr;
11470 swcr &= ~SWCR_STATUS_MASK;
11471 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11473 if (put_user_u64 (swcr, arg2))
11474 return -TARGET_EFAULT;
11475 ret = 0;
11477 break;
11479 /* case GSI_IEEE_STATE_AT_SIGNAL:
11480 -- Not implemented in linux kernel.
11481 case GSI_UACPROC:
11482 -- Retrieves current unaligned access state; not much used.
11483 case GSI_PROC_TYPE:
11484 -- Retrieves implver information; surely not used.
11485 case GSI_GET_HWRPB:
11486 -- Grabs a copy of the HWRPB; surely not used.
11489 return ret;
11490 #endif
11491 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11492 /* Alpha specific */
11493 case TARGET_NR_osf_setsysinfo:
11494 ret = -TARGET_EOPNOTSUPP;
11495 switch (arg1) {
11496 case TARGET_SSI_IEEE_FP_CONTROL:
11498 uint64_t swcr, fpcr;
11500 if (get_user_u64 (swcr, arg2)) {
11501 return -TARGET_EFAULT;
11505 * The kernel calls swcr_update_status to update the
11506 * status bits from the fpcr at every point that it
11507 * could be queried. Therefore, we store the status
11508 * bits only in FPCR.
11510 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11512 fpcr = cpu_alpha_load_fpcr(cpu_env);
11513 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11514 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11515 cpu_alpha_store_fpcr(cpu_env, fpcr);
11516 ret = 0;
11518 break;
11520 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11522 uint64_t exc, fpcr, fex;
11524 if (get_user_u64(exc, arg2)) {
11525 return -TARGET_EFAULT;
11527 exc &= SWCR_STATUS_MASK;
11528 fpcr = cpu_alpha_load_fpcr(cpu_env);
11530 /* Old exceptions are not signaled. */
11531 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11532 fex = exc & ~fex;
11533 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11534 fex &= (cpu_env)->swcr;
11536 /* Update the hardware fpcr. */
11537 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11538 cpu_alpha_store_fpcr(cpu_env, fpcr);
11540 if (fex) {
11541 int si_code = TARGET_FPE_FLTUNK;
11542 target_siginfo_t info;
11544 if (fex & SWCR_TRAP_ENABLE_DNO) {
11545 si_code = TARGET_FPE_FLTUND;
11547 if (fex & SWCR_TRAP_ENABLE_INE) {
11548 si_code = TARGET_FPE_FLTRES;
11550 if (fex & SWCR_TRAP_ENABLE_UNF) {
11551 si_code = TARGET_FPE_FLTUND;
11553 if (fex & SWCR_TRAP_ENABLE_OVF) {
11554 si_code = TARGET_FPE_FLTOVF;
11556 if (fex & SWCR_TRAP_ENABLE_DZE) {
11557 si_code = TARGET_FPE_FLTDIV;
11559 if (fex & SWCR_TRAP_ENABLE_INV) {
11560 si_code = TARGET_FPE_FLTINV;
11563 info.si_signo = SIGFPE;
11564 info.si_errno = 0;
11565 info.si_code = si_code;
11566 info._sifields._sigfault._addr = (cpu_env)->pc;
11567 queue_signal(cpu_env, info.si_signo,
11568 QEMU_SI_FAULT, &info);
11570 ret = 0;
11572 break;
11574 /* case SSI_NVPAIRS:
11575 -- Used with SSIN_UACPROC to enable unaligned accesses.
11576 case SSI_IEEE_STATE_AT_SIGNAL:
11577 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11578 -- Not implemented in linux kernel
11581 return ret;
11582 #endif
11583 #ifdef TARGET_NR_osf_sigprocmask
11584 /* Alpha specific. */
11585 case TARGET_NR_osf_sigprocmask:
11587 abi_ulong mask;
11588 int how;
11589 sigset_t set, oldset;
11591 switch(arg1) {
11592 case TARGET_SIG_BLOCK:
11593 how = SIG_BLOCK;
11594 break;
11595 case TARGET_SIG_UNBLOCK:
11596 how = SIG_UNBLOCK;
11597 break;
11598 case TARGET_SIG_SETMASK:
11599 how = SIG_SETMASK;
11600 break;
11601 default:
11602 return -TARGET_EINVAL;
11604 mask = arg2;
11605 target_to_host_old_sigset(&set, &mask);
11606 ret = do_sigprocmask(how, &set, &oldset);
11607 if (!ret) {
11608 host_to_target_old_sigset(&mask, &oldset);
11609 ret = mask;
11612 return ret;
11613 #endif
11615 #ifdef TARGET_NR_getgid32
11616 case TARGET_NR_getgid32:
11617 return get_errno(getgid());
11618 #endif
11619 #ifdef TARGET_NR_geteuid32
11620 case TARGET_NR_geteuid32:
11621 return get_errno(geteuid());
11622 #endif
11623 #ifdef TARGET_NR_getegid32
11624 case TARGET_NR_getegid32:
11625 return get_errno(getegid());
11626 #endif
11627 #ifdef TARGET_NR_setreuid32
11628 case TARGET_NR_setreuid32:
11629 return get_errno(setreuid(arg1, arg2));
11630 #endif
11631 #ifdef TARGET_NR_setregid32
11632 case TARGET_NR_setregid32:
11633 return get_errno(setregid(arg1, arg2));
11634 #endif
11635 #ifdef TARGET_NR_getgroups32
11636 case TARGET_NR_getgroups32:
11638 int gidsetsize = arg1;
11639 uint32_t *target_grouplist;
11640 gid_t *grouplist;
11641 int i;
11643 grouplist = alloca(gidsetsize * sizeof(gid_t));
11644 ret = get_errno(getgroups(gidsetsize, grouplist));
11645 if (gidsetsize == 0)
11646 return ret;
11647 if (!is_error(ret)) {
11648 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11649 if (!target_grouplist) {
11650 return -TARGET_EFAULT;
11652 for(i = 0;i < ret; i++)
11653 target_grouplist[i] = tswap32(grouplist[i]);
11654 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11657 return ret;
11658 #endif
11659 #ifdef TARGET_NR_setgroups32
11660 case TARGET_NR_setgroups32:
11662 int gidsetsize = arg1;
11663 uint32_t *target_grouplist;
11664 gid_t *grouplist;
11665 int i;
11667 grouplist = alloca(gidsetsize * sizeof(gid_t));
11668 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11669 if (!target_grouplist) {
11670 return -TARGET_EFAULT;
11672 for(i = 0;i < gidsetsize; i++)
11673 grouplist[i] = tswap32(target_grouplist[i]);
11674 unlock_user(target_grouplist, arg2, 0);
11675 return get_errno(setgroups(gidsetsize, grouplist));
11677 #endif
11678 #ifdef TARGET_NR_fchown32
11679 case TARGET_NR_fchown32:
11680 return get_errno(fchown(arg1, arg2, arg3));
11681 #endif
11682 #ifdef TARGET_NR_setresuid32
11683 case TARGET_NR_setresuid32:
11684 return get_errno(sys_setresuid(arg1, arg2, arg3));
11685 #endif
11686 #ifdef TARGET_NR_getresuid32
11687 case TARGET_NR_getresuid32:
11689 uid_t ruid, euid, suid;
11690 ret = get_errno(getresuid(&ruid, &euid, &suid));
11691 if (!is_error(ret)) {
11692 if (put_user_u32(ruid, arg1)
11693 || put_user_u32(euid, arg2)
11694 || put_user_u32(suid, arg3))
11695 return -TARGET_EFAULT;
11698 return ret;
11699 #endif
11700 #ifdef TARGET_NR_setresgid32
11701 case TARGET_NR_setresgid32:
11702 return get_errno(sys_setresgid(arg1, arg2, arg3));
11703 #endif
11704 #ifdef TARGET_NR_getresgid32
11705 case TARGET_NR_getresgid32:
11707 gid_t rgid, egid, sgid;
11708 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11709 if (!is_error(ret)) {
11710 if (put_user_u32(rgid, arg1)
11711 || put_user_u32(egid, arg2)
11712 || put_user_u32(sgid, arg3))
11713 return -TARGET_EFAULT;
11716 return ret;
11717 #endif
11718 #ifdef TARGET_NR_chown32
11719 case TARGET_NR_chown32:
11720 if (!(p = lock_user_string(arg1)))
11721 return -TARGET_EFAULT;
11722 ret = get_errno(chown(p, arg2, arg3));
11723 unlock_user(p, arg1, 0);
11724 return ret;
11725 #endif
11726 #ifdef TARGET_NR_setuid32
11727 case TARGET_NR_setuid32:
11728 return get_errno(sys_setuid(arg1));
11729 #endif
11730 #ifdef TARGET_NR_setgid32
11731 case TARGET_NR_setgid32:
11732 return get_errno(sys_setgid(arg1));
11733 #endif
11734 #ifdef TARGET_NR_setfsuid32
11735 case TARGET_NR_setfsuid32:
11736 return get_errno(setfsuid(arg1));
11737 #endif
11738 #ifdef TARGET_NR_setfsgid32
11739 case TARGET_NR_setfsgid32:
11740 return get_errno(setfsgid(arg1));
11741 #endif
11742 #ifdef TARGET_NR_mincore
11743 case TARGET_NR_mincore:
11745 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11746 if (!a) {
11747 return -TARGET_ENOMEM;
11749 p = lock_user_string(arg3);
11750 if (!p) {
11751 ret = -TARGET_EFAULT;
11752 } else {
11753 ret = get_errno(mincore(a, arg2, p));
11754 unlock_user(p, arg3, ret);
11756 unlock_user(a, arg1, 0);
11758 return ret;
11759 #endif
11760 #ifdef TARGET_NR_arm_fadvise64_64
11761 case TARGET_NR_arm_fadvise64_64:
11762 /* arm_fadvise64_64 looks like fadvise64_64 but
11763 * with different argument order: fd, advice, offset, len
11764 * rather than the usual fd, offset, len, advice.
11765 * Note that offset and len are both 64-bit so appear as
11766 * pairs of 32-bit registers.
11768 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11769 target_offset64(arg5, arg6), arg2);
11770 return -host_to_target_errno(ret);
11771 #endif
11773 #if TARGET_ABI_BITS == 32
11775 #ifdef TARGET_NR_fadvise64_64
11776 case TARGET_NR_fadvise64_64:
11777 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11778 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11779 ret = arg2;
11780 arg2 = arg3;
11781 arg3 = arg4;
11782 arg4 = arg5;
11783 arg5 = arg6;
11784 arg6 = ret;
11785 #else
11786 /* 6 args: fd, offset (high, low), len (high, low), advice */
11787 if (regpairs_aligned(cpu_env, num)) {
11788 /* offset is in (3,4), len in (5,6) and advice in 7 */
11789 arg2 = arg3;
11790 arg3 = arg4;
11791 arg4 = arg5;
11792 arg5 = arg6;
11793 arg6 = arg7;
11795 #endif
11796 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11797 target_offset64(arg4, arg5), arg6);
11798 return -host_to_target_errno(ret);
11799 #endif
11801 #ifdef TARGET_NR_fadvise64
11802 case TARGET_NR_fadvise64:
11803 /* 5 args: fd, offset (high, low), len, advice */
11804 if (regpairs_aligned(cpu_env, num)) {
11805 /* offset is in (3,4), len in 5 and advice in 6 */
11806 arg2 = arg3;
11807 arg3 = arg4;
11808 arg4 = arg5;
11809 arg5 = arg6;
11811 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11812 return -host_to_target_errno(ret);
11813 #endif
11815 #else /* not a 32-bit ABI */
11816 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11817 #ifdef TARGET_NR_fadvise64_64
11818 case TARGET_NR_fadvise64_64:
11819 #endif
11820 #ifdef TARGET_NR_fadvise64
11821 case TARGET_NR_fadvise64:
11822 #endif
11823 #ifdef TARGET_S390X
11824 switch (arg4) {
11825 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11826 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11827 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11828 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11829 default: break;
11831 #endif
11832 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11833 #endif
11834 #endif /* end of 64-bit ABI fadvise handling */
11836 #ifdef TARGET_NR_madvise
11837 case TARGET_NR_madvise:
11838 return target_madvise(arg1, arg2, arg3);
11839 #endif
11840 #ifdef TARGET_NR_fcntl64
11841 case TARGET_NR_fcntl64:
11843 int cmd;
11844 struct flock64 fl;
11845 from_flock64_fn *copyfrom = copy_from_user_flock64;
11846 to_flock64_fn *copyto = copy_to_user_flock64;
11848 #ifdef TARGET_ARM
11849 if (!cpu_env->eabi) {
11850 copyfrom = copy_from_user_oabi_flock64;
11851 copyto = copy_to_user_oabi_flock64;
11853 #endif
11855 cmd = target_to_host_fcntl_cmd(arg2);
11856 if (cmd == -TARGET_EINVAL) {
11857 return cmd;
11860 switch(arg2) {
11861 case TARGET_F_GETLK64:
11862 ret = copyfrom(&fl, arg3);
11863 if (ret) {
11864 break;
11866 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11867 if (ret == 0) {
11868 ret = copyto(arg3, &fl);
11870 break;
11872 case TARGET_F_SETLK64:
11873 case TARGET_F_SETLKW64:
11874 ret = copyfrom(&fl, arg3);
11875 if (ret) {
11876 break;
11878 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11879 break;
11880 default:
11881 ret = do_fcntl(arg1, arg2, arg3);
11882 break;
11884 return ret;
11886 #endif
11887 #ifdef TARGET_NR_cacheflush
11888 case TARGET_NR_cacheflush:
11889 /* self-modifying code is handled automatically, so nothing needed */
11890 return 0;
11891 #endif
11892 #ifdef TARGET_NR_getpagesize
11893 case TARGET_NR_getpagesize:
11894 return TARGET_PAGE_SIZE;
11895 #endif
11896 case TARGET_NR_gettid:
11897 return get_errno(sys_gettid());
11898 #ifdef TARGET_NR_readahead
11899 case TARGET_NR_readahead:
11900 #if TARGET_ABI_BITS == 32
11901 if (regpairs_aligned(cpu_env, num)) {
11902 arg2 = arg3;
11903 arg3 = arg4;
11904 arg4 = arg5;
11906 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11907 #else
11908 ret = get_errno(readahead(arg1, arg2, arg3));
11909 #endif
11910 return ret;
11911 #endif
11912 #ifdef CONFIG_ATTR
11913 #ifdef TARGET_NR_setxattr
11914 case TARGET_NR_listxattr:
11915 case TARGET_NR_llistxattr:
11917 void *p, *b = 0;
11918 if (arg2) {
11919 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11920 if (!b) {
11921 return -TARGET_EFAULT;
11924 p = lock_user_string(arg1);
11925 if (p) {
11926 if (num == TARGET_NR_listxattr) {
11927 ret = get_errno(listxattr(p, b, arg3));
11928 } else {
11929 ret = get_errno(llistxattr(p, b, arg3));
11931 } else {
11932 ret = -TARGET_EFAULT;
11934 unlock_user(p, arg1, 0);
11935 unlock_user(b, arg2, arg3);
11936 return ret;
11938 case TARGET_NR_flistxattr:
11940 void *b = 0;
11941 if (arg2) {
11942 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11943 if (!b) {
11944 return -TARGET_EFAULT;
11947 ret = get_errno(flistxattr(arg1, b, arg3));
11948 unlock_user(b, arg2, arg3);
11949 return ret;
11951 case TARGET_NR_setxattr:
11952 case TARGET_NR_lsetxattr:
11954 void *p, *n, *v = 0;
11955 if (arg3) {
11956 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11957 if (!v) {
11958 return -TARGET_EFAULT;
11961 p = lock_user_string(arg1);
11962 n = lock_user_string(arg2);
11963 if (p && n) {
11964 if (num == TARGET_NR_setxattr) {
11965 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11966 } else {
11967 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11969 } else {
11970 ret = -TARGET_EFAULT;
11972 unlock_user(p, arg1, 0);
11973 unlock_user(n, arg2, 0);
11974 unlock_user(v, arg3, 0);
11976 return ret;
11977 case TARGET_NR_fsetxattr:
11979 void *n, *v = 0;
11980 if (arg3) {
11981 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11982 if (!v) {
11983 return -TARGET_EFAULT;
11986 n = lock_user_string(arg2);
11987 if (n) {
11988 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11989 } else {
11990 ret = -TARGET_EFAULT;
11992 unlock_user(n, arg2, 0);
11993 unlock_user(v, arg3, 0);
11995 return ret;
11996 case TARGET_NR_getxattr:
11997 case TARGET_NR_lgetxattr:
11999 void *p, *n, *v = 0;
12000 if (arg3) {
12001 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12002 if (!v) {
12003 return -TARGET_EFAULT;
12006 p = lock_user_string(arg1);
12007 n = lock_user_string(arg2);
12008 if (p && n) {
12009 if (num == TARGET_NR_getxattr) {
12010 ret = get_errno(getxattr(p, n, v, arg4));
12011 } else {
12012 ret = get_errno(lgetxattr(p, n, v, arg4));
12014 } else {
12015 ret = -TARGET_EFAULT;
12017 unlock_user(p, arg1, 0);
12018 unlock_user(n, arg2, 0);
12019 unlock_user(v, arg3, arg4);
12021 return ret;
12022 case TARGET_NR_fgetxattr:
12024 void *n, *v = 0;
12025 if (arg3) {
12026 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12027 if (!v) {
12028 return -TARGET_EFAULT;
12031 n = lock_user_string(arg2);
12032 if (n) {
12033 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12034 } else {
12035 ret = -TARGET_EFAULT;
12037 unlock_user(n, arg2, 0);
12038 unlock_user(v, arg3, arg4);
12040 return ret;
12041 case TARGET_NR_removexattr:
12042 case TARGET_NR_lremovexattr:
12044 void *p, *n;
12045 p = lock_user_string(arg1);
12046 n = lock_user_string(arg2);
12047 if (p && n) {
12048 if (num == TARGET_NR_removexattr) {
12049 ret = get_errno(removexattr(p, n));
12050 } else {
12051 ret = get_errno(lremovexattr(p, n));
12053 } else {
12054 ret = -TARGET_EFAULT;
12056 unlock_user(p, arg1, 0);
12057 unlock_user(n, arg2, 0);
12059 return ret;
12060 case TARGET_NR_fremovexattr:
12062 void *n;
12063 n = lock_user_string(arg2);
12064 if (n) {
12065 ret = get_errno(fremovexattr(arg1, n));
12066 } else {
12067 ret = -TARGET_EFAULT;
12069 unlock_user(n, arg2, 0);
12071 return ret;
12072 #endif
12073 #endif /* CONFIG_ATTR */
12074 #ifdef TARGET_NR_set_thread_area
12075 case TARGET_NR_set_thread_area:
12076 #if defined(TARGET_MIPS)
12077 cpu_env->active_tc.CP0_UserLocal = arg1;
12078 return 0;
12079 #elif defined(TARGET_CRIS)
12080 if (arg1 & 0xff)
12081 ret = -TARGET_EINVAL;
12082 else {
12083 cpu_env->pregs[PR_PID] = arg1;
12084 ret = 0;
12086 return ret;
12087 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12088 return do_set_thread_area(cpu_env, arg1);
12089 #elif defined(TARGET_M68K)
12091 TaskState *ts = cpu->opaque;
12092 ts->tp_value = arg1;
12093 return 0;
12095 #else
12096 return -TARGET_ENOSYS;
12097 #endif
12098 #endif
12099 #ifdef TARGET_NR_get_thread_area
12100 case TARGET_NR_get_thread_area:
12101 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12102 return do_get_thread_area(cpu_env, arg1);
12103 #elif defined(TARGET_M68K)
12105 TaskState *ts = cpu->opaque;
12106 return ts->tp_value;
12108 #else
12109 return -TARGET_ENOSYS;
12110 #endif
12111 #endif
12112 #ifdef TARGET_NR_getdomainname
12113 case TARGET_NR_getdomainname:
12114 return -TARGET_ENOSYS;
12115 #endif
12117 #ifdef TARGET_NR_clock_settime
12118 case TARGET_NR_clock_settime:
12120 struct timespec ts;
12122 ret = target_to_host_timespec(&ts, arg2);
12123 if (!is_error(ret)) {
12124 ret = get_errno(clock_settime(arg1, &ts));
12126 return ret;
12128 #endif
12129 #ifdef TARGET_NR_clock_settime64
12130 case TARGET_NR_clock_settime64:
12132 struct timespec ts;
12134 ret = target_to_host_timespec64(&ts, arg2);
12135 if (!is_error(ret)) {
12136 ret = get_errno(clock_settime(arg1, &ts));
12138 return ret;
12140 #endif
12141 #ifdef TARGET_NR_clock_gettime
12142 case TARGET_NR_clock_gettime:
12144 struct timespec ts;
12145 ret = get_errno(clock_gettime(arg1, &ts));
12146 if (!is_error(ret)) {
12147 ret = host_to_target_timespec(arg2, &ts);
12149 return ret;
12151 #endif
12152 #ifdef TARGET_NR_clock_gettime64
12153 case TARGET_NR_clock_gettime64:
12155 struct timespec ts;
12156 ret = get_errno(clock_gettime(arg1, &ts));
12157 if (!is_error(ret)) {
12158 ret = host_to_target_timespec64(arg2, &ts);
12160 return ret;
12162 #endif
12163 #ifdef TARGET_NR_clock_getres
12164 case TARGET_NR_clock_getres:
12166 struct timespec ts;
12167 ret = get_errno(clock_getres(arg1, &ts));
12168 if (!is_error(ret)) {
12169 host_to_target_timespec(arg2, &ts);
12171 return ret;
12173 #endif
12174 #ifdef TARGET_NR_clock_getres_time64
12175 case TARGET_NR_clock_getres_time64:
12177 struct timespec ts;
12178 ret = get_errno(clock_getres(arg1, &ts));
12179 if (!is_error(ret)) {
12180 host_to_target_timespec64(arg2, &ts);
12182 return ret;
12184 #endif
12185 #ifdef TARGET_NR_clock_nanosleep
12186 case TARGET_NR_clock_nanosleep:
12188 struct timespec ts;
12189 if (target_to_host_timespec(&ts, arg3)) {
12190 return -TARGET_EFAULT;
12192 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12193 &ts, arg4 ? &ts : NULL));
12195 * if the call is interrupted by a signal handler, it fails
12196 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12197 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12199 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12200 host_to_target_timespec(arg4, &ts)) {
12201 return -TARGET_EFAULT;
12204 return ret;
12206 #endif
12207 #ifdef TARGET_NR_clock_nanosleep_time64
12208 case TARGET_NR_clock_nanosleep_time64:
12210 struct timespec ts;
12212 if (target_to_host_timespec64(&ts, arg3)) {
12213 return -TARGET_EFAULT;
12216 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12217 &ts, arg4 ? &ts : NULL));
12219 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12220 host_to_target_timespec64(arg4, &ts)) {
12221 return -TARGET_EFAULT;
12223 return ret;
12225 #endif
12227 #if defined(TARGET_NR_set_tid_address)
12228 case TARGET_NR_set_tid_address:
12230 TaskState *ts = cpu->opaque;
12231 ts->child_tidptr = arg1;
12232 /* do not call host set_tid_address() syscall, instead return tid() */
12233 return get_errno(sys_gettid());
12235 #endif
12237 case TARGET_NR_tkill:
12238 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12240 case TARGET_NR_tgkill:
12241 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12242 target_to_host_signal(arg3)));
12244 #ifdef TARGET_NR_set_robust_list
12245 case TARGET_NR_set_robust_list:
12246 case TARGET_NR_get_robust_list:
12247 /* The ABI for supporting robust futexes has userspace pass
12248 * the kernel a pointer to a linked list which is updated by
12249 * userspace after the syscall; the list is walked by the kernel
12250 * when the thread exits. Since the linked list in QEMU guest
12251 * memory isn't a valid linked list for the host and we have
12252 * no way to reliably intercept the thread-death event, we can't
12253 * support these. Silently return ENOSYS so that guest userspace
12254 * falls back to a non-robust futex implementation (which should
12255 * be OK except in the corner case of the guest crashing while
12256 * holding a mutex that is shared with another process via
12257 * shared memory).
12259 return -TARGET_ENOSYS;
12260 #endif
12262 #if defined(TARGET_NR_utimensat)
12263 case TARGET_NR_utimensat:
12265 struct timespec *tsp, ts[2];
12266 if (!arg3) {
12267 tsp = NULL;
12268 } else {
12269 if (target_to_host_timespec(ts, arg3)) {
12270 return -TARGET_EFAULT;
12272 if (target_to_host_timespec(ts + 1, arg3 +
12273 sizeof(struct target_timespec))) {
12274 return -TARGET_EFAULT;
12276 tsp = ts;
12278 if (!arg2)
12279 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12280 else {
12281 if (!(p = lock_user_string(arg2))) {
12282 return -TARGET_EFAULT;
12284 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12285 unlock_user(p, arg2, 0);
12288 return ret;
12289 #endif
12290 #ifdef TARGET_NR_utimensat_time64
12291 case TARGET_NR_utimensat_time64:
12293 struct timespec *tsp, ts[2];
12294 if (!arg3) {
12295 tsp = NULL;
12296 } else {
12297 if (target_to_host_timespec64(ts, arg3)) {
12298 return -TARGET_EFAULT;
12300 if (target_to_host_timespec64(ts + 1, arg3 +
12301 sizeof(struct target__kernel_timespec))) {
12302 return -TARGET_EFAULT;
12304 tsp = ts;
12306 if (!arg2)
12307 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12308 else {
12309 p = lock_user_string(arg2);
12310 if (!p) {
12311 return -TARGET_EFAULT;
12313 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12314 unlock_user(p, arg2, 0);
12317 return ret;
12318 #endif
12319 #ifdef TARGET_NR_futex
12320 case TARGET_NR_futex:
12321 return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12322 #endif
12323 #ifdef TARGET_NR_futex_time64
12324 case TARGET_NR_futex_time64:
12325 return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12326 #endif
12327 #ifdef CONFIG_INOTIFY
12328 #if defined(TARGET_NR_inotify_init)
12329 case TARGET_NR_inotify_init:
12330 ret = get_errno(inotify_init());
12331 if (ret >= 0) {
12332 fd_trans_register(ret, &target_inotify_trans);
12334 return ret;
12335 #endif
12336 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12337 case TARGET_NR_inotify_init1:
12338 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12339 fcntl_flags_tbl)));
12340 if (ret >= 0) {
12341 fd_trans_register(ret, &target_inotify_trans);
12343 return ret;
12344 #endif
12345 #if defined(TARGET_NR_inotify_add_watch)
12346 case TARGET_NR_inotify_add_watch:
12347 p = lock_user_string(arg2);
12348 ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12349 unlock_user(p, arg2, 0);
12350 return ret;
12351 #endif
12352 #if defined(TARGET_NR_inotify_rm_watch)
12353 case TARGET_NR_inotify_rm_watch:
12354 return get_errno(inotify_rm_watch(arg1, arg2));
12355 #endif
12356 #endif
12358 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12359 case TARGET_NR_mq_open:
12361 struct mq_attr posix_mq_attr;
12362 struct mq_attr *pposix_mq_attr;
12363 int host_flags;
12365 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12366 pposix_mq_attr = NULL;
12367 if (arg4) {
12368 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12369 return -TARGET_EFAULT;
12371 pposix_mq_attr = &posix_mq_attr;
12373 p = lock_user_string(arg1 - 1);
12374 if (!p) {
12375 return -TARGET_EFAULT;
12377 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12378 unlock_user (p, arg1, 0);
12380 return ret;
12382 case TARGET_NR_mq_unlink:
12383 p = lock_user_string(arg1 - 1);
12384 if (!p) {
12385 return -TARGET_EFAULT;
12387 ret = get_errno(mq_unlink(p));
12388 unlock_user (p, arg1, 0);
12389 return ret;
12391 #ifdef TARGET_NR_mq_timedsend
12392 case TARGET_NR_mq_timedsend:
12394 struct timespec ts;
12396 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12397 if (arg5 != 0) {
12398 if (target_to_host_timespec(&ts, arg5)) {
12399 return -TARGET_EFAULT;
12401 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12402 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12403 return -TARGET_EFAULT;
12405 } else {
12406 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12408 unlock_user (p, arg2, arg3);
12410 return ret;
12411 #endif
12412 #ifdef TARGET_NR_mq_timedsend_time64
12413 case TARGET_NR_mq_timedsend_time64:
12415 struct timespec ts;
12417 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12418 if (arg5 != 0) {
12419 if (target_to_host_timespec64(&ts, arg5)) {
12420 return -TARGET_EFAULT;
12422 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12423 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12424 return -TARGET_EFAULT;
12426 } else {
12427 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12429 unlock_user(p, arg2, arg3);
12431 return ret;
12432 #endif
12434 #ifdef TARGET_NR_mq_timedreceive
12435 case TARGET_NR_mq_timedreceive:
12437 struct timespec ts;
12438 unsigned int prio;
12440 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12441 if (arg5 != 0) {
12442 if (target_to_host_timespec(&ts, arg5)) {
12443 return -TARGET_EFAULT;
12445 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12446 &prio, &ts));
12447 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12448 return -TARGET_EFAULT;
12450 } else {
12451 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12452 &prio, NULL));
12454 unlock_user (p, arg2, arg3);
12455 if (arg4 != 0)
12456 put_user_u32(prio, arg4);
12458 return ret;
12459 #endif
12460 #ifdef TARGET_NR_mq_timedreceive_time64
12461 case TARGET_NR_mq_timedreceive_time64:
12463 struct timespec ts;
12464 unsigned int prio;
12466 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12467 if (arg5 != 0) {
12468 if (target_to_host_timespec64(&ts, arg5)) {
12469 return -TARGET_EFAULT;
12471 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12472 &prio, &ts));
12473 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12474 return -TARGET_EFAULT;
12476 } else {
12477 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12478 &prio, NULL));
12480 unlock_user(p, arg2, arg3);
12481 if (arg4 != 0) {
12482 put_user_u32(prio, arg4);
12485 return ret;
12486 #endif
12488 /* Not implemented for now... */
12489 /* case TARGET_NR_mq_notify: */
12490 /* break; */
12492 case TARGET_NR_mq_getsetattr:
12494 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12495 ret = 0;
12496 if (arg2 != 0) {
12497 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12498 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12499 &posix_mq_attr_out));
12500 } else if (arg3 != 0) {
12501 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12503 if (ret == 0 && arg3 != 0) {
12504 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12507 return ret;
12508 #endif
12510 #ifdef CONFIG_SPLICE
12511 #ifdef TARGET_NR_tee
12512 case TARGET_NR_tee:
12514 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12516 return ret;
12517 #endif
12518 #ifdef TARGET_NR_splice
12519 case TARGET_NR_splice:
12521 loff_t loff_in, loff_out;
12522 loff_t *ploff_in = NULL, *ploff_out = NULL;
12523 if (arg2) {
12524 if (get_user_u64(loff_in, arg2)) {
12525 return -TARGET_EFAULT;
12527 ploff_in = &loff_in;
12529 if (arg4) {
12530 if (get_user_u64(loff_out, arg4)) {
12531 return -TARGET_EFAULT;
12533 ploff_out = &loff_out;
12535 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12536 if (arg2) {
12537 if (put_user_u64(loff_in, arg2)) {
12538 return -TARGET_EFAULT;
12541 if (arg4) {
12542 if (put_user_u64(loff_out, arg4)) {
12543 return -TARGET_EFAULT;
12547 return ret;
12548 #endif
12549 #ifdef TARGET_NR_vmsplice
12550 case TARGET_NR_vmsplice:
12552 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12553 if (vec != NULL) {
12554 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12555 unlock_iovec(vec, arg2, arg3, 0);
12556 } else {
12557 ret = -host_to_target_errno(errno);
12560 return ret;
12561 #endif
12562 #endif /* CONFIG_SPLICE */
12563 #ifdef CONFIG_EVENTFD
12564 #if defined(TARGET_NR_eventfd)
12565 case TARGET_NR_eventfd:
12566 ret = get_errno(eventfd(arg1, 0));
12567 if (ret >= 0) {
12568 fd_trans_register(ret, &target_eventfd_trans);
12570 return ret;
12571 #endif
12572 #if defined(TARGET_NR_eventfd2)
12573 case TARGET_NR_eventfd2:
12575 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12576 if (arg2 & TARGET_O_NONBLOCK) {
12577 host_flags |= O_NONBLOCK;
12579 if (arg2 & TARGET_O_CLOEXEC) {
12580 host_flags |= O_CLOEXEC;
12582 ret = get_errno(eventfd(arg1, host_flags));
12583 if (ret >= 0) {
12584 fd_trans_register(ret, &target_eventfd_trans);
12586 return ret;
12588 #endif
12589 #endif /* CONFIG_EVENTFD */
12590 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12591 case TARGET_NR_fallocate:
12592 #if TARGET_ABI_BITS == 32
12593 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12594 target_offset64(arg5, arg6)));
12595 #else
12596 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12597 #endif
12598 return ret;
12599 #endif
12600 #if defined(CONFIG_SYNC_FILE_RANGE)
12601 #if defined(TARGET_NR_sync_file_range)
12602 case TARGET_NR_sync_file_range:
12603 #if TARGET_ABI_BITS == 32
12604 #if defined(TARGET_MIPS)
12605 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12606 target_offset64(arg5, arg6), arg7));
12607 #else
12608 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12609 target_offset64(arg4, arg5), arg6));
12610 #endif /* !TARGET_MIPS */
12611 #else
12612 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12613 #endif
12614 return ret;
12615 #endif
12616 #if defined(TARGET_NR_sync_file_range2) || \
12617 defined(TARGET_NR_arm_sync_file_range)
12618 #if defined(TARGET_NR_sync_file_range2)
12619 case TARGET_NR_sync_file_range2:
12620 #endif
12621 #if defined(TARGET_NR_arm_sync_file_range)
12622 case TARGET_NR_arm_sync_file_range:
12623 #endif
12624 /* This is like sync_file_range but the arguments are reordered */
12625 #if TARGET_ABI_BITS == 32
12626 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12627 target_offset64(arg5, arg6), arg2));
12628 #else
12629 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12630 #endif
12631 return ret;
12632 #endif
12633 #endif
12634 #if defined(TARGET_NR_signalfd4)
12635 case TARGET_NR_signalfd4:
12636 return do_signalfd4(arg1, arg2, arg4);
12637 #endif
12638 #if defined(TARGET_NR_signalfd)
12639 case TARGET_NR_signalfd:
12640 return do_signalfd4(arg1, arg2, 0);
12641 #endif
12642 #if defined(CONFIG_EPOLL)
12643 #if defined(TARGET_NR_epoll_create)
12644 case TARGET_NR_epoll_create:
12645 return get_errno(epoll_create(arg1));
12646 #endif
12647 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12648 case TARGET_NR_epoll_create1:
12649 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12650 #endif
12651 #if defined(TARGET_NR_epoll_ctl)
12652 case TARGET_NR_epoll_ctl:
12654 struct epoll_event ep;
12655 struct epoll_event *epp = 0;
12656 if (arg4) {
12657 if (arg2 != EPOLL_CTL_DEL) {
12658 struct target_epoll_event *target_ep;
12659 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12660 return -TARGET_EFAULT;
12662 ep.events = tswap32(target_ep->events);
12664 * The epoll_data_t union is just opaque data to the kernel,
12665 * so we transfer all 64 bits across and need not worry what
12666 * actual data type it is.
12668 ep.data.u64 = tswap64(target_ep->data.u64);
12669 unlock_user_struct(target_ep, arg4, 0);
12672 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12673 * non-null pointer, even though this argument is ignored.
12676 epp = &ep;
12678 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12680 #endif
12682 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12683 #if defined(TARGET_NR_epoll_wait)
12684 case TARGET_NR_epoll_wait:
12685 #endif
12686 #if defined(TARGET_NR_epoll_pwait)
12687 case TARGET_NR_epoll_pwait:
12688 #endif
12690 struct target_epoll_event *target_ep;
12691 struct epoll_event *ep;
12692 int epfd = arg1;
12693 int maxevents = arg3;
12694 int timeout = arg4;
12696 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12697 return -TARGET_EINVAL;
12700 target_ep = lock_user(VERIFY_WRITE, arg2,
12701 maxevents * sizeof(struct target_epoll_event), 1);
12702 if (!target_ep) {
12703 return -TARGET_EFAULT;
12706 ep = g_try_new(struct epoll_event, maxevents);
12707 if (!ep) {
12708 unlock_user(target_ep, arg2, 0);
12709 return -TARGET_ENOMEM;
12712 switch (num) {
12713 #if defined(TARGET_NR_epoll_pwait)
12714 case TARGET_NR_epoll_pwait:
12716 sigset_t *set = NULL;
12718 if (arg5) {
12719 ret = process_sigsuspend_mask(&set, arg5, arg6);
12720 if (ret != 0) {
12721 break;
12725 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12726 set, SIGSET_T_SIZE));
12728 if (set) {
12729 finish_sigsuspend_mask(ret);
12731 break;
12733 #endif
12734 #if defined(TARGET_NR_epoll_wait)
12735 case TARGET_NR_epoll_wait:
12736 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12737 NULL, 0));
12738 break;
12739 #endif
12740 default:
12741 ret = -TARGET_ENOSYS;
12743 if (!is_error(ret)) {
12744 int i;
12745 for (i = 0; i < ret; i++) {
12746 target_ep[i].events = tswap32(ep[i].events);
12747 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12749 unlock_user(target_ep, arg2,
12750 ret * sizeof(struct target_epoll_event));
12751 } else {
12752 unlock_user(target_ep, arg2, 0);
12754 g_free(ep);
12755 return ret;
12757 #endif
12758 #endif
12759 #ifdef TARGET_NR_prlimit64
12760 case TARGET_NR_prlimit64:
12762 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12763 struct target_rlimit64 *target_rnew, *target_rold;
12764 struct host_rlimit64 rnew, rold, *rnewp = 0;
12765 int resource = target_to_host_resource(arg2);
12767 if (arg3 && (resource != RLIMIT_AS &&
12768 resource != RLIMIT_DATA &&
12769 resource != RLIMIT_STACK)) {
12770 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12771 return -TARGET_EFAULT;
12773 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12774 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12775 unlock_user_struct(target_rnew, arg3, 0);
12776 rnewp = &rnew;
12779 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12780 if (!is_error(ret) && arg4) {
12781 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12782 return -TARGET_EFAULT;
12784 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12785 target_rold->rlim_max = tswap64(rold.rlim_max);
12786 unlock_user_struct(target_rold, arg4, 1);
12788 return ret;
12790 #endif
12791 #ifdef TARGET_NR_gethostname
12792 case TARGET_NR_gethostname:
12794 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12795 if (name) {
12796 ret = get_errno(gethostname(name, arg2));
12797 unlock_user(name, arg1, arg2);
12798 } else {
12799 ret = -TARGET_EFAULT;
12801 return ret;
12803 #endif
12804 #ifdef TARGET_NR_atomic_cmpxchg_32
12805 case TARGET_NR_atomic_cmpxchg_32:
12807 /* should use start_exclusive from main.c */
12808 abi_ulong mem_value;
12809 if (get_user_u32(mem_value, arg6)) {
12810 target_siginfo_t info;
12811 info.si_signo = SIGSEGV;
12812 info.si_errno = 0;
12813 info.si_code = TARGET_SEGV_MAPERR;
12814 info._sifields._sigfault._addr = arg6;
12815 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12816 ret = 0xdeadbeef;
12819 if (mem_value == arg2)
12820 put_user_u32(arg1, arg6);
12821 return mem_value;
12823 #endif
12824 #ifdef TARGET_NR_atomic_barrier
12825 case TARGET_NR_atomic_barrier:
12826 /* Like the kernel implementation and the
12827 qemu arm barrier, no-op this? */
12828 return 0;
12829 #endif
12831 #ifdef TARGET_NR_timer_create
12832 case TARGET_NR_timer_create:
12834 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12836 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12838 int clkid = arg1;
12839 int timer_index = next_free_host_timer();
12841 if (timer_index < 0) {
12842 ret = -TARGET_EAGAIN;
12843 } else {
12844 timer_t *phtimer = g_posix_timers + timer_index;
12846 if (arg2) {
12847 phost_sevp = &host_sevp;
12848 ret = target_to_host_sigevent(phost_sevp, arg2);
12849 if (ret != 0) {
12850 return ret;
12854 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12855 if (ret) {
12856 phtimer = NULL;
12857 } else {
12858 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12859 return -TARGET_EFAULT;
12863 return ret;
12865 #endif
12867 #ifdef TARGET_NR_timer_settime
12868 case TARGET_NR_timer_settime:
12870 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12871 * struct itimerspec * old_value */
12872 target_timer_t timerid = get_timer_id(arg1);
12874 if (timerid < 0) {
12875 ret = timerid;
12876 } else if (arg3 == 0) {
12877 ret = -TARGET_EINVAL;
12878 } else {
12879 timer_t htimer = g_posix_timers[timerid];
12880 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12882 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12883 return -TARGET_EFAULT;
12885 ret = get_errno(
12886 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12887 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12888 return -TARGET_EFAULT;
12891 return ret;
12893 #endif
12895 #ifdef TARGET_NR_timer_settime64
12896 case TARGET_NR_timer_settime64:
12898 target_timer_t timerid = get_timer_id(arg1);
12900 if (timerid < 0) {
12901 ret = timerid;
12902 } else if (arg3 == 0) {
12903 ret = -TARGET_EINVAL;
12904 } else {
12905 timer_t htimer = g_posix_timers[timerid];
12906 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12908 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12909 return -TARGET_EFAULT;
12911 ret = get_errno(
12912 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12913 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12914 return -TARGET_EFAULT;
12917 return ret;
12919 #endif
12921 #ifdef TARGET_NR_timer_gettime
12922 case TARGET_NR_timer_gettime:
12924 /* args: timer_t timerid, struct itimerspec *curr_value */
12925 target_timer_t timerid = get_timer_id(arg1);
12927 if (timerid < 0) {
12928 ret = timerid;
12929 } else if (!arg2) {
12930 ret = -TARGET_EFAULT;
12931 } else {
12932 timer_t htimer = g_posix_timers[timerid];
12933 struct itimerspec hspec;
12934 ret = get_errno(timer_gettime(htimer, &hspec));
12936 if (host_to_target_itimerspec(arg2, &hspec)) {
12937 ret = -TARGET_EFAULT;
12940 return ret;
12942 #endif
12944 #ifdef TARGET_NR_timer_gettime64
12945 case TARGET_NR_timer_gettime64:
12947 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12948 target_timer_t timerid = get_timer_id(arg1);
12950 if (timerid < 0) {
12951 ret = timerid;
12952 } else if (!arg2) {
12953 ret = -TARGET_EFAULT;
12954 } else {
12955 timer_t htimer = g_posix_timers[timerid];
12956 struct itimerspec hspec;
12957 ret = get_errno(timer_gettime(htimer, &hspec));
12959 if (host_to_target_itimerspec64(arg2, &hspec)) {
12960 ret = -TARGET_EFAULT;
12963 return ret;
12965 #endif
12967 #ifdef TARGET_NR_timer_getoverrun
12968 case TARGET_NR_timer_getoverrun:
12970 /* args: timer_t timerid */
12971 target_timer_t timerid = get_timer_id(arg1);
12973 if (timerid < 0) {
12974 ret = timerid;
12975 } else {
12976 timer_t htimer = g_posix_timers[timerid];
12977 ret = get_errno(timer_getoverrun(htimer));
12979 return ret;
12981 #endif
12983 #ifdef TARGET_NR_timer_delete
12984 case TARGET_NR_timer_delete:
12986 /* args: timer_t timerid */
12987 target_timer_t timerid = get_timer_id(arg1);
12989 if (timerid < 0) {
12990 ret = timerid;
12991 } else {
12992 timer_t htimer = g_posix_timers[timerid];
12993 ret = get_errno(timer_delete(htimer));
12994 g_posix_timers[timerid] = 0;
12996 return ret;
12998 #endif
13000 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13001 case TARGET_NR_timerfd_create:
13002 return get_errno(timerfd_create(arg1,
13003 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13004 #endif
13006 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13007 case TARGET_NR_timerfd_gettime:
13009 struct itimerspec its_curr;
13011 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13013 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13014 return -TARGET_EFAULT;
13017 return ret;
13018 #endif
13020 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13021 case TARGET_NR_timerfd_gettime64:
13023 struct itimerspec its_curr;
13025 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13027 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13028 return -TARGET_EFAULT;
13031 return ret;
13032 #endif
13034 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13035 case TARGET_NR_timerfd_settime:
13037 struct itimerspec its_new, its_old, *p_new;
13039 if (arg3) {
13040 if (target_to_host_itimerspec(&its_new, arg3)) {
13041 return -TARGET_EFAULT;
13043 p_new = &its_new;
13044 } else {
13045 p_new = NULL;
13048 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13050 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13051 return -TARGET_EFAULT;
13054 return ret;
13055 #endif
13057 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13058 case TARGET_NR_timerfd_settime64:
13060 struct itimerspec its_new, its_old, *p_new;
13062 if (arg3) {
13063 if (target_to_host_itimerspec64(&its_new, arg3)) {
13064 return -TARGET_EFAULT;
13066 p_new = &its_new;
13067 } else {
13068 p_new = NULL;
13071 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13073 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13074 return -TARGET_EFAULT;
13077 return ret;
13078 #endif
13080 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13081 case TARGET_NR_ioprio_get:
13082 return get_errno(ioprio_get(arg1, arg2));
13083 #endif
13085 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13086 case TARGET_NR_ioprio_set:
13087 return get_errno(ioprio_set(arg1, arg2, arg3));
13088 #endif
13090 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13091 case TARGET_NR_setns:
13092 return get_errno(setns(arg1, arg2));
13093 #endif
13094 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13095 case TARGET_NR_unshare:
13096 return get_errno(unshare(arg1));
13097 #endif
13098 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13099 case TARGET_NR_kcmp:
13100 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13101 #endif
13102 #ifdef TARGET_NR_swapcontext
13103 case TARGET_NR_swapcontext:
13104 /* PowerPC specific. */
13105 return do_swapcontext(cpu_env, arg1, arg2, arg3);
13106 #endif
13107 #ifdef TARGET_NR_memfd_create
13108 case TARGET_NR_memfd_create:
13109 p = lock_user_string(arg1);
13110 if (!p) {
13111 return -TARGET_EFAULT;
13113 ret = get_errno(memfd_create(p, arg2));
13114 fd_trans_unregister(ret);
13115 unlock_user(p, arg1, 0);
13116 return ret;
13117 #endif
13118 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13119 case TARGET_NR_membarrier:
13120 return get_errno(membarrier(arg1, arg2));
13121 #endif
13123 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13124 case TARGET_NR_copy_file_range:
13126 loff_t inoff, outoff;
13127 loff_t *pinoff = NULL, *poutoff = NULL;
13129 if (arg2) {
13130 if (get_user_u64(inoff, arg2)) {
13131 return -TARGET_EFAULT;
13133 pinoff = &inoff;
13135 if (arg4) {
13136 if (get_user_u64(outoff, arg4)) {
13137 return -TARGET_EFAULT;
13139 poutoff = &outoff;
13141 /* Do not sign-extend the count parameter. */
13142 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13143 (abi_ulong)arg5, arg6));
13144 if (!is_error(ret) && ret > 0) {
13145 if (arg2) {
13146 if (put_user_u64(inoff, arg2)) {
13147 return -TARGET_EFAULT;
13150 if (arg4) {
13151 if (put_user_u64(outoff, arg4)) {
13152 return -TARGET_EFAULT;
13157 return ret;
13158 #endif
13160 #if defined(TARGET_NR_pivot_root)
13161 case TARGET_NR_pivot_root:
13163 void *p2;
13164 p = lock_user_string(arg1); /* new_root */
13165 p2 = lock_user_string(arg2); /* put_old */
13166 if (!p || !p2) {
13167 ret = -TARGET_EFAULT;
13168 } else {
13169 ret = get_errno(pivot_root(p, p2));
13171 unlock_user(p2, arg2, 0);
13172 unlock_user(p, arg1, 0);
13174 return ret;
13175 #endif
13177 default:
13178 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13179 return -TARGET_ENOSYS;
13181 return ret;
13184 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13185 abi_long arg2, abi_long arg3, abi_long arg4,
13186 abi_long arg5, abi_long arg6, abi_long arg7,
13187 abi_long arg8)
13189 CPUState *cpu = env_cpu(cpu_env);
13190 abi_long ret;
13192 #ifdef DEBUG_ERESTARTSYS
13193 /* Debug-only code for exercising the syscall-restart code paths
13194 * in the per-architecture cpu main loops: restart every syscall
13195 * the guest makes once before letting it through.
13198 static bool flag;
13199 flag = !flag;
13200 if (flag) {
13201 return -QEMU_ERESTARTSYS;
13204 #endif
13206 record_syscall_start(cpu, num, arg1,
13207 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13209 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13210 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13213 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13214 arg5, arg6, arg7, arg8);
13216 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13217 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13218 arg3, arg4, arg5, arg6);
13221 record_syscall_return(cpu, num, ret);
13222 return ret;