/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include "qemu/plugin.h"
#include "target_mman.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"
#include "cpu_loop-common.h"

#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

#ifndef CLONE_PIDFD
# define CLONE_PIDFD 0x00001000
#endif

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS                       \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
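
/*
 * Illustrative sketch (assumed shape of the checks in do_fork(), not a
 * verbatim quote): a guest clone request is validated against the masks
 * above roughly as
 *
 *     if (flags & CLONE_VM) {
 *         if (flags & CLONE_INVALID_THREAD_FLAGS) {
 *             return -TARGET_EINVAL;    // thread-style clone, bad bits
 *         }
 *     } else if (flags & CLONE_INVALID_FORK_FLAGS) {
 *         return -TARGET_EINVAL;        // fork-style clone, bad bits
 *     }
 */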

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                   \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                              \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)  \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);             \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                   \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,  \
                  type6 arg6)                                              \
{                                                                          \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);       \
}
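
/*
 * For example, the declaration
 *     _syscall2(int, capget, struct __user_cap_header_struct *, header,
 *               struct __user_cap_data_struct *, data);
 * used later in this file expands to
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     {
 *         return syscall(__NR_capget, header, data);
 *     }
 * so the raw host syscall is issued directly, bypassing any libc wrapper.
 */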

#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC     (1U << 2)
#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
          unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif

static const bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};
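
/*
 * Each row is (target_mask, target_bits, host_mask, host_bits); open-style
 * flag words are translated field by field against this table (via the
 * target_to_host_bitmask()/host_to_target_bitmask() helpers used later in
 * this file), so only flag bits that both sides understand are converted.
 */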

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];
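
/*
 * A timer slot k is claimed by atomically exchanging
 * g_posix_timer_allocated[k] from 0 to 1, so concurrent guest threads
 * cannot grab the same host timer; free_host_timer_slot() releases the
 * slot again with store-release ordering.
 */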
static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif

static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
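
/*
 * Check that the guest buffer at addr (usize bytes) contains only zeroes
 * beyond its first ksize bytes, the usual Linux rule for forward-compatible
 * extensible structs such as sched_attr. Returns 1 if the tail is all zero
 * (or usize <= ksize), 0 if a non-zero byte is found, and -TARGET_EFAULT
 * if the guest memory cannot be read.
 */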
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;
    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}

#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
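
/*
 * For example, safe_syscall2(int, kill, pid_t, pid, int, sig) below
 * defines safe_kill(pid, sig): the syscall is entered via safe_syscall()
 * so that a guest signal arriving just before the host syscall
 * instruction results in QEMU_ERESTARTSYS rather than a lost wakeup
 * (see user/safe-syscall.h for the mechanism).
 */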

safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
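
/*
 * Illustrative call sites (assumed, following the rules in the comment
 * above):
 *     ret = get_errno(safe_ioctl(fd, host_cmd, argptr));
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 * i.e. always the F_*64 constants and struct flock64, giving 64-bit file
 * offsets on 32-bit and 64-bit hosts alike.
 */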

static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_brk = new_brk;
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;
    abi_ulong new_brk, new_host_brk_page;

    /* brk pointers are always untagged */

    /* return old brk value if brk_val unchanged or zero */
    if (!brk_val || brk_val == target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

    /* brk_val and old target_brk might be on the same page */
    if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
        if (brk_val > target_brk) {
            /* empty remaining bytes in (possibly larger) host page */
            memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
        }
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < target_brk) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);

        /* free unused host pages and set new brk_page */
        target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
        brk_page = new_host_brk_page;

        target_brk = brk_val;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = new_host_brk_page - brk_page;
    if (new_alloc_size) {
        mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                            PROT_READ|PROT_WRITE,
                                            MAP_ANON|MAP_PRIVATE, 0, 0));
    } else {
        mapped_addr = brk_page;
    }

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = brk_val;
        brk_page = new_host_brk_page;
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
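
/*
 * In the fdset helpers above, guest fd k is stored in word
 * k / TARGET_ABI_BITS at bit k % TARGET_ABI_BITS: e.g. with
 * TARGET_ABI_BITS == 32, fd 37 is bit 5 of target_fds[1]. The explicit
 * repacking is needed because the host fd_set and the guest abi_ulong
 * bitmap may differ in word size and byte order.
 */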

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
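
/*
 * Worked example: on an Alpha host (HOST_HZ == 1024) emulating a 100 Hz
 * target, 2048 host ticks become (2048 * 100) / 1024 = 200 target
 * clock_t ticks; when HOST_HZ == TARGET_HZ the value passes through
 * unchanged.
 */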

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif

static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}

#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif

#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif

#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif

static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}

static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1774 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1775 struct target_msghdr *target_msgh)
1777 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1778 abi_long msg_controllen;
1779 abi_ulong target_cmsg_addr;
1780 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1781 socklen_t space = 0;
1783 msg_controllen = tswapal(target_msgh->msg_controllen);
1784 if (msg_controllen < sizeof (struct target_cmsghdr))
1785 goto the_end;
1786 target_cmsg_addr = tswapal(target_msgh->msg_control);
1787 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1788 target_cmsg_start = target_cmsg;
1789 if (!target_cmsg)
1790 return -TARGET_EFAULT;
1792 while (cmsg && target_cmsg) {
1793 void *data = CMSG_DATA(cmsg);
1794 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1796 int len = tswapal(target_cmsg->cmsg_len)
1797 - sizeof(struct target_cmsghdr);
1799 space += CMSG_SPACE(len);
1800 if (space > msgh->msg_controllen) {
1801 space -= CMSG_SPACE(len);
1802 /* This is a QEMU bug, since we allocated the payload
1803 * area ourselves (unlike overflow in host-to-target
1804 * conversion, which is just the guest giving us a buffer
1805 * that's too small). It can't happen for the payload types
1806 * we currently support; if it becomes an issue in future
1807 * we would need to improve our allocation strategy to
1808 * something more intelligent than "twice the size of the
1809 * target buffer we're reading from".
1811 qemu_log_mask(LOG_UNIMP,
1812 ("Unsupported ancillary data %d/%d: "
1813 "unhandled msg size\n"),
1814 tswap32(target_cmsg->cmsg_level),
1815 tswap32(target_cmsg->cmsg_type));
1816 break;
1819 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1820 cmsg->cmsg_level = SOL_SOCKET;
1821 } else {
1822 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1824 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1825 cmsg->cmsg_len = CMSG_LEN(len);
1827 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1828 int *fd = (int *)data;
1829 int *target_fd = (int *)target_data;
1830 int i, numfds = len / sizeof(int);
1832 for (i = 0; i < numfds; i++) {
1833 __get_user(fd[i], target_fd + i);
1835 } else if (cmsg->cmsg_level == SOL_SOCKET
1836 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1837 struct ucred *cred = (struct ucred *)data;
1838 struct target_ucred *target_cred =
1839 (struct target_ucred *)target_data;
1841 __get_user(cred->pid, &target_cred->pid);
1842 __get_user(cred->uid, &target_cred->uid);
1843 __get_user(cred->gid, &target_cred->gid);
1844 } else if (cmsg->cmsg_level == SOL_ALG) {
1845 uint32_t *dst = (uint32_t *)data;
1847 memcpy(dst, target_data, len);
1848 /* fix endianness of first 32-bit word */
1849 if (len >= sizeof(uint32_t)) {
1850 *dst = tswap32(*dst);
1852 } else {
1853 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1854 cmsg->cmsg_level, cmsg->cmsg_type);
1855 memcpy(data, target_data, len);
1858 cmsg = CMSG_NXTHDR(msgh, cmsg);
1859 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1860 target_cmsg_start);
1862 unlock_user(target_cmsg, target_cmsg_addr, 0);
1863 the_end:
1864 msgh->msg_controllen = space;
1865 return 0;
1868 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1869 struct msghdr *msgh)
1871 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1872 abi_long msg_controllen;
1873 abi_ulong target_cmsg_addr;
1874 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1875 socklen_t space = 0;
1877 msg_controllen = tswapal(target_msgh->msg_controllen);
1878 if (msg_controllen < sizeof (struct target_cmsghdr))
1879 goto the_end;
1880 target_cmsg_addr = tswapal(target_msgh->msg_control);
1881 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1882 target_cmsg_start = target_cmsg;
1883 if (!target_cmsg)
1884 return -TARGET_EFAULT;
1886 while (cmsg && target_cmsg) {
1887 void *data = CMSG_DATA(cmsg);
1888 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1890 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1891 int tgt_len, tgt_space;
1893 /* We never copy a half-header but may copy half-data;
1894 * this is Linux's behaviour in put_cmsg(). Note that
1895 * truncation here is a guest problem (which we report
1896 * to the guest via the CTRUNC bit), unlike truncation
1897 * in target_to_host_cmsg, which is a QEMU bug.
1899 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1900 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1901 break;
1904 if (cmsg->cmsg_level == SOL_SOCKET) {
1905 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1906 } else {
1907 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1909 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1911 /* Payload types which need a different size of payload on
1912 * the target must adjust tgt_len here.
1914 tgt_len = len;
1915 switch (cmsg->cmsg_level) {
1916 case SOL_SOCKET:
1917 switch (cmsg->cmsg_type) {
1918 case SO_TIMESTAMP:
1919 tgt_len = sizeof(struct target_timeval);
1920 break;
1921 default:
1922 break;
1924 break;
1925 default:
1926 break;
1929 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1930 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1931 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1934 /* We must now copy-and-convert len bytes of payload
1935 * into tgt_len bytes of destination space. Bear in mind
1936 * that in both source and destination we may be dealing
1937 * with a truncated value!
1939 switch (cmsg->cmsg_level) {
1940 case SOL_SOCKET:
1941 switch (cmsg->cmsg_type) {
1942 case SCM_RIGHTS:
1944 int *fd = (int *)data;
1945 int *target_fd = (int *)target_data;
1946 int i, numfds = tgt_len / sizeof(int);
1948 for (i = 0; i < numfds; i++) {
1949 __put_user(fd[i], target_fd + i);
1951 break;
1953 case SO_TIMESTAMP:
1955 struct timeval *tv = (struct timeval *)data;
1956 struct target_timeval *target_tv =
1957 (struct target_timeval *)target_data;
1959 if (len != sizeof(struct timeval) ||
1960 tgt_len != sizeof(struct target_timeval)) {
1961 goto unimplemented;
1964 /* copy struct timeval to target */
1965 __put_user(tv->tv_sec, &target_tv->tv_sec);
1966 __put_user(tv->tv_usec, &target_tv->tv_usec);
1967 break;
1969 case SCM_CREDENTIALS:
1971 struct ucred *cred = (struct ucred *)data;
1972 struct target_ucred *target_cred =
1973 (struct target_ucred *)target_data;
1975 __put_user(cred->pid, &target_cred->pid);
1976 __put_user(cred->uid, &target_cred->uid);
1977 __put_user(cred->gid, &target_cred->gid);
1978 break;
1980 default:
1981 goto unimplemented;
1983 break;
1985 case SOL_IP:
1986 switch (cmsg->cmsg_type) {
1987 case IP_TTL:
1989 uint32_t *v = (uint32_t *)data;
1990 uint32_t *t_int = (uint32_t *)target_data;
1992 if (len != sizeof(uint32_t) ||
1993 tgt_len != sizeof(uint32_t)) {
1994 goto unimplemented;
1996 __put_user(*v, t_int);
1997 break;
1999 case IP_RECVERR:
2001 struct errhdr_t {
2002 struct sock_extended_err ee;
2003 struct sockaddr_in offender;
2005 struct errhdr_t *errh = (struct errhdr_t *)data;
2006 struct errhdr_t *target_errh =
2007 (struct errhdr_t *)target_data;
2009 if (len != sizeof(struct errhdr_t) ||
2010 tgt_len != sizeof(struct errhdr_t)) {
2011 goto unimplemented;
2013 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2014 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2015 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2016 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2017 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2018 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2019 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2020 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2021 (void *) &errh->offender, sizeof(errh->offender));
2022 break;
2024 default:
2025 goto unimplemented;
2027 break;
2029 case SOL_IPV6:
2030 switch (cmsg->cmsg_type) {
2031 case IPV6_HOPLIMIT:
2033 uint32_t *v = (uint32_t *)data;
2034 uint32_t *t_int = (uint32_t *)target_data;
2036 if (len != sizeof(uint32_t) ||
2037 tgt_len != sizeof(uint32_t)) {
2038 goto unimplemented;
2040 __put_user(*v, t_int);
2041 break;
2043 case IPV6_RECVERR:
2045 struct errhdr6_t {
2046 struct sock_extended_err ee;
2047 struct sockaddr_in6 offender;
2049 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2050 struct errhdr6_t *target_errh =
2051 (struct errhdr6_t *)target_data;
2053 if (len != sizeof(struct errhdr6_t) ||
2054 tgt_len != sizeof(struct errhdr6_t)) {
2055 goto unimplemented;
2057 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2058 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2059 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2060 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2061 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2062 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2063 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2064 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2065 (void *) &errh->offender, sizeof(errh->offender));
2066 break;
2068 default:
2069 goto unimplemented;
2071 break;
2073 default:
2074 unimplemented:
2075 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2076 cmsg->cmsg_level, cmsg->cmsg_type);
2077 memcpy(target_data, data, MIN(len, tgt_len));
2078 if (tgt_len > len) {
2079 memset(target_data + len, 0, tgt_len - len);
2083 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2084 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2085 if (msg_controllen < tgt_space) {
2086 tgt_space = msg_controllen;
2088 msg_controllen -= tgt_space;
2089 space += tgt_space;
2090 cmsg = CMSG_NXTHDR(msgh, cmsg);
2091 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2092 target_cmsg_start);
2094 unlock_user(target_cmsg, target_cmsg_addr, space);
2095 the_end:
2096 target_msgh->msg_controllen = tswapal(space);
2097 return 0;
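/*
 * do_setsockopt() demultiplexes on (level, optname) because both the
 * constants and the option payload layouts can differ between guest
 * and host ABIs.  A sketch of the simplest guest call it emulates
 * (illustrative only; "s" is an assumed TCP socket):
 *
 *     int one = 1;
 *     setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
 *
 * For plain-int options like this, the 32-bit guest value is read with
 * get_user_u32() and passed straight to the host setsockopt(); only
 * structured payloads (ip_mreqn, timeval, sock_fprog, ...) need
 * field-by-field conversion.
 */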
2100 /* do_setsockopt() must return target values and target errnos. */
2101 static abi_long do_setsockopt(int sockfd, int level, int optname,
2102 abi_ulong optval_addr, socklen_t optlen)
2104 abi_long ret;
2105 int val;
2106 struct ip_mreqn *ip_mreq;
2107 struct ip_mreq_source *ip_mreq_source;
2109 switch (level) {
2110 case SOL_TCP:
2111 case SOL_UDP:
2112 /* TCP and UDP options all take an 'int' value. */
2113 if (optlen < sizeof(uint32_t))
2114 return -TARGET_EINVAL;
2116 if (get_user_u32(val, optval_addr))
2117 return -TARGET_EFAULT;
2118 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2119 break;
2120 case SOL_IP:
2121 switch (optname) {
2122 case IP_TOS:
2123 case IP_TTL:
2124 case IP_HDRINCL:
2125 case IP_ROUTER_ALERT:
2126 case IP_RECVOPTS:
2127 case IP_RETOPTS:
2128 case IP_PKTINFO:
2129 case IP_MTU_DISCOVER:
2130 case IP_RECVERR:
2131 case IP_RECVTTL:
2132 case IP_RECVTOS:
2133 #ifdef IP_FREEBIND
2134 case IP_FREEBIND:
2135 #endif
2136 case IP_MULTICAST_TTL:
2137 case IP_MULTICAST_LOOP:
2138 val = 0;
2139 if (optlen >= sizeof(uint32_t)) {
2140 if (get_user_u32(val, optval_addr))
2141 return -TARGET_EFAULT;
2142 } else if (optlen >= 1) {
2143 if (get_user_u8(val, optval_addr))
2144 return -TARGET_EFAULT;
2146 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2147 break;
2148 case IP_ADD_MEMBERSHIP:
2149 case IP_DROP_MEMBERSHIP:
2150 if (optlen < sizeof (struct target_ip_mreq) ||
2151 optlen > sizeof (struct target_ip_mreqn))
2152 return -TARGET_EINVAL;
2154 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2155 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2156 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2157 break;
2159 case IP_BLOCK_SOURCE:
2160 case IP_UNBLOCK_SOURCE:
2161 case IP_ADD_SOURCE_MEMBERSHIP:
2162 case IP_DROP_SOURCE_MEMBERSHIP:
2163 if (optlen != sizeof (struct target_ip_mreq_source))
2164 return -TARGET_EINVAL;
2166 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2167 if (!ip_mreq_source) {
2168 return -TARGET_EFAULT;
2170 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2171 unlock_user (ip_mreq_source, optval_addr, 0);
2172 break;
2174 default:
2175 goto unimplemented;
2177 break;
2178 case SOL_IPV6:
2179 switch (optname) {
2180 case IPV6_MTU_DISCOVER:
2181 case IPV6_MTU:
2182 case IPV6_V6ONLY:
2183 case IPV6_RECVPKTINFO:
2184 case IPV6_UNICAST_HOPS:
2185 case IPV6_MULTICAST_HOPS:
2186 case IPV6_MULTICAST_LOOP:
2187 case IPV6_RECVERR:
2188 case IPV6_RECVHOPLIMIT:
2189 case IPV6_2292HOPLIMIT:
2190 case IPV6_CHECKSUM:
2191 case IPV6_ADDRFORM:
2192 case IPV6_2292PKTINFO:
2193 case IPV6_RECVTCLASS:
2194 case IPV6_RECVRTHDR:
2195 case IPV6_2292RTHDR:
2196 case IPV6_RECVHOPOPTS:
2197 case IPV6_2292HOPOPTS:
2198 case IPV6_RECVDSTOPTS:
2199 case IPV6_2292DSTOPTS:
2200 case IPV6_TCLASS:
2201 case IPV6_ADDR_PREFERENCES:
2202 #ifdef IPV6_RECVPATHMTU
2203 case IPV6_RECVPATHMTU:
2204 #endif
2205 #ifdef IPV6_TRANSPARENT
2206 case IPV6_TRANSPARENT:
2207 #endif
2208 #ifdef IPV6_FREEBIND
2209 case IPV6_FREEBIND:
2210 #endif
2211 #ifdef IPV6_RECVORIGDSTADDR
2212 case IPV6_RECVORIGDSTADDR:
2213 #endif
2214 val = 0;
2215 if (optlen < sizeof(uint32_t)) {
2216 return -TARGET_EINVAL;
2218 if (get_user_u32(val, optval_addr)) {
2219 return -TARGET_EFAULT;
2221 ret = get_errno(setsockopt(sockfd, level, optname,
2222 &val, sizeof(val)));
2223 break;
2224 case IPV6_PKTINFO:
2226 struct in6_pktinfo pki;
2228 if (optlen < sizeof(pki)) {
2229 return -TARGET_EINVAL;
2232 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2233 return -TARGET_EFAULT;
2236 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2238 ret = get_errno(setsockopt(sockfd, level, optname,
2239 &pki, sizeof(pki)));
2240 break;
2242 case IPV6_ADD_MEMBERSHIP:
2243 case IPV6_DROP_MEMBERSHIP:
2245 struct ipv6_mreq ipv6mreq;
2247 if (optlen < sizeof(ipv6mreq)) {
2248 return -TARGET_EINVAL;
2251 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2252 return -TARGET_EFAULT;
2255 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2257 ret = get_errno(setsockopt(sockfd, level, optname,
2258 &ipv6mreq, sizeof(ipv6mreq)));
2259 break;
2261 default:
2262 goto unimplemented;
2264 break;
2265 case SOL_ICMPV6:
2266 switch (optname) {
2267 case ICMPV6_FILTER:
2269 struct icmp6_filter icmp6f;
2271 if (optlen > sizeof(icmp6f)) {
2272 optlen = sizeof(icmp6f);
2275 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2276 return -TARGET_EFAULT;
2279 for (val = 0; val < 8; val++) {
2280 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2283 ret = get_errno(setsockopt(sockfd, level, optname,
2284 &icmp6f, optlen));
2285 break;
2287 default:
2288 goto unimplemented;
2290 break;
2291 case SOL_RAW:
2292 switch (optname) {
2293 case ICMP_FILTER:
2294 case IPV6_CHECKSUM:
2295 /* these take a u32 value */
2296 if (optlen < sizeof(uint32_t)) {
2297 return -TARGET_EINVAL;
2300 if (get_user_u32(val, optval_addr)) {
2301 return -TARGET_EFAULT;
2303 ret = get_errno(setsockopt(sockfd, level, optname,
2304 &val, sizeof(val)));
2305 break;
2307 default:
2308 goto unimplemented;
2310 break;
2311 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2312 case SOL_ALG:
2313 switch (optname) {
2314 case ALG_SET_KEY:
2316 char *alg_key = g_malloc(optlen);
2318 if (!alg_key) {
2319 return -TARGET_ENOMEM;
2321 if (copy_from_user(alg_key, optval_addr, optlen)) {
2322 g_free(alg_key);
2323 return -TARGET_EFAULT;
2325 ret = get_errno(setsockopt(sockfd, level, optname,
2326 alg_key, optlen));
2327 g_free(alg_key);
2328 break;
2330 case ALG_SET_AEAD_AUTHSIZE:
2332 ret = get_errno(setsockopt(sockfd, level, optname,
2333 NULL, optlen));
2334 break;
2336 default:
2337 goto unimplemented;
2339 break;
2340 #endif
2341 case TARGET_SOL_SOCKET:
2342 switch (optname) {
2343 case TARGET_SO_RCVTIMEO:
2345 struct timeval tv;
2347 optname = SO_RCVTIMEO;
2349 set_timeout:
2350 if (optlen != sizeof(struct target_timeval)) {
2351 return -TARGET_EINVAL;
2354 if (copy_from_user_timeval(&tv, optval_addr)) {
2355 return -TARGET_EFAULT;
2358 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2359 &tv, sizeof(tv)));
2360 return ret;
2362 case TARGET_SO_SNDTIMEO:
2363 optname = SO_SNDTIMEO;
2364 goto set_timeout;
2365 case TARGET_SO_ATTACH_FILTER:
2367 struct target_sock_fprog *tfprog;
2368 struct target_sock_filter *tfilter;
2369 struct sock_fprog fprog;
2370 struct sock_filter *filter;
2371 int i;
2373 if (optlen != sizeof(*tfprog)) {
2374 return -TARGET_EINVAL;
2376 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2377 return -TARGET_EFAULT;
2379 if (!lock_user_struct(VERIFY_READ, tfilter,
2380 tswapal(tfprog->filter), 0)) {
2381 unlock_user_struct(tfprog, optval_addr, 1);
2382 return -TARGET_EFAULT;
2385 fprog.len = tswap16(tfprog->len);
2386 filter = g_try_new(struct sock_filter, fprog.len);
2387 if (filter == NULL) {
2388 unlock_user_struct(tfilter, tfprog->filter, 1);
2389 unlock_user_struct(tfprog, optval_addr, 1);
2390 return -TARGET_ENOMEM;
2392 for (i = 0; i < fprog.len; i++) {
2393 filter[i].code = tswap16(tfilter[i].code);
2394 filter[i].jt = tfilter[i].jt;
2395 filter[i].jf = tfilter[i].jf;
2396 filter[i].k = tswap32(tfilter[i].k);
2398 fprog.filter = filter;
2400 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2401 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2402 g_free(filter);
2404 unlock_user_struct(tfilter, tfprog->filter, 1);
2405 unlock_user_struct(tfprog, optval_addr, 1);
2406 return ret;
2408 case TARGET_SO_BINDTODEVICE:
2410 char *dev_ifname, *addr_ifname;
2412 if (optlen > IFNAMSIZ - 1) {
2413 optlen = IFNAMSIZ - 1;
2415 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2416 if (!dev_ifname) {
2417 return -TARGET_EFAULT;
2419 optname = SO_BINDTODEVICE;
2420 addr_ifname = alloca(IFNAMSIZ);
2421 memcpy(addr_ifname, dev_ifname, optlen);
2422 addr_ifname[optlen] = 0;
2423 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2424 addr_ifname, optlen));
2425 unlock_user (dev_ifname, optval_addr, 0);
2426 return ret;
2428 case TARGET_SO_LINGER:
2430 struct linger lg;
2431 struct target_linger *tlg;
2433 if (optlen != sizeof(struct target_linger)) {
2434 return -TARGET_EINVAL;
2436 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2437 return -TARGET_EFAULT;
2439 __get_user(lg.l_onoff, &tlg->l_onoff);
2440 __get_user(lg.l_linger, &tlg->l_linger);
2441 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2442 &lg, sizeof(lg)));
2443 unlock_user_struct(tlg, optval_addr, 0);
2444 return ret;
2446 /* Options with 'int' argument. */
2447 case TARGET_SO_DEBUG:
2448 optname = SO_DEBUG;
2449 break;
2450 case TARGET_SO_REUSEADDR:
2451 optname = SO_REUSEADDR;
2452 break;
2453 #ifdef SO_REUSEPORT
2454 case TARGET_SO_REUSEPORT:
2455 optname = SO_REUSEPORT;
2456 break;
2457 #endif
2458 case TARGET_SO_TYPE:
2459 optname = SO_TYPE;
2460 break;
2461 case TARGET_SO_ERROR:
2462 optname = SO_ERROR;
2463 break;
2464 case TARGET_SO_DONTROUTE:
2465 optname = SO_DONTROUTE;
2466 break;
2467 case TARGET_SO_BROADCAST:
2468 optname = SO_BROADCAST;
2469 break;
2470 case TARGET_SO_SNDBUF:
2471 optname = SO_SNDBUF;
2472 break;
2473 case TARGET_SO_SNDBUFFORCE:
2474 optname = SO_SNDBUFFORCE;
2475 break;
2476 case TARGET_SO_RCVBUF:
2477 optname = SO_RCVBUF;
2478 break;
2479 case TARGET_SO_RCVBUFFORCE:
2480 optname = SO_RCVBUFFORCE;
2481 break;
2482 case TARGET_SO_KEEPALIVE:
2483 optname = SO_KEEPALIVE;
2484 break;
2485 case TARGET_SO_OOBINLINE:
2486 optname = SO_OOBINLINE;
2487 break;
2488 case TARGET_SO_NO_CHECK:
2489 optname = SO_NO_CHECK;
2490 break;
2491 case TARGET_SO_PRIORITY:
2492 optname = SO_PRIORITY;
2493 break;
2494 #ifdef SO_BSDCOMPAT
2495 case TARGET_SO_BSDCOMPAT:
2496 optname = SO_BSDCOMPAT;
2497 break;
2498 #endif
2499 case TARGET_SO_PASSCRED:
2500 optname = SO_PASSCRED;
2501 break;
2502 case TARGET_SO_PASSSEC:
2503 optname = SO_PASSSEC;
2504 break;
2505 case TARGET_SO_TIMESTAMP:
2506 optname = SO_TIMESTAMP;
2507 break;
2508 case TARGET_SO_RCVLOWAT:
2509 optname = SO_RCVLOWAT;
2510 break;
2511 default:
2512 goto unimplemented;
2514 if (optlen < sizeof(uint32_t))
2515 return -TARGET_EINVAL;
2517 if (get_user_u32(val, optval_addr))
2518 return -TARGET_EFAULT;
2519 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2520 break;
2521 #ifdef SOL_NETLINK
2522 case SOL_NETLINK:
2523 switch (optname) {
2524 case NETLINK_PKTINFO:
2525 case NETLINK_ADD_MEMBERSHIP:
2526 case NETLINK_DROP_MEMBERSHIP:
2527 case NETLINK_BROADCAST_ERROR:
2528 case NETLINK_NO_ENOBUFS:
2529 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2530 case NETLINK_LISTEN_ALL_NSID:
2531 case NETLINK_CAP_ACK:
2532 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2533 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2534 case NETLINK_EXT_ACK:
2535 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2536 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2537 case NETLINK_GET_STRICT_CHK:
2538 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2539 break;
2540 default:
2541 goto unimplemented;
2543 val = 0;
2544 if (optlen < sizeof(uint32_t)) {
2545 return -TARGET_EINVAL;
2547 if (get_user_u32(val, optval_addr)) {
2548 return -TARGET_EFAULT;
2550 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2551 sizeof(val)));
2552 break;
2553 #endif /* SOL_NETLINK */
2554 default:
2555 unimplemented:
2556 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2557 level, optname);
2558 ret = -TARGET_ENOPROTOOPT;
2560 return ret;
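/*
 * do_getsockopt() mirrors do_setsockopt().  One subtlety in the SOL_IP
 * and SOL_IPV6 arms below: Linux allows an int-valued option to be
 * read into a single byte.  Sketch (illustrative; "s" assumed):
 *
 *     unsigned char ttl;
 *     socklen_t len = 1;
 *     getsockopt(s, SOL_IP, IP_TTL, &ttl, &len);
 *
 * When the supplied length is smaller than sizeof(int) and the value
 * fits in a byte, a one-byte result is stored and len remains 1.
 */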
2563 /* do_getsockopt() must return target values and target errnos. */
2564 static abi_long do_getsockopt(int sockfd, int level, int optname,
2565 abi_ulong optval_addr, abi_ulong optlen)
2567 abi_long ret;
2568 int len, val;
2569 socklen_t lv;
2571 switch (level) {
2572 case TARGET_SOL_SOCKET:
2573 level = SOL_SOCKET;
2574 switch (optname) {
2575 /* These don't just return a single integer */
2576 case TARGET_SO_PEERNAME:
2577 goto unimplemented;
2578 case TARGET_SO_RCVTIMEO: {
2579 struct timeval tv;
2580 socklen_t tvlen;
2582 optname = SO_RCVTIMEO;
2584 get_timeout:
2585 if (get_user_u32(len, optlen)) {
2586 return -TARGET_EFAULT;
2588 if (len < 0) {
2589 return -TARGET_EINVAL;
2592 tvlen = sizeof(tv);
2593 ret = get_errno(getsockopt(sockfd, level, optname,
2594 &tv, &tvlen));
2595 if (ret < 0) {
2596 return ret;
2598 if (len > sizeof(struct target_timeval)) {
2599 len = sizeof(struct target_timeval);
2601 if (copy_to_user_timeval(optval_addr, &tv)) {
2602 return -TARGET_EFAULT;
2604 if (put_user_u32(len, optlen)) {
2605 return -TARGET_EFAULT;
2607 break;
2609 case TARGET_SO_SNDTIMEO:
2610 optname = SO_SNDTIMEO;
2611 goto get_timeout;
2612 case TARGET_SO_PEERCRED: {
2613 struct ucred cr;
2614 socklen_t crlen;
2615 struct target_ucred *tcr;
2617 if (get_user_u32(len, optlen)) {
2618 return -TARGET_EFAULT;
2620 if (len < 0) {
2621 return -TARGET_EINVAL;
2624 crlen = sizeof(cr);
2625 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2626 &cr, &crlen));
2627 if (ret < 0) {
2628 return ret;
2630 if (len > crlen) {
2631 len = crlen;
2633 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2634 return -TARGET_EFAULT;
2636 __put_user(cr.pid, &tcr->pid);
2637 __put_user(cr.uid, &tcr->uid);
2638 __put_user(cr.gid, &tcr->gid);
2639 unlock_user_struct(tcr, optval_addr, 1);
2640 if (put_user_u32(len, optlen)) {
2641 return -TARGET_EFAULT;
2643 break;
2645 case TARGET_SO_PEERSEC: {
2646 char *name;
2648 if (get_user_u32(len, optlen)) {
2649 return -TARGET_EFAULT;
2651 if (len < 0) {
2652 return -TARGET_EINVAL;
2654 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2655 if (!name) {
2656 return -TARGET_EFAULT;
2658 lv = len;
2659 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2660 name, &lv));
2661 if (put_user_u32(lv, optlen)) {
2662 ret = -TARGET_EFAULT;
2664 unlock_user(name, optval_addr, lv);
2665 break;
2667 case TARGET_SO_LINGER:
2669 struct linger lg;
2670 socklen_t lglen;
2671 struct target_linger *tlg;
2673 if (get_user_u32(len, optlen)) {
2674 return -TARGET_EFAULT;
2676 if (len < 0) {
2677 return -TARGET_EINVAL;
2680 lglen = sizeof(lg);
2681 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2682 &lg, &lglen));
2683 if (ret < 0) {
2684 return ret;
2686 if (len > lglen) {
2687 len = lglen;
2689 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2690 return -TARGET_EFAULT;
2692 __put_user(lg.l_onoff, &tlg->l_onoff);
2693 __put_user(lg.l_linger, &tlg->l_linger);
2694 unlock_user_struct(tlg, optval_addr, 1);
2695 if (put_user_u32(len, optlen)) {
2696 return -TARGET_EFAULT;
2698 break;
2700 /* Options with 'int' argument. */
2701 case TARGET_SO_DEBUG:
2702 optname = SO_DEBUG;
2703 goto int_case;
2704 case TARGET_SO_REUSEADDR:
2705 optname = SO_REUSEADDR;
2706 goto int_case;
2707 #ifdef SO_REUSEPORT
2708 case TARGET_SO_REUSEPORT:
2709 optname = SO_REUSEPORT;
2710 goto int_case;
2711 #endif
2712 case TARGET_SO_TYPE:
2713 optname = SO_TYPE;
2714 goto int_case;
2715 case TARGET_SO_ERROR:
2716 optname = SO_ERROR;
2717 goto int_case;
2718 case TARGET_SO_DONTROUTE:
2719 optname = SO_DONTROUTE;
2720 goto int_case;
2721 case TARGET_SO_BROADCAST:
2722 optname = SO_BROADCAST;
2723 goto int_case;
2724 case TARGET_SO_SNDBUF:
2725 optname = SO_SNDBUF;
2726 goto int_case;
2727 case TARGET_SO_RCVBUF:
2728 optname = SO_RCVBUF;
2729 goto int_case;
2730 case TARGET_SO_KEEPALIVE:
2731 optname = SO_KEEPALIVE;
2732 goto int_case;
2733 case TARGET_SO_OOBINLINE:
2734 optname = SO_OOBINLINE;
2735 goto int_case;
2736 case TARGET_SO_NO_CHECK:
2737 optname = SO_NO_CHECK;
2738 goto int_case;
2739 case TARGET_SO_PRIORITY:
2740 optname = SO_PRIORITY;
2741 goto int_case;
2742 #ifdef SO_BSDCOMPAT
2743 case TARGET_SO_BSDCOMPAT:
2744 optname = SO_BSDCOMPAT;
2745 goto int_case;
2746 #endif
2747 case TARGET_SO_PASSCRED:
2748 optname = SO_PASSCRED;
2749 goto int_case;
2750 case TARGET_SO_TIMESTAMP:
2751 optname = SO_TIMESTAMP;
2752 goto int_case;
2753 case TARGET_SO_RCVLOWAT:
2754 optname = SO_RCVLOWAT;
2755 goto int_case;
2756 case TARGET_SO_ACCEPTCONN:
2757 optname = SO_ACCEPTCONN;
2758 goto int_case;
2759 case TARGET_SO_PROTOCOL:
2760 optname = SO_PROTOCOL;
2761 goto int_case;
2762 case TARGET_SO_DOMAIN:
2763 optname = SO_DOMAIN;
2764 goto int_case;
2765 default:
2766 goto int_case;
2768 break;
2769 case SOL_TCP:
2770 case SOL_UDP:
2771 /* TCP and UDP options all take an 'int' value. */
2772 int_case:
2773 if (get_user_u32(len, optlen))
2774 return -TARGET_EFAULT;
2775 if (len < 0)
2776 return -TARGET_EINVAL;
2777 lv = sizeof(lv);
2778 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2779 if (ret < 0)
2780 return ret;
2781 switch (optname) {
2782 case SO_TYPE:
2783 val = host_to_target_sock_type(val);
2784 break;
2785 case SO_ERROR:
2786 val = host_to_target_errno(val);
2787 break;
2789 if (len > lv)
2790 len = lv;
2791 if (len == 4) {
2792 if (put_user_u32(val, optval_addr))
2793 return -TARGET_EFAULT;
2794 } else {
2795 if (put_user_u8(val, optval_addr))
2796 return -TARGET_EFAULT;
2798 if (put_user_u32(len, optlen))
2799 return -TARGET_EFAULT;
2800 break;
2801 case SOL_IP:
2802 switch (optname) {
2803 case IP_TOS:
2804 case IP_TTL:
2805 case IP_HDRINCL:
2806 case IP_ROUTER_ALERT:
2807 case IP_RECVOPTS:
2808 case IP_RETOPTS:
2809 case IP_PKTINFO:
2810 case IP_MTU_DISCOVER:
2811 case IP_RECVERR:
2812 case IP_RECVTOS:
2813 #ifdef IP_FREEBIND
2814 case IP_FREEBIND:
2815 #endif
2816 case IP_MULTICAST_TTL:
2817 case IP_MULTICAST_LOOP:
2818 if (get_user_u32(len, optlen))
2819 return -TARGET_EFAULT;
2820 if (len < 0)
2821 return -TARGET_EINVAL;
2822 lv = sizeof(lv);
2823 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2824 if (ret < 0)
2825 return ret;
2826 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2827 len = 1;
2828 if (put_user_u32(len, optlen)
2829 || put_user_u8(val, optval_addr))
2830 return -TARGET_EFAULT;
2831 } else {
2832 if (len > sizeof(int))
2833 len = sizeof(int);
2834 if (put_user_u32(len, optlen)
2835 || put_user_u32(val, optval_addr))
2836 return -TARGET_EFAULT;
2838 break;
2839 default:
2840 ret = -TARGET_ENOPROTOOPT;
2841 break;
2843 break;
2844 case SOL_IPV6:
2845 switch (optname) {
2846 case IPV6_MTU_DISCOVER:
2847 case IPV6_MTU:
2848 case IPV6_V6ONLY:
2849 case IPV6_RECVPKTINFO:
2850 case IPV6_UNICAST_HOPS:
2851 case IPV6_MULTICAST_HOPS:
2852 case IPV6_MULTICAST_LOOP:
2853 case IPV6_RECVERR:
2854 case IPV6_RECVHOPLIMIT:
2855 case IPV6_2292HOPLIMIT:
2856 case IPV6_CHECKSUM:
2857 case IPV6_ADDRFORM:
2858 case IPV6_2292PKTINFO:
2859 case IPV6_RECVTCLASS:
2860 case IPV6_RECVRTHDR:
2861 case IPV6_2292RTHDR:
2862 case IPV6_RECVHOPOPTS:
2863 case IPV6_2292HOPOPTS:
2864 case IPV6_RECVDSTOPTS:
2865 case IPV6_2292DSTOPTS:
2866 case IPV6_TCLASS:
2867 case IPV6_ADDR_PREFERENCES:
2868 #ifdef IPV6_RECVPATHMTU
2869 case IPV6_RECVPATHMTU:
2870 #endif
2871 #ifdef IPV6_TRANSPARENT
2872 case IPV6_TRANSPARENT:
2873 #endif
2874 #ifdef IPV6_FREEBIND
2875 case IPV6_FREEBIND:
2876 #endif
2877 #ifdef IPV6_RECVORIGDSTADDR
2878 case IPV6_RECVORIGDSTADDR:
2879 #endif
2880 if (get_user_u32(len, optlen))
2881 return -TARGET_EFAULT;
2882 if (len < 0)
2883 return -TARGET_EINVAL;
2884 lv = sizeof(lv);
2885 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2886 if (ret < 0)
2887 return ret;
2888 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2889 len = 1;
2890 if (put_user_u32(len, optlen)
2891 || put_user_u8(val, optval_addr))
2892 return -TARGET_EFAULT;
2893 } else {
2894 if (len > sizeof(int))
2895 len = sizeof(int);
2896 if (put_user_u32(len, optlen)
2897 || put_user_u32(val, optval_addr))
2898 return -TARGET_EFAULT;
2900 break;
2901 default:
2902 ret = -TARGET_ENOPROTOOPT;
2903 break;
2905 break;
2906 #ifdef SOL_NETLINK
2907 case SOL_NETLINK:
2908 switch (optname) {
2909 case NETLINK_PKTINFO:
2910 case NETLINK_BROADCAST_ERROR:
2911 case NETLINK_NO_ENOBUFS:
2912 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2913 case NETLINK_LISTEN_ALL_NSID:
2914 case NETLINK_CAP_ACK:
2915 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2916 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2917 case NETLINK_EXT_ACK:
2918 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2919 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2920 case NETLINK_GET_STRICT_CHK:
2921 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2922 if (get_user_u32(len, optlen)) {
2923 return -TARGET_EFAULT;
2925 if (len != sizeof(val)) {
2926 return -TARGET_EINVAL;
2928 lv = len;
2929 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2930 if (ret < 0) {
2931 return ret;
2933 if (put_user_u32(lv, optlen)
2934 || put_user_u32(val, optval_addr)) {
2935 return -TARGET_EFAULT;
2937 break;
2938 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2939 case NETLINK_LIST_MEMBERSHIPS:
2941 uint32_t *results;
2942 int i;
2943 if (get_user_u32(len, optlen)) {
2944 return -TARGET_EFAULT;
2946 if (len < 0) {
2947 return -TARGET_EINVAL;
2949 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2950 if (!results && len > 0) {
2951 return -TARGET_EFAULT;
2953 lv = len;
2954 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2955 if (ret < 0) {
2956 unlock_user(results, optval_addr, 0);
2957 return ret;
2959 /* swap host endianness to target endianness. */
2960 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2961 results[i] = tswap32(results[i]);
2963 if (put_user_u32(lv, optlen)) {
2964 return -TARGET_EFAULT;
2966 unlock_user(results, optval_addr, 0);
2967 break;
2969 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2970 default:
2971 goto unimplemented;
2973 break;
2974 #endif /* SOL_NETLINK */
2975 default:
2976 unimplemented:
2977 qemu_log_mask(LOG_UNIMP,
2978 "getsockopt level=%d optname=%d not yet supported\n",
2979 level, optname);
2980 ret = -TARGET_EOPNOTSUPP;
2981 break;
2983 return ret;
2986 /* Convert target low/high pair representing file offset into the host
2987 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2988 * as the kernel doesn't handle them either.
2990 static void target_to_host_low_high(abi_ulong tlow,
2991 abi_ulong thigh,
2992 unsigned long *hlow,
2993 unsigned long *hhigh)
2995 uint64_t off = tlow |
2996 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2997 TARGET_LONG_BITS / 2;
2999 *hlow = off;
3000 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
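/*
 * A worked example, for a 32-bit guest on a 64-bit host: tlow =
 * 0x89abcdef and thigh = 0x01234567 reassemble into off =
 * 0x0123456789abcdef, so *hlow = off and *hhigh = 0.  The shift is
 * split into two halves of TARGET_LONG_BITS / 2 because a single
 * shift by TARGET_LONG_BITS would be undefined for 64-bit guests
 * (shifting a 64-bit type by its full width); the split form is well
 * defined and evaluates to 0 there, matching the kernel, which
 * likewise takes the whole offset from the low word in that case.
 * *hhigh uses the same trick with HOST_LONG_BITS.
 */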
3003 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3004 abi_ulong count, int copy)
3006 struct target_iovec *target_vec;
3007 struct iovec *vec;
3008 abi_ulong total_len, max_len;
3009 int i;
3010 int err = 0;
3011 bool bad_address = false;
3013 if (count == 0) {
3014 errno = 0;
3015 return NULL;
3017 if (count > IOV_MAX) {
3018 errno = EINVAL;
3019 return NULL;
3022 vec = g_try_new0(struct iovec, count);
3023 if (vec == NULL) {
3024 errno = ENOMEM;
3025 return NULL;
3028 target_vec = lock_user(VERIFY_READ, target_addr,
3029 count * sizeof(struct target_iovec), 1);
3030 if (target_vec == NULL) {
3031 err = EFAULT;
3032 goto fail2;
3035 /* ??? If host page size > target page size, this will result in a
3036 value larger than what we can actually support. */
3037 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3038 total_len = 0;
3040 for (i = 0; i < count; i++) {
3041 abi_ulong base = tswapal(target_vec[i].iov_base);
3042 abi_long len = tswapal(target_vec[i].iov_len);
3044 if (len < 0) {
3045 err = EINVAL;
3046 goto fail;
3047 } else if (len == 0) {
3048 /* A zero-length entry's pointer is ignored. */
3049 vec[i].iov_base = 0;
3050 } else {
3051 vec[i].iov_base = lock_user(type, base, len, copy);
3052 /* If the first buffer pointer is bad, this is a fault. But
3053 * subsequent bad buffers will result in a partial write; this
3054 * is realized by filling the vector with null pointers and
3055 * zero lengths. */
3056 if (!vec[i].iov_base) {
3057 if (i == 0) {
3058 err = EFAULT;
3059 goto fail;
3060 } else {
3061 bad_address = true;
3064 if (bad_address) {
3065 len = 0;
3067 if (len > max_len - total_len) {
3068 len = max_len - total_len;
3071 vec[i].iov_len = len;
3072 total_len += len;
3075 unlock_user(target_vec, target_addr, 0);
3076 return vec;
3078 fail:
3079 while (--i >= 0) {
3080 if (tswapal(target_vec[i].iov_len) > 0) {
3081 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3084 unlock_user(target_vec, target_addr, 0);
3085 fail2:
3086 g_free(vec);
3087 errno = err;
3088 return NULL;
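/*
 * Note that lock_iovec() returns NULL both on failure (with errno set)
 * and for a zero-length vector (with errno cleared to 0); callers such
 * as do_sendrecvmsg_locked() tell the two apart by inspecting errno.
 */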
3091 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3092 abi_ulong count, int copy)
3094 struct target_iovec *target_vec;
3095 int i;
3097 target_vec = lock_user(VERIFY_READ, target_addr,
3098 count * sizeof(struct target_iovec), 1);
3099 if (target_vec) {
3100 for (i = 0; i < count; i++) {
3101 abi_ulong base = tswapal(target_vec[i].iov_base);
3102 abi_long len = tswapal(target_vec[i].iov_len);
3103 if (len < 0) {
3104 break;
3106 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3108 unlock_user(target_vec, target_addr, 0);
3111 g_free(vec);
3114 static inline int target_to_host_sock_type(int *type)
3116 int host_type = 0;
3117 int target_type = *type;
3119 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3120 case TARGET_SOCK_DGRAM:
3121 host_type = SOCK_DGRAM;
3122 break;
3123 case TARGET_SOCK_STREAM:
3124 host_type = SOCK_STREAM;
3125 break;
3126 default:
3127 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3128 break;
3130 if (target_type & TARGET_SOCK_CLOEXEC) {
3131 #if defined(SOCK_CLOEXEC)
3132 host_type |= SOCK_CLOEXEC;
3133 #else
3134 return -TARGET_EINVAL;
3135 #endif
3137 if (target_type & TARGET_SOCK_NONBLOCK) {
3138 #if defined(SOCK_NONBLOCK)
3139 host_type |= SOCK_NONBLOCK;
3140 #elif !defined(O_NONBLOCK)
3141 return -TARGET_EINVAL;
3142 #endif
3144 *type = host_type;
3145 return 0;
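/*
 * Example of the translation above (a sketch; exact flag values are
 * target-dependent): a guest socket(AF_INET, TARGET_SOCK_STREAM |
 * TARGET_SOCK_NONBLOCK, 0) becomes host SOCK_STREAM | SOCK_NONBLOCK
 * where the host defines SOCK_NONBLOCK.  On hosts without it but with
 * O_NONBLOCK, the flag is applied after socket creation by
 * sock_flags_fixup() below; only if neither exists does the
 * conversion fail with -TARGET_EINVAL.
 */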
3148 /* Try to emulate socket type flags after socket creation. */
3149 static int sock_flags_fixup(int fd, int target_type)
3151 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3152 if (target_type & TARGET_SOCK_NONBLOCK) {
3153 int flags = fcntl(fd, F_GETFL);
3154 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3155 close(fd);
3156 return -TARGET_EINVAL;
3159 #endif
3160 return fd;
3163 /* do_socket() must return target values and target errnos. */
3164 static abi_long do_socket(int domain, int type, int protocol)
3166 int target_type = type;
3167 int ret;
3169 ret = target_to_host_sock_type(&type);
3170 if (ret) {
3171 return ret;
3174 if (domain == PF_NETLINK && !(
3175 #ifdef CONFIG_RTNETLINK
3176 protocol == NETLINK_ROUTE ||
3177 #endif
3178 protocol == NETLINK_KOBJECT_UEVENT ||
3179 protocol == NETLINK_AUDIT)) {
3180 return -TARGET_EPROTONOSUPPORT;
3183 if (domain == AF_PACKET ||
3184 (domain == AF_INET && type == SOCK_PACKET)) {
3185 protocol = tswap16(protocol);
3188 ret = get_errno(socket(domain, type, protocol));
3189 if (ret >= 0) {
3190 ret = sock_flags_fixup(ret, target_type);
3191 if (type == SOCK_PACKET) {
3192 /* Handle an obsolete case:
3193 * if the socket type is SOCK_PACKET, bind by name
3195 fd_trans_register(ret, &target_packet_trans);
3196 } else if (domain == PF_NETLINK) {
3197 switch (protocol) {
3198 #ifdef CONFIG_RTNETLINK
3199 case NETLINK_ROUTE:
3200 fd_trans_register(ret, &target_netlink_route_trans);
3201 break;
3202 #endif
3203 case NETLINK_KOBJECT_UEVENT:
3204 /* nothing to do: messages are strings */
3205 break;
3206 case NETLINK_AUDIT:
3207 fd_trans_register(ret, &target_netlink_audit_trans);
3208 break;
3209 default:
3210 g_assert_not_reached();
3214 return ret;
3217 /* do_bind() must return target values and target errnos. */
3218 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3219 socklen_t addrlen)
3221 void *addr;
3222 abi_long ret;
3224 if ((int)addrlen < 0) {
3225 return -TARGET_EINVAL;
3228 addr = alloca(addrlen+1);
3230 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3231 if (ret)
3232 return ret;
3234 return get_errno(bind(sockfd, addr, addrlen));
3237 /* do_connect() must return target values and target errnos. */
3238 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3239 socklen_t addrlen)
3241 void *addr;
3242 abi_long ret;
3244 if ((int)addrlen < 0) {
3245 return -TARGET_EINVAL;
3248 addr = alloca(addrlen+1);
3250 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3251 if (ret)
3252 return ret;
3254 return get_errno(safe_connect(sockfd, addr, addrlen));
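/*
 * Note on control-message buffers in do_sendrecvmsg_locked(): the host
 * staging buffer is sized at twice the guest's msg_controllen because
 * host cmsg headers and payloads can be bigger than the guest's (e.g.
 * an 8-byte target_timeval from a 32-bit guest becomes a 16-byte host
 * struct timeval).  The factor of two is a heuristic: if it ever
 * proves too small, target_to_host_cmsg() logs LOG_UNIMP and stops
 * rather than overrun the buffer.
 */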
3257 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3258 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3259 int flags, int send)
3261 abi_long ret, len;
3262 struct msghdr msg;
3263 abi_ulong count;
3264 struct iovec *vec;
3265 abi_ulong target_vec;
3267 if (msgp->msg_name) {
3268 msg.msg_namelen = tswap32(msgp->msg_namelen);
3269 msg.msg_name = alloca(msg.msg_namelen+1);
3270 ret = target_to_host_sockaddr(fd, msg.msg_name,
3271 tswapal(msgp->msg_name),
3272 msg.msg_namelen);
3273 if (ret == -TARGET_EFAULT) {
3274 /* For connected sockets msg_name and msg_namelen must
3275 * be ignored, so returning EFAULT immediately is wrong.
3276 * Instead, pass a bad msg_name to the host kernel, and
3277 * let it decide whether to return EFAULT or not.
3279 msg.msg_name = (void *)-1;
3280 } else if (ret) {
3281 goto out2;
3283 } else {
3284 msg.msg_name = NULL;
3285 msg.msg_namelen = 0;
3287 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3288 msg.msg_control = alloca(msg.msg_controllen);
3289 memset(msg.msg_control, 0, msg.msg_controllen);
3291 msg.msg_flags = tswap32(msgp->msg_flags);
3293 count = tswapal(msgp->msg_iovlen);
3294 target_vec = tswapal(msgp->msg_iov);
3296 if (count > IOV_MAX) {
3297 /* sendmsg/recvmsg return a different errno for this condition than
3298 * readv/writev, so we must catch it here before lock_iovec() does.
3300 ret = -TARGET_EMSGSIZE;
3301 goto out2;
3304 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3305 target_vec, count, send);
3306 if (vec == NULL) {
3307 ret = -host_to_target_errno(errno);
3308 /* allow sending a packet without any iov, e.g. with the MSG_MORE flag */
3309 if (!send || ret) {
3310 goto out2;
3313 msg.msg_iovlen = count;
3314 msg.msg_iov = vec;
3316 if (send) {
3317 if (fd_trans_target_to_host_data(fd)) {
3318 void *host_msg;
3320 host_msg = g_malloc(msg.msg_iov->iov_len);
3321 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3322 ret = fd_trans_target_to_host_data(fd)(host_msg,
3323 msg.msg_iov->iov_len);
3324 if (ret >= 0) {
3325 msg.msg_iov->iov_base = host_msg;
3326 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3328 g_free(host_msg);
3329 } else {
3330 ret = target_to_host_cmsg(&msg, msgp);
3331 if (ret == 0) {
3332 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3335 } else {
3336 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3337 if (!is_error(ret)) {
3338 len = ret;
3339 if (fd_trans_host_to_target_data(fd)) {
3340 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3341 MIN(msg.msg_iov->iov_len, len));
3343 if (!is_error(ret)) {
3344 ret = host_to_target_cmsg(msgp, &msg);
3346 if (!is_error(ret)) {
3347 msgp->msg_namelen = tswap32(msg.msg_namelen);
3348 msgp->msg_flags = tswap32(msg.msg_flags);
3349 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3350 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3351 msg.msg_name, msg.msg_namelen);
3352 if (ret) {
3353 goto out;
3357 ret = len;
3362 out:
3363 if (vec) {
3364 unlock_iovec(vec, target_vec, count, !send);
3366 out2:
3367 return ret;
3370 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3371 int flags, int send)
3373 abi_long ret;
3374 struct target_msghdr *msgp;
3376 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3377 msgp,
3378 target_msg,
3379 send ? 1 : 0)) {
3380 return -TARGET_EFAULT;
3382 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3383 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3384 return ret;
3387 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3388 * so it might not have this *mmsg-specific flag either.
3390 #ifndef MSG_WAITFORONE
3391 #define MSG_WAITFORONE 0x10000
3392 #endif
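/*
 * do_sendrecvmmsg() emulates sendmmsg()/recvmmsg() as a loop of single
 * do_sendrecvmsg_locked() calls.  A sketch of the guest semantics this
 * preserves (illustrative; "s" and "msgs" assumed):
 *
 *     struct mmsghdr msgs[4];
 *     int n = recvmmsg(s, msgs, 4, MSG_WAITFORONE, NULL);
 *
 * blocks only for the first datagram: once one message has arrived,
 * MSG_DONTWAIT is OR-ed into the flags so later iterations return
 * immediately when nothing more is queued.
 */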
3394 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3395 unsigned int vlen, unsigned int flags,
3396 int send)
3398 struct target_mmsghdr *mmsgp;
3399 abi_long ret = 0;
3400 int i;
3402 if (vlen > UIO_MAXIOV) {
3403 vlen = UIO_MAXIOV;
3406 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3407 if (!mmsgp) {
3408 return -TARGET_EFAULT;
3411 for (i = 0; i < vlen; i++) {
3412 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3413 if (is_error(ret)) {
3414 break;
3416 mmsgp[i].msg_len = tswap32(ret);
3417 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3418 if (flags & MSG_WAITFORONE) {
3419 flags |= MSG_DONTWAIT;
3423 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3425 /* Return number of datagrams sent if we sent any at all;
3426 * otherwise return the error.
3428 if (i) {
3429 return i;
3431 return ret;
3434 /* do_accept4() must return target values and target errnos. */
3435 static abi_long do_accept4(int fd, abi_ulong target_addr,
3436 abi_ulong target_addrlen_addr, int flags)
3438 socklen_t addrlen, ret_addrlen;
3439 void *addr;
3440 abi_long ret;
3441 int host_flags;
3443 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3445 if (target_addr == 0) {
3446 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3449 /* Linux returns EFAULT if the addrlen pointer is invalid */
3450 if (get_user_u32(addrlen, target_addrlen_addr))
3451 return -TARGET_EFAULT;
3453 if ((int)addrlen < 0) {
3454 return -TARGET_EINVAL;
3457 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3458 return -TARGET_EFAULT;
3461 addr = alloca(addrlen);
3463 ret_addrlen = addrlen;
3464 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3465 if (!is_error(ret)) {
3466 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3467 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3468 ret = -TARGET_EFAULT;
3471 return ret;
3474 /* do_getpeername() must return target values and target errnos. */
3475 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3476 abi_ulong target_addrlen_addr)
3478 socklen_t addrlen, ret_addrlen;
3479 void *addr;
3480 abi_long ret;
3482 if (get_user_u32(addrlen, target_addrlen_addr))
3483 return -TARGET_EFAULT;
3485 if ((int)addrlen < 0) {
3486 return -TARGET_EINVAL;
3489 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3490 return -TARGET_EFAULT;
3493 addr = alloca(addrlen);
3495 ret_addrlen = addrlen;
3496 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3497 if (!is_error(ret)) {
3498 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3499 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3500 ret = -TARGET_EFAULT;
3503 return ret;
3506 /* do_getsockname() must return target values and target errnos. */
3507 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3508 abi_ulong target_addrlen_addr)
3510 socklen_t addrlen, ret_addrlen;
3511 void *addr;
3512 abi_long ret;
3514 if (get_user_u32(addrlen, target_addrlen_addr))
3515 return -TARGET_EFAULT;
3517 if ((int)addrlen < 0) {
3518 return -TARGET_EINVAL;
3521 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3522 return -TARGET_EFAULT;
3525 addr = alloca(addrlen);
3527 ret_addrlen = addrlen;
3528 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3529 if (!is_error(ret)) {
3530 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3531 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3532 ret = -TARGET_EFAULT;
3535 return ret;
3538 /* do_socketpair() must return target values and target errnos. */
3539 static abi_long do_socketpair(int domain, int type, int protocol,
3540 abi_ulong target_tab_addr)
3542 int tab[2];
3543 abi_long ret;
3545 target_to_host_sock_type(&type);
3547 ret = get_errno(socketpair(domain, type, protocol, tab));
3548 if (!is_error(ret)) {
3549 if (put_user_s32(tab[0], target_tab_addr)
3550 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3551 ret = -TARGET_EFAULT;
3553 return ret;
3556 /* do_sendto() must return target values and target errnos. */
3557 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3558 abi_ulong target_addr, socklen_t addrlen)
3560 void *addr;
3561 void *host_msg;
3562 void *copy_msg = NULL;
3563 abi_long ret;
3565 if ((int)addrlen < 0) {
3566 return -TARGET_EINVAL;
3569 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3570 if (!host_msg)
3571 return -TARGET_EFAULT;
3572 if (fd_trans_target_to_host_data(fd)) {
3573 copy_msg = host_msg;
3574 host_msg = g_malloc(len);
3575 memcpy(host_msg, copy_msg, len);
3576 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3577 if (ret < 0) {
3578 goto fail;
3581 if (target_addr) {
3582 addr = alloca(addrlen+1);
3583 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3584 if (ret) {
3585 goto fail;
3587 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3588 } else {
3589 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3591 fail:
3592 if (copy_msg) {
3593 g_free(host_msg);
3594 host_msg = copy_msg;
3596 unlock_user(host_msg, msg, 0);
3597 return ret;
3600 /* do_recvfrom() must return target values and target errnos. */
3601 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3602 abi_ulong target_addr,
3603 abi_ulong target_addrlen)
3605 socklen_t addrlen, ret_addrlen;
3606 void *addr;
3607 void *host_msg;
3608 abi_long ret;
3610 if (!msg) {
3611 host_msg = NULL;
3612 } else {
3613 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3614 if (!host_msg) {
3615 return -TARGET_EFAULT;
3618 if (target_addr) {
3619 if (get_user_u32(addrlen, target_addrlen)) {
3620 ret = -TARGET_EFAULT;
3621 goto fail;
3623 if ((int)addrlen < 0) {
3624 ret = -TARGET_EINVAL;
3625 goto fail;
3627 addr = alloca(addrlen);
3628 ret_addrlen = addrlen;
3629 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3630 addr, &ret_addrlen));
3631 } else {
3632 addr = NULL; /* To keep compiler quiet. */
3633 addrlen = 0; /* To keep compiler quiet. */
3634 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3636 if (!is_error(ret)) {
3637 if (fd_trans_host_to_target_data(fd)) {
3638 abi_long trans;
3639 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3640 if (is_error(trans)) {
3641 ret = trans;
3642 goto fail;
3645 if (target_addr) {
3646 host_to_target_sockaddr(target_addr, addr,
3647 MIN(addrlen, ret_addrlen));
3648 if (put_user_u32(ret_addrlen, target_addrlen)) {
3649 ret = -TARGET_EFAULT;
3650 goto fail;
3653 unlock_user(host_msg, msg, len);
3654 } else {
3655 fail:
3656 unlock_user(host_msg, msg, 0);
3658 return ret;
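/*
 * Some targets multiplex the whole socket API through one syscall: the
 * guest passes an operation number and a pointer to a block of
 * arguments.  A sketch of what do_socketcall() unpacks (illustrative):
 *
 *     long args[3] = { AF_INET, SOCK_STREAM, 0 };
 *     socketcall(SYS_SOCKET, args);    equivalent to
 *     socket(AF_INET, SOCK_STREAM, 0);
 *
 * nargs[] records the per-operation argument count, so exactly that
 * many abi_longs are fetched from guest memory before dispatching.
 */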
3661 #ifdef TARGET_NR_socketcall
3662 /* do_socketcall() must return target values and target errnos. */
3663 static abi_long do_socketcall(int num, abi_ulong vptr)
3665 static const unsigned nargs[] = { /* number of arguments per operation */
3666 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3667 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3668 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3669 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3670 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3671 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3672 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3673 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3674 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3675 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3676 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3677 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3678 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3679 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3680 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3681 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3682 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3683 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3684 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3685 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3687 abi_long a[6]; /* max 6 args */
3688 unsigned i;
3690 /* check the range of the first argument num */
3691 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3692 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3693 return -TARGET_EINVAL;
3695 /* ensure we have space for args */
3696 if (nargs[num] > ARRAY_SIZE(a)) {
3697 return -TARGET_EINVAL;
3699 /* collect the arguments in a[] according to nargs[] */
3700 for (i = 0; i < nargs[num]; ++i) {
3701 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3702 return -TARGET_EFAULT;
3705 /* now that we have the args, invoke the appropriate underlying function */
3706 switch (num) {
3707 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3708 return do_socket(a[0], a[1], a[2]);
3709 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3710 return do_bind(a[0], a[1], a[2]);
3711 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3712 return do_connect(a[0], a[1], a[2]);
3713 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3714 return get_errno(listen(a[0], a[1]));
3715 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3716 return do_accept4(a[0], a[1], a[2], 0);
3717 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3718 return do_getsockname(a[0], a[1], a[2]);
3719 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3720 return do_getpeername(a[0], a[1], a[2]);
3721 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3722 return do_socketpair(a[0], a[1], a[2], a[3]);
3723 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3724 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3725 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3726 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3727 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3728 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3729 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3730 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3731 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3732 return get_errno(shutdown(a[0], a[1]));
3733 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3734 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3735 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3736 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3737 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3738 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3739 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3740 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3741 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3742 return do_accept4(a[0], a[1], a[2], a[3]);
3743 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3744 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3745 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3746 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3747 default:
3748 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3749 return -TARGET_EINVAL;
3752 #endif
3754 #define N_SHM_REGIONS 32
3756 static struct shm_region {
3757 abi_ulong start;
3758 abi_ulong size;
3759 bool in_use;
3760 } shm_regions[N_SHM_REGIONS];
3762 #ifndef TARGET_SEMID64_DS
3763 /* asm-generic version of this struct */
3764 struct target_semid64_ds
3766 struct target_ipc_perm sem_perm;
3767 abi_ulong sem_otime;
3768 #if TARGET_ABI_BITS == 32
3769 abi_ulong __unused1;
3770 #endif
3771 abi_ulong sem_ctime;
3772 #if TARGET_ABI_BITS == 32
3773 abi_ulong __unused2;
3774 #endif
3775 abi_ulong sem_nsems;
3776 abi_ulong __unused3;
3777 abi_ulong __unused4;
3779 #endif
3781 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3782 abi_ulong target_addr)
3784 struct target_ipc_perm *target_ip;
3785 struct target_semid64_ds *target_sd;
3787 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3788 return -TARGET_EFAULT;
3789 target_ip = &(target_sd->sem_perm);
3790 host_ip->__key = tswap32(target_ip->__key);
3791 host_ip->uid = tswap32(target_ip->uid);
3792 host_ip->gid = tswap32(target_ip->gid);
3793 host_ip->cuid = tswap32(target_ip->cuid);
3794 host_ip->cgid = tswap32(target_ip->cgid);
3795 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3796 host_ip->mode = tswap32(target_ip->mode);
3797 #else
3798 host_ip->mode = tswap16(target_ip->mode);
3799 #endif
3800 #if defined(TARGET_PPC)
3801 host_ip->__seq = tswap32(target_ip->__seq);
3802 #else
3803 host_ip->__seq = tswap16(target_ip->__seq);
3804 #endif
3805 unlock_user_struct(target_sd, target_addr, 0);
3806 return 0;
3809 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3810 struct ipc_perm *host_ip)
3812 struct target_ipc_perm *target_ip;
3813 struct target_semid64_ds *target_sd;
3815 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3816 return -TARGET_EFAULT;
3817 target_ip = &(target_sd->sem_perm);
3818 target_ip->__key = tswap32(host_ip->__key);
3819 target_ip->uid = tswap32(host_ip->uid);
3820 target_ip->gid = tswap32(host_ip->gid);
3821 target_ip->cuid = tswap32(host_ip->cuid);
3822 target_ip->cgid = tswap32(host_ip->cgid);
3823 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3824 target_ip->mode = tswap32(host_ip->mode);
3825 #else
3826 target_ip->mode = tswap16(host_ip->mode);
3827 #endif
3828 #if defined(TARGET_PPC)
3829 target_ip->__seq = tswap32(host_ip->__seq);
3830 #else
3831 target_ip->__seq = tswap16(host_ip->__seq);
3832 #endif
3833 unlock_user_struct(target_sd, target_addr, 1);
3834 return 0;
3837 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3838 abi_ulong target_addr)
3840 struct target_semid64_ds *target_sd;
3842 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3843 return -TARGET_EFAULT;
3844 if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
3845 return -TARGET_EFAULT;
3846 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3847 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3848 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3849 unlock_user_struct(target_sd, target_addr, 0);
3850 return 0;
3853 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3854 struct semid_ds *host_sd)
3856 struct target_semid64_ds *target_sd;
3858 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3859 return -TARGET_EFAULT;
3860 if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
3861 return -TARGET_EFAULT;
3862 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3863 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3864 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3865 unlock_user_struct(target_sd, target_addr, 1);
3866 return 0;
3869 struct target_seminfo {
3870 int semmap;
3871 int semmni;
3872 int semmns;
3873 int semmnu;
3874 int semmsl;
3875 int semopm;
3876 int semume;
3877 int semusz;
3878 int semvmx;
3879 int semaem;
3882 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3883 struct seminfo *host_seminfo)
3885 struct target_seminfo *target_seminfo;
3886 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3887 return -TARGET_EFAULT;
3888 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3889 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3890 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3891 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3892 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3893 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3894 __put_user(host_seminfo->semume, &target_seminfo->semume);
3895 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3896 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3897 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3898 unlock_user_struct(target_seminfo, target_addr, 1);
3899 return 0;
3902 union semun {
3903 int val;
3904 struct semid_ds *buf;
3905 unsigned short *array;
3906 struct seminfo *__buf;
3909 union target_semun {
3910 int val;
3911 abi_ulong buf;
3912 abi_ulong array;
3913 abi_ulong __buf;
3916 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3917 abi_ulong target_addr)
3919 int nsems;
3920 unsigned short *array;
3921 union semun semun;
3922 struct semid_ds semid_ds;
3923 int i, ret;
3925 semun.buf = &semid_ds;
3927 ret = semctl(semid, 0, IPC_STAT, semun);
3928 if (ret == -1)
3929 return get_errno(ret);
3931 nsems = semid_ds.sem_nsems;
3933 *host_array = g_try_new(unsigned short, nsems);
3934 if (!*host_array) {
3935 return -TARGET_ENOMEM;
3937 array = lock_user(VERIFY_READ, target_addr,
3938 nsems*sizeof(unsigned short), 1);
3939 if (!array) {
3940 g_free(*host_array);
3941 return -TARGET_EFAULT;
3944 for (i = 0; i < nsems; i++) {
3945 __get_user((*host_array)[i], &array[i]);
3947 unlock_user(array, target_addr, 0);
3949 return 0;
3952 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3953 unsigned short **host_array)
3955 int nsems;
3956 unsigned short *array;
3957 union semun semun;
3958 struct semid_ds semid_ds;
3959 int i, ret;
3961 semun.buf = &semid_ds;
3963 ret = semctl(semid, 0, IPC_STAT, semun);
3964 if (ret == -1)
3965 return get_errno(ret);
3967 nsems = semid_ds.sem_nsems;
3969 array = lock_user(VERIFY_WRITE, target_addr,
3970 nsems*sizeof(unsigned short), 0);
3971 if (!array)
3972 return -TARGET_EFAULT;
3974 for(i=0; i<nsems; i++) {
3975 __put_user((*host_array)[i], &array[i]);
3977 g_free(*host_array);
3978 unlock_user(array, target_addr, 1);
3980 return 0;
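
/*
 * The two helpers above bracket a host semctl(GETALL/SETALL): the array
 * length is not supplied by the guest, so both sides re-query it via
 * IPC_STAT (semid_ds.sem_nsems) before copying.  host_to_target_semarray()
 * also frees the host array that target_to_host_semarray() allocated, so
 * the pair must always be used together, as do_semctl() does below.
 */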
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
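
/*
 * Illustrative walk-through of the GETVAL/SETVAL fix above, assuming a
 * 64-bit big-endian guest on a little-endian host with the unused half of
 * the union zeroed: a guest SETVAL of 5 arrives here as the host integer
 * 0x0000000500000000, whose 4-byte "val" member reads as 0 (the wrong
 * half).  tswapal() turns it into 0x0000000005000000, so "val" now reads
 * 0x05000000, and the final tswap32() recovers the intended 5.
 */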
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops * sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which pass the
 * arguments in a different order than the default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
    (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
    (__nsops), 0, (__sops), (__timeout)
#endif
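
/*
 * For illustration, safe_ipc(IPCOP_semtimedop, semid,
 * SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)) therefore expands to
 *   ..., nsops, timeout, sops       on s390x (five-argument variant), and
 *   ..., nsops, 0, sops, timeout    everywhere else.
 */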
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
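
/*
 * Note on the allocations below: struct msgbuf begins with a long mtype
 * followed by the mtext payload, so g_try_malloc(msgsz + sizeof(long))
 * reserves exactly the header plus msgsz bytes of message text for the
 * host-side copy used by do_msgsnd() and do_msgrcv().
 */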
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC's msgrcv does not use the kludge on the final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
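
/*
 * For illustration: on the default variant the final two sys_ipc arguments
 * expand to a compound-literal array {msgp, msgtyp} plus a trailing 0,
 * i.e. the historical "ipc kludge" in which the real msgp/msgtyp pair is
 * passed indirectly through a two-element array rather than as separate
 * registers.
 */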
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}

struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}

static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
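
/*
 * Targets that need a bigger alignment define TARGET_FORCE_SHMLBA and
 * provide their own target_shmlba().  A hedged sketch of such an override
 * (Arm, for instance, uses a multiple of the page size so that aliases
 * share a cache colour; the exact multiplier is the target's business):
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 */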
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
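
/*
 * Example of the alignment handling above: with a 16 KiB shmlba, a guest
 * shmat(id, 0x12345, SHM_RND) is rounded down to 0x10000 by
 * "shmaddr &= ~(shmlba - 1)", while the same call without SHM_RND fails
 * with -TARGET_EINVAL because 0x12345 is not shmlba-aligned.
 */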
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
        /*
         * The s390 sys_ipc variant has only five parameters instead of six
         * (as in the default variant), and the only difference is the
         * handling of SEMTIMEDOP, where on s390 the third parameter is used
         * as a pointer to a struct timespec where the generic variant uses
         * the fifth parameter.
         */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
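
/*
 * The multiplexed ipc call word packs an IPC version into the upper 16
 * bits and the operation into the lower 16.  For example, a legacy
 * version-0 msgrcv arrives as call == (0 << 16) | IPCOP_msgrcv and takes
 * the target_ipc_kludge path in do_ipc() above.
 */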
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
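
/*
 * FIEMAP_MAX_EXTENTS above is simply (UINT_MAX - header) / extent size:
 * the largest fm_extent_count for which "sizeof(struct fiemap) +
 * fm_extent_count * sizeof(struct fiemap_extent)" still fits in the
 * 32-bit outbufsz computation without wrapping.
 */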
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = g_try_malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}

#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
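
/*
 * The reapurb path above relies on struct live_urb embedding host_urb:
 * the kernel hands back the address of host_urb, and subtracting
 * offsetof(struct live_urb, host_urb) recovers the containing live_urb
 * (the usual container_of pattern), giving access to the guest addresses
 * stashed alongside it.
 */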
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl *)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char *)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t *)host_data = tswap64(*(uint64_t *)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char *)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char *)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void *)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void *)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char *)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char *)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void *)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void *)host_dm + host_dm->data_start;
            int count = *(uint32_t *)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t *)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void *)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}

static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void *)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}

static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}

static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}

static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif

#ifdef HAVE_DRM_H

static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}

static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}

static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}

#endif

static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}

IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
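
/*
 * For illustration only (the exact spelling in ioctls.h may differ): an
 * entry such as IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG)) expands
 * to { TARGET_BLKGETSIZE64, BLKGETSIZE64, "BLKGETSIZE64", IOC_R, 0,
 * { MK_PTR(TYPE_ULONGLONG) } }, which do_ioctl() below services via the
 * generic TYPE_PTR/IOC_R path, while IOCTL_SPECIAL entries route to their
 * dofn handler instead.
 */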
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for (;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch (arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch (ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
    { 0, 0, 0, 0 }
};
5928 static void target_to_host_termios (void *dst, const void *src)
5930 struct host_termios *host = dst;
5931 const struct target_termios *target = src;
5933 host->c_iflag =
5934 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5935 host->c_oflag =
5936 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5937 host->c_cflag =
5938 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5939 host->c_lflag =
5940 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5941 host->c_line = target->c_line;
5943 memset(host->c_cc, 0, sizeof(host->c_cc));
5944 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5945 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5946 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5947 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5948 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5949 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5950 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5951 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5952 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5953 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5954 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5955 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5956 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5957 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5958 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5959 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5960 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5963 static void host_to_target_termios (void *dst, const void *src)
5965 struct target_termios *target = dst;
5966 const struct host_termios *host = src;
5968 target->c_iflag =
5969 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5970 target->c_oflag =
5971 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5972 target->c_cflag =
5973 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5974 target->c_lflag =
5975 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5976 target->c_line = host->c_line;
5978 memset(target->c_cc, 0, sizeof(target->c_cc));
5979 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5980 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5981 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5982 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5983 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5984 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5985 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5986 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5987 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5988 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5989 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5990 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5991 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5992 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5993 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5994 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5995 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5998 static const StructEntry struct_termios_def = {
5999 .convert = { host_to_target_termios, target_to_host_termios },
6000 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6001 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6002 .print = print_termios,
6005 static const bitmask_transtbl mmap_flags_tbl[] = {
6006 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6007 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6008 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6009 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6010 MAP_ANONYMOUS, MAP_ANONYMOUS },
6011 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6012 MAP_GROWSDOWN, MAP_GROWSDOWN },
6013 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6014 MAP_DENYWRITE, MAP_DENYWRITE },
6015 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6016 MAP_EXECUTABLE, MAP_EXECUTABLE },
6017 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6018 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6019 MAP_NORESERVE, MAP_NORESERVE },
6020 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6021 /* MAP_STACK had been ignored by the kernel for quite some time.
6022 Recognize it for the target insofar as we do not want to pass
6023 it through to the host. */
6024 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6025 { 0, 0, 0, 0 }
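/*
 * Exposition added by the editor (not part of the original file): the
 * tables above feed target_to_host_bitmask()/host_to_target_bitmask().
 * A minimal sketch of the translation semantics, using a hypothetical
 * local copy of the row layout:
 *
 *   struct row { unsigned t_mask, t_bits, h_mask, h_bits; };
 *
 *   static unsigned sketch_target_to_host(unsigned target_fields,
 *                                         const struct row *p)
 *   {
 *       unsigned host_fields = 0;
 *       // A row matches when the masked target field equals the row's
 *       // target bits; it then contributes the corresponding host bits.
 *       for (; p->t_mask && p->h_mask; p++) {
 *           if ((target_fields & p->t_mask) == p->t_bits) {
 *               host_fields |= p->h_bits;
 *           }
 *       }
 *       return host_fields;
 *   }
 *
 * For single-bit flags (mask == bits, e.g. MAP_SHARED) a row is a plain
 * pass-through; for multi-bit fields (e.g. CRDLY with CR0..CR3 above)
 * there is one row per encoded value, and only the matching row fires.
 */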
6028 /*
6029 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6030 * TARGET_I386 is defined if TARGET_X86_64 is defined
6031 */
6032 #if defined(TARGET_I386)
6034 /* NOTE: there is really only one LDT shared by all the threads */
6035 static uint8_t *ldt_table;
6037 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6039 int size;
6040 void *p;
6042 if (!ldt_table)
6043 return 0;
6044 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6045 if (size > bytecount)
6046 size = bytecount;
6047 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6048 if (!p)
6049 return -TARGET_EFAULT;
6050 /* ??? Should this be byteswapped? */
6051 memcpy(p, ldt_table, size);
6052 unlock_user(p, ptr, size);
6053 return size;
6056 /* XXX: add locking support */
6057 static abi_long write_ldt(CPUX86State *env,
6058 abi_ulong ptr, unsigned long bytecount, int oldmode)
6060 struct target_modify_ldt_ldt_s ldt_info;
6061 struct target_modify_ldt_ldt_s *target_ldt_info;
6062 int seg_32bit, contents, read_exec_only, limit_in_pages;
6063 int seg_not_present, useable, lm;
6064 uint32_t *lp, entry_1, entry_2;
6066 if (bytecount != sizeof(ldt_info))
6067 return -TARGET_EINVAL;
6068 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6069 return -TARGET_EFAULT;
6070 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6071 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6072 ldt_info.limit = tswap32(target_ldt_info->limit);
6073 ldt_info.flags = tswap32(target_ldt_info->flags);
6074 unlock_user_struct(target_ldt_info, ptr, 0);
6076 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6077 return -TARGET_EINVAL;
6078 seg_32bit = ldt_info.flags & 1;
6079 contents = (ldt_info.flags >> 1) & 3;
6080 read_exec_only = (ldt_info.flags >> 3) & 1;
6081 limit_in_pages = (ldt_info.flags >> 4) & 1;
6082 seg_not_present = (ldt_info.flags >> 5) & 1;
6083 useable = (ldt_info.flags >> 6) & 1;
6084 #ifdef TARGET_ABI32
6085 lm = 0;
6086 #else
6087 lm = (ldt_info.flags >> 7) & 1;
6088 #endif
6089 if (contents == 3) {
6090 if (oldmode)
6091 return -TARGET_EINVAL;
6092 if (seg_not_present == 0)
6093 return -TARGET_EINVAL;
6095 /* allocate the LDT */
6096 if (!ldt_table) {
6097 env->ldt.base = target_mmap(0,
6098 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6099 PROT_READ|PROT_WRITE,
6100 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6101 if (env->ldt.base == -1)
6102 return -TARGET_ENOMEM;
6103 memset(g2h_untagged(env->ldt.base), 0,
6104 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6105 env->ldt.limit = 0xffff;
6106 ldt_table = g2h_untagged(env->ldt.base);
6109 /* NOTE: same code as Linux kernel */
6110 /* Allow LDTs to be cleared by the user. */
6111 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6112 if (oldmode ||
6113 (contents == 0 &&
6114 read_exec_only == 1 &&
6115 seg_32bit == 0 &&
6116 limit_in_pages == 0 &&
6117 seg_not_present == 1 &&
6118 useable == 0 )) {
6119 entry_1 = 0;
6120 entry_2 = 0;
6121 goto install;
6125 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6126 (ldt_info.limit & 0x0ffff);
6127 entry_2 = (ldt_info.base_addr & 0xff000000) |
6128 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6129 (ldt_info.limit & 0xf0000) |
6130 ((read_exec_only ^ 1) << 9) |
6131 (contents << 10) |
6132 ((seg_not_present ^ 1) << 15) |
6133 (seg_32bit << 22) |
6134 (limit_in_pages << 23) |
6135 (lm << 21) |
6136 0x7000;
6137 if (!oldmode)
6138 entry_2 |= (useable << 20);
6140 /* Install the new entry ... */
6141 install:
6142 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6143 lp[0] = tswap32(entry_1);
6144 lp[1] = tswap32(entry_2);
6145 return 0;
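/*
 * Worked example added for exposition: a present, writable, flat 32-bit
 * data segment with base_addr = 0x12345678 and limit = 0xfffff pages
 * (i.e. contents == 0, read_exec_only == 0, seg_32bit == 1,
 * limit_in_pages == 1, seg_not_present == 0, useable == 1, lm == 0)
 * packs into the two descriptor words as:
 *
 *   entry_1 = (0x5678 << 16) | 0xffff = 0x5678ffff
 *   entry_2 = 0x12000000            // base 31..24
 *           | 0x34                  // base 23..16
 *           | 0xf0000               // limit 19..16
 *           | (1 << 9)              // writable   (read_exec_only ^ 1)
 *           | (1 << 15)             // present    (seg_not_present ^ 1)
 *           | (1 << 20)             // AVL        (useable)
 *           | (1 << 22)             // 32-bit default operand size
 *           | (1 << 23)             // granularity (limit in pages)
 *           | 0x7000                // S = 1, DPL = 3
 *           = 0x12dff234
 *
 * i.e. exactly the layout the CPU expects in an LDT/GDT slot; base bits
 * 15..0 live in entry_1's high half alongside limit bits 15..0.
 */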
6148 /* specific and weird i386 syscalls */
6149 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6150 unsigned long bytecount)
6152 abi_long ret;
6154 switch (func) {
6155 case 0:
6156 ret = read_ldt(ptr, bytecount);
6157 break;
6158 case 1:
6159 ret = write_ldt(env, ptr, bytecount, 1);
6160 break;
6161 case 0x11:
6162 ret = write_ldt(env, ptr, bytecount, 0);
6163 break;
6164 default:
6165 ret = -TARGET_ENOSYS;
6166 break;
6168 return ret;
6171 #if defined(TARGET_ABI32)
6172 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6174 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6175 struct target_modify_ldt_ldt_s ldt_info;
6176 struct target_modify_ldt_ldt_s *target_ldt_info;
6177 int seg_32bit, contents, read_exec_only, limit_in_pages;
6178 int seg_not_present, useable, lm;
6179 uint32_t *lp, entry_1, entry_2;
6180 int i;
6182 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6183 if (!target_ldt_info)
6184 return -TARGET_EFAULT;
6185 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6186 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6187 ldt_info.limit = tswap32(target_ldt_info->limit);
6188 ldt_info.flags = tswap32(target_ldt_info->flags);
6189 if (ldt_info.entry_number == -1) {
6190 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6191 if (gdt_table[i] == 0) {
6192 ldt_info.entry_number = i;
6193 target_ldt_info->entry_number = tswap32(i);
6194 break;
6198 unlock_user_struct(target_ldt_info, ptr, 1);
6200 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6201 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6202 return -TARGET_EINVAL;
6203 seg_32bit = ldt_info.flags & 1;
6204 contents = (ldt_info.flags >> 1) & 3;
6205 read_exec_only = (ldt_info.flags >> 3) & 1;
6206 limit_in_pages = (ldt_info.flags >> 4) & 1;
6207 seg_not_present = (ldt_info.flags >> 5) & 1;
6208 useable = (ldt_info.flags >> 6) & 1;
6209 #ifdef TARGET_ABI32
6210 lm = 0;
6211 #else
6212 lm = (ldt_info.flags >> 7) & 1;
6213 #endif
6215 if (contents == 3) {
6216 if (seg_not_present == 0)
6217 return -TARGET_EINVAL;
6220 /* NOTE: same code as Linux kernel */
6221 /* Allow LDTs to be cleared by the user. */
6222 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6223 if ((contents == 0 &&
6224 read_exec_only == 1 &&
6225 seg_32bit == 0 &&
6226 limit_in_pages == 0 &&
6227 seg_not_present == 1 &&
6228 useable == 0 )) {
6229 entry_1 = 0;
6230 entry_2 = 0;
6231 goto install;
6235 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6236 (ldt_info.limit & 0x0ffff);
6237 entry_2 = (ldt_info.base_addr & 0xff000000) |
6238 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6239 (ldt_info.limit & 0xf0000) |
6240 ((read_exec_only ^ 1) << 9) |
6241 (contents << 10) |
6242 ((seg_not_present ^ 1) << 15) |
6243 (seg_32bit << 22) |
6244 (limit_in_pages << 23) |
6245 (useable << 20) |
6246 (lm << 21) |
6247 0x7000;
6249 /* Install the new entry ... */
6250 install:
6251 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6252 lp[0] = tswap32(entry_1);
6253 lp[1] = tswap32(entry_2);
6254 return 0;
6257 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6259 struct target_modify_ldt_ldt_s *target_ldt_info;
6260 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6261 uint32_t base_addr, limit, flags;
6262 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6263 int seg_not_present, useable, lm;
6264 uint32_t *lp, entry_1, entry_2;
6266 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6267 if (!target_ldt_info)
6268 return -TARGET_EFAULT;
6269 idx = tswap32(target_ldt_info->entry_number);
6270 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6271 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6272 unlock_user_struct(target_ldt_info, ptr, 1);
6273 return -TARGET_EINVAL;
6275 lp = (uint32_t *)(gdt_table + idx);
6276 entry_1 = tswap32(lp[0]);
6277 entry_2 = tswap32(lp[1]);
6279 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6280 contents = (entry_2 >> 10) & 3;
6281 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6282 seg_32bit = (entry_2 >> 22) & 1;
6283 limit_in_pages = (entry_2 >> 23) & 1;
6284 useable = (entry_2 >> 20) & 1;
6285 #ifdef TARGET_ABI32
6286 lm = 0;
6287 #else
6288 lm = (entry_2 >> 21) & 1;
6289 #endif
6290 flags = (seg_32bit << 0) | (contents << 1) |
6291 (read_exec_only << 3) | (limit_in_pages << 4) |
6292 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6293 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6294 base_addr = (entry_1 >> 16) |
6295 (entry_2 & 0xff000000) |
6296 ((entry_2 & 0xff) << 16);
6297 target_ldt_info->base_addr = tswapal(base_addr);
6298 target_ldt_info->limit = tswap32(limit);
6299 target_ldt_info->flags = tswap32(flags);
6300 unlock_user_struct(target_ldt_info, ptr, 1);
6301 return 0;
6304 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6306 return -TARGET_ENOSYS;
6308 #else
6309 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6311 abi_long ret = 0;
6312 abi_ulong val;
6313 int idx;
6315 switch(code) {
6316 case TARGET_ARCH_SET_GS:
6317 case TARGET_ARCH_SET_FS:
6318 if (code == TARGET_ARCH_SET_GS)
6319 idx = R_GS;
6320 else
6321 idx = R_FS;
6322 cpu_x86_load_seg(env, idx, 0);
6323 env->segs[idx].base = addr;
6324 break;
6325 case TARGET_ARCH_GET_GS:
6326 case TARGET_ARCH_GET_FS:
6327 if (code == TARGET_ARCH_GET_GS)
6328 idx = R_GS;
6329 else
6330 idx = R_FS;
6331 val = env->segs[idx].base;
6332 if (put_user(val, addr, abi_ulong))
6333 ret = -TARGET_EFAULT;
6334 break;
6335 default:
6336 ret = -TARGET_EINVAL;
6337 break;
6339 return ret;
6341 #endif /* defined(TARGET_ABI32) */
6342 #endif /* defined(TARGET_I386) */
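/*
 * Illustration added for exposition: the guest-side view of the code
 * above.  On an x86_64 guest, a threading runtime typically sets its
 * TLS base with a raw syscall (libc wrappers for arch_prctl are not
 * universally available):
 *
 *   #include <asm/prctl.h>       // ARCH_SET_FS, ARCH_GET_FS
 *   #include <sys/syscall.h>
 *   #include <unistd.h>
 *
 *   static long set_fs_base(unsigned long base)
 *   {
 *       return syscall(SYS_arch_prctl, ARCH_SET_FS, base);
 *   }
 *
 * do_arch_prctl() services exactly this pair: it loads a null selector
 * into the segment register and then writes the requested base directly
 * into env->segs[idx].base.
 */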
6344 /*
6345 * These constants are generic. Supply any that are missing from the host.
6346 */
6347 #ifndef PR_SET_NAME
6348 # define PR_SET_NAME 15
6349 # define PR_GET_NAME 16
6350 #endif
6351 #ifndef PR_SET_FP_MODE
6352 # define PR_SET_FP_MODE 45
6353 # define PR_GET_FP_MODE 46
6354 # define PR_FP_MODE_FR (1 << 0)
6355 # define PR_FP_MODE_FRE (1 << 1)
6356 #endif
6357 #ifndef PR_SVE_SET_VL
6358 # define PR_SVE_SET_VL 50
6359 # define PR_SVE_GET_VL 51
6360 # define PR_SVE_VL_LEN_MASK 0xffff
6361 # define PR_SVE_VL_INHERIT (1 << 17)
6362 #endif
6363 #ifndef PR_PAC_RESET_KEYS
6364 # define PR_PAC_RESET_KEYS 54
6365 # define PR_PAC_APIAKEY (1 << 0)
6366 # define PR_PAC_APIBKEY (1 << 1)
6367 # define PR_PAC_APDAKEY (1 << 2)
6368 # define PR_PAC_APDBKEY (1 << 3)
6369 # define PR_PAC_APGAKEY (1 << 4)
6370 #endif
6371 #ifndef PR_SET_TAGGED_ADDR_CTRL
6372 # define PR_SET_TAGGED_ADDR_CTRL 55
6373 # define PR_GET_TAGGED_ADDR_CTRL 56
6374 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6375 #endif
6376 #ifndef PR_MTE_TCF_SHIFT
6377 # define PR_MTE_TCF_SHIFT 1
6378 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6379 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6380 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6381 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6382 # define PR_MTE_TAG_SHIFT 3
6383 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6384 #endif
6385 #ifndef PR_SET_IO_FLUSHER
6386 # define PR_SET_IO_FLUSHER 57
6387 # define PR_GET_IO_FLUSHER 58
6388 #endif
6389 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6390 # define PR_SET_SYSCALL_USER_DISPATCH 59
6391 #endif
6392 #ifndef PR_SME_SET_VL
6393 # define PR_SME_SET_VL 63
6394 # define PR_SME_GET_VL 64
6395 # define PR_SME_VL_LEN_MASK 0xffff
6396 # define PR_SME_VL_INHERIT (1 << 17)
6397 #endif
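/*
 * Example added for exposition: the PR_SVE_* encoding above packs the
 * vector length (in bytes) and the inherit flag into a single argument,
 * so a guest asking for a 256-bit (32-byte) SVE vector length that
 * child processes should inherit would issue:
 *
 *   prctl(PR_SVE_SET_VL, (32 & PR_SVE_VL_LEN_MASK) | PR_SVE_VL_INHERIT);
 *   // arg2 == 0x20020
 *
 * The fallback #defines above only matter when the host's libc headers
 * predate the constants; the numeric values are fixed kernel ABI.
 */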
6399 #include "target_prctl.h"
6401 static abi_long do_prctl_inval0(CPUArchState *env)
6403 return -TARGET_EINVAL;
6406 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6408 return -TARGET_EINVAL;
6411 #ifndef do_prctl_get_fp_mode
6412 #define do_prctl_get_fp_mode do_prctl_inval0
6413 #endif
6414 #ifndef do_prctl_set_fp_mode
6415 #define do_prctl_set_fp_mode do_prctl_inval1
6416 #endif
6417 #ifndef do_prctl_sve_get_vl
6418 #define do_prctl_sve_get_vl do_prctl_inval0
6419 #endif
6420 #ifndef do_prctl_sve_set_vl
6421 #define do_prctl_sve_set_vl do_prctl_inval1
6422 #endif
6423 #ifndef do_prctl_reset_keys
6424 #define do_prctl_reset_keys do_prctl_inval1
6425 #endif
6426 #ifndef do_prctl_set_tagged_addr_ctrl
6427 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6428 #endif
6429 #ifndef do_prctl_get_tagged_addr_ctrl
6430 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6431 #endif
6432 #ifndef do_prctl_get_unalign
6433 #define do_prctl_get_unalign do_prctl_inval1
6434 #endif
6435 #ifndef do_prctl_set_unalign
6436 #define do_prctl_set_unalign do_prctl_inval1
6437 #endif
6438 #ifndef do_prctl_sme_get_vl
6439 #define do_prctl_sme_get_vl do_prctl_inval0
6440 #endif
6441 #ifndef do_prctl_sme_set_vl
6442 #define do_prctl_sme_set_vl do_prctl_inval1
6443 #endif
6445 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6446 abi_long arg3, abi_long arg4, abi_long arg5)
6448 abi_long ret;
6450 switch (option) {
6451 case PR_GET_PDEATHSIG:
6453 int deathsig;
6454 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6455 arg3, arg4, arg5));
6456 if (!is_error(ret) &&
6457 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6458 return -TARGET_EFAULT;
6460 return ret;
6462 case PR_SET_PDEATHSIG:
6463 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6464 arg3, arg4, arg5));
6465 case PR_GET_NAME:
6467 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6468 if (!name) {
6469 return -TARGET_EFAULT;
6471 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6472 arg3, arg4, arg5));
6473 unlock_user(name, arg2, 16);
6474 return ret;
6476 case PR_SET_NAME:
6478 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6479 if (!name) {
6480 return -TARGET_EFAULT;
6482 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6483 arg3, arg4, arg5));
6484 unlock_user(name, arg2, 0);
6485 return ret;
6487 case PR_GET_FP_MODE:
6488 return do_prctl_get_fp_mode(env);
6489 case PR_SET_FP_MODE:
6490 return do_prctl_set_fp_mode(env, arg2);
6491 case PR_SVE_GET_VL:
6492 return do_prctl_sve_get_vl(env);
6493 case PR_SVE_SET_VL:
6494 return do_prctl_sve_set_vl(env, arg2);
6495 case PR_SME_GET_VL:
6496 return do_prctl_sme_get_vl(env);
6497 case PR_SME_SET_VL:
6498 return do_prctl_sme_set_vl(env, arg2);
6499 case PR_PAC_RESET_KEYS:
6500 if (arg3 || arg4 || arg5) {
6501 return -TARGET_EINVAL;
6503 return do_prctl_reset_keys(env, arg2);
6504 case PR_SET_TAGGED_ADDR_CTRL:
6505 if (arg3 || arg4 || arg5) {
6506 return -TARGET_EINVAL;
6508 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6509 case PR_GET_TAGGED_ADDR_CTRL:
6510 if (arg2 || arg3 || arg4 || arg5) {
6511 return -TARGET_EINVAL;
6513 return do_prctl_get_tagged_addr_ctrl(env);
6515 case PR_GET_UNALIGN:
6516 return do_prctl_get_unalign(env, arg2);
6517 case PR_SET_UNALIGN:
6518 return do_prctl_set_unalign(env, arg2);
6520 case PR_CAP_AMBIENT:
6521 case PR_CAPBSET_READ:
6522 case PR_CAPBSET_DROP:
6523 case PR_GET_DUMPABLE:
6524 case PR_SET_DUMPABLE:
6525 case PR_GET_KEEPCAPS:
6526 case PR_SET_KEEPCAPS:
6527 case PR_GET_SECUREBITS:
6528 case PR_SET_SECUREBITS:
6529 case PR_GET_TIMING:
6530 case PR_SET_TIMING:
6531 case PR_GET_TIMERSLACK:
6532 case PR_SET_TIMERSLACK:
6533 case PR_MCE_KILL:
6534 case PR_MCE_KILL_GET:
6535 case PR_GET_NO_NEW_PRIVS:
6536 case PR_SET_NO_NEW_PRIVS:
6537 case PR_GET_IO_FLUSHER:
6538 case PR_SET_IO_FLUSHER:
6539 /* Some prctl options have no pointer arguments and can be passed through. */
6540 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6542 case PR_GET_CHILD_SUBREAPER:
6543 case PR_SET_CHILD_SUBREAPER:
6544 case PR_GET_SPECULATION_CTRL:
6545 case PR_SET_SPECULATION_CTRL:
6546 case PR_GET_TID_ADDRESS:
6547 /* TODO */
6548 return -TARGET_EINVAL;
6550 case PR_GET_FPEXC:
6551 case PR_SET_FPEXC:
6552 /* Was used for SPE on PowerPC. */
6553 return -TARGET_EINVAL;
6555 case PR_GET_ENDIAN:
6556 case PR_SET_ENDIAN:
6557 case PR_GET_FPEMU:
6558 case PR_SET_FPEMU:
6559 case PR_SET_MM:
6560 case PR_GET_SECCOMP:
6561 case PR_SET_SECCOMP:
6562 case PR_SET_SYSCALL_USER_DISPATCH:
6563 case PR_GET_THP_DISABLE:
6564 case PR_SET_THP_DISABLE:
6565 case PR_GET_TSC:
6566 case PR_SET_TSC:
6567 /* Disabled to prevent the target from disabling functionality we need. */
6568 return -TARGET_EINVAL;
6570 default:
6571 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6572 option);
6573 return -TARGET_EINVAL;
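/*
 * Illustration added for exposition: the PR_SET_NAME/PR_GET_NAME cases
 * above pin exactly 16 bytes because the kernel's comm field is
 * TASK_COMM_LEN (16) bytes, NUL included.  From the guest's side:
 *
 *   #include <sys/prctl.h>
 *
 *   char name[16];
 *   prctl(PR_SET_NAME, (unsigned long)"worker-1", 0, 0, 0);
 *   prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
 *   // name now holds "worker-1" (longer names are truncated to 15
 *   // characters plus the terminating NUL).
 */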
6577 #define NEW_STACK_SIZE 0x40000
6580 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6581 typedef struct {
6582 CPUArchState *env;
6583 pthread_mutex_t mutex;
6584 pthread_cond_t cond;
6585 pthread_t thread;
6586 uint32_t tid;
6587 abi_ulong child_tidptr;
6588 abi_ulong parent_tidptr;
6589 sigset_t sigmask;
6590 } new_thread_info;
6592 static void * G_NORETURN clone_func(void *arg)
6594 new_thread_info *info = arg;
6595 CPUArchState *env;
6596 CPUState *cpu;
6597 TaskState *ts;
6599 rcu_register_thread();
6600 tcg_register_thread();
6601 env = info->env;
6602 cpu = env_cpu(env);
6603 thread_cpu = cpu;
6604 ts = (TaskState *)cpu->opaque;
6605 info->tid = sys_gettid();
6606 task_settid(ts);
6607 if (info->child_tidptr)
6608 put_user_u32(info->tid, info->child_tidptr);
6609 if (info->parent_tidptr)
6610 put_user_u32(info->tid, info->parent_tidptr);
6611 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6612 /* Enable signals. */
6613 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6614 /* Signal to the parent that we're ready. */
6615 pthread_mutex_lock(&info->mutex);
6616 pthread_cond_broadcast(&info->cond);
6617 pthread_mutex_unlock(&info->mutex);
6618 /* Wait until the parent has finished initializing the tls state. */
6619 pthread_mutex_lock(&clone_lock);
6620 pthread_mutex_unlock(&clone_lock);
6621 cpu_loop(env);
6622 /* never exits */
6625 /* do_fork() must return host values and target errnos (unlike most
6626 do_*() functions). */
6627 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6628 abi_ulong parent_tidptr, target_ulong newtls,
6629 abi_ulong child_tidptr)
6631 CPUState *cpu = env_cpu(env);
6632 int ret;
6633 TaskState *ts;
6634 CPUState *new_cpu;
6635 CPUArchState *new_env;
6636 sigset_t sigmask;
6638 flags &= ~CLONE_IGNORED_FLAGS;
6640 /* Emulate vfork() with fork() */
6641 if (flags & CLONE_VFORK)
6642 flags &= ~(CLONE_VFORK | CLONE_VM);
6644 if (flags & CLONE_VM) {
6645 TaskState *parent_ts = (TaskState *)cpu->opaque;
6646 new_thread_info info;
6647 pthread_attr_t attr;
6649 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6650 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6651 return -TARGET_EINVAL;
6654 ts = g_new0(TaskState, 1);
6655 init_task_state(ts);
6657 /* Grab a mutex so that thread setup appears atomic. */
6658 pthread_mutex_lock(&clone_lock);
6660 /*
6661 * If this is our first additional thread, we need to ensure we
6662 * generate code for parallel execution and flush old translations.
6663 * Do this now so that the copy gets CF_PARALLEL too.
6664 */
6665 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6666 cpu->tcg_cflags |= CF_PARALLEL;
6667 tb_flush(cpu);
6670 /* we create a new CPU instance. */
6671 new_env = cpu_copy(env);
6672 /* Init regs that differ from the parent. */
6673 cpu_clone_regs_child(new_env, newsp, flags);
6674 cpu_clone_regs_parent(env, flags);
6675 new_cpu = env_cpu(new_env);
6676 new_cpu->opaque = ts;
6677 ts->bprm = parent_ts->bprm;
6678 ts->info = parent_ts->info;
6679 ts->signal_mask = parent_ts->signal_mask;
6681 if (flags & CLONE_CHILD_CLEARTID) {
6682 ts->child_tidptr = child_tidptr;
6685 if (flags & CLONE_SETTLS) {
6686 cpu_set_tls (new_env, newtls);
6689 memset(&info, 0, sizeof(info));
6690 pthread_mutex_init(&info.mutex, NULL);
6691 pthread_mutex_lock(&info.mutex);
6692 pthread_cond_init(&info.cond, NULL);
6693 info.env = new_env;
6694 if (flags & CLONE_CHILD_SETTID) {
6695 info.child_tidptr = child_tidptr;
6697 if (flags & CLONE_PARENT_SETTID) {
6698 info.parent_tidptr = parent_tidptr;
6701 ret = pthread_attr_init(&attr);
6702 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6703 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6704 /* It is not safe to deliver signals until the child has finished
6705 initializing, so temporarily block all signals. */
6706 sigfillset(&sigmask);
6707 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6708 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6710 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6711 /* TODO: Free new CPU state if thread creation failed. */
6713 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6714 pthread_attr_destroy(&attr);
6715 if (ret == 0) {
6716 /* Wait for the child to initialize. */
6717 pthread_cond_wait(&info.cond, &info.mutex);
6718 ret = info.tid;
6719 } else {
6720 ret = -1;
6722 pthread_mutex_unlock(&info.mutex);
6723 pthread_cond_destroy(&info.cond);
6724 pthread_mutex_destroy(&info.mutex);
6725 pthread_mutex_unlock(&clone_lock);
6726 } else {
6727 /* if no CLONE_VM, we treat it as a fork */
6728 if (flags & CLONE_INVALID_FORK_FLAGS) {
6729 return -TARGET_EINVAL;
6732 /* We can't support custom termination signals */
6733 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6734 return -TARGET_EINVAL;
6737 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6738 if (flags & CLONE_PIDFD) {
6739 return -TARGET_EINVAL;
6741 #endif
6743 /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6744 if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6745 return -TARGET_EINVAL;
6748 if (block_signals()) {
6749 return -QEMU_ERESTARTSYS;
6752 fork_start();
6753 ret = fork();
6754 if (ret == 0) {
6755 /* Child Process. */
6756 cpu_clone_regs_child(env, newsp, flags);
6757 fork_end(1);
6758 /* There is a race condition here. The parent process could
6759 theoretically read the TID in the child process before the child
6760 tid is set. This would require using either ptrace
6761 (not implemented) or making *_tidptr point at a shared memory
6762 mapping. We can't repeat the spinlock hack used above because
6763 the child process gets its own copy of the lock. */
6764 if (flags & CLONE_CHILD_SETTID)
6765 put_user_u32(sys_gettid(), child_tidptr);
6766 if (flags & CLONE_PARENT_SETTID)
6767 put_user_u32(sys_gettid(), parent_tidptr);
6768 ts = (TaskState *)cpu->opaque;
6769 if (flags & CLONE_SETTLS)
6770 cpu_set_tls (env, newtls);
6771 if (flags & CLONE_CHILD_CLEARTID)
6772 ts->child_tidptr = child_tidptr;
6773 } else {
6774 cpu_clone_regs_parent(env, flags);
6775 if (flags & CLONE_PIDFD) {
6776 int pid_fd = 0;
6777 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6778 int pid_child = ret;
6779 pid_fd = pidfd_open(pid_child, 0);
6780 if (pid_fd >= 0) {
6781 fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD)
6782 | FD_CLOEXEC);
6783 } else {
6784 pid_fd = 0;
6786 #endif
6787 put_user_u32(pid_fd, parent_tidptr);
6789 fork_end(0);
6791 g_assert(!cpu_in_exclusive_context(cpu));
6793 return ret;
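/*
 * A minimal sketch, added for exposition, of the startup handshake the
 * CLONE_VM path above performs (reduced to the bare pthread pattern):
 *
 *   // parent
 *   pthread_mutex_lock(&info.mutex);
 *   pthread_create(&info.thread, &attr, clone_func, &info);
 *   pthread_cond_wait(&info.cond, &info.mutex);  // releases mutex while asleep
 *   ... info.tid is now valid ...
 *
 *   // child (clone_func)
 *   info->tid = sys_gettid();
 *   pthread_mutex_lock(&info->mutex);    // parent is parked in cond_wait
 *   pthread_cond_broadcast(&info->cond);
 *   pthread_mutex_unlock(&info->mutex);
 *
 * Because the parent holds info.mutex from before pthread_create() until
 * it blocks in pthread_cond_wait(), the child cannot broadcast before
 * the parent is waiting, so the wakeup (and info.tid) cannot be missed.
 */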
6796 /* warning: does not handle Linux-specific flags... */
6797 static int target_to_host_fcntl_cmd(int cmd)
6799 int ret;
6801 switch(cmd) {
6802 case TARGET_F_DUPFD:
6803 case TARGET_F_GETFD:
6804 case TARGET_F_SETFD:
6805 case TARGET_F_GETFL:
6806 case TARGET_F_SETFL:
6807 case TARGET_F_OFD_GETLK:
6808 case TARGET_F_OFD_SETLK:
6809 case TARGET_F_OFD_SETLKW:
6810 ret = cmd;
6811 break;
6812 case TARGET_F_GETLK:
6813 ret = F_GETLK64;
6814 break;
6815 case TARGET_F_SETLK:
6816 ret = F_SETLK64;
6817 break;
6818 case TARGET_F_SETLKW:
6819 ret = F_SETLKW64;
6820 break;
6821 case TARGET_F_GETOWN:
6822 ret = F_GETOWN;
6823 break;
6824 case TARGET_F_SETOWN:
6825 ret = F_SETOWN;
6826 break;
6827 case TARGET_F_GETSIG:
6828 ret = F_GETSIG;
6829 break;
6830 case TARGET_F_SETSIG:
6831 ret = F_SETSIG;
6832 break;
6833 #if TARGET_ABI_BITS == 32
6834 case TARGET_F_GETLK64:
6835 ret = F_GETLK64;
6836 break;
6837 case TARGET_F_SETLK64:
6838 ret = F_SETLK64;
6839 break;
6840 case TARGET_F_SETLKW64:
6841 ret = F_SETLKW64;
6842 break;
6843 #endif
6844 case TARGET_F_SETLEASE:
6845 ret = F_SETLEASE;
6846 break;
6847 case TARGET_F_GETLEASE:
6848 ret = F_GETLEASE;
6849 break;
6850 #ifdef F_DUPFD_CLOEXEC
6851 case TARGET_F_DUPFD_CLOEXEC:
6852 ret = F_DUPFD_CLOEXEC;
6853 break;
6854 #endif
6855 case TARGET_F_NOTIFY:
6856 ret = F_NOTIFY;
6857 break;
6858 #ifdef F_GETOWN_EX
6859 case TARGET_F_GETOWN_EX:
6860 ret = F_GETOWN_EX;
6861 break;
6862 #endif
6863 #ifdef F_SETOWN_EX
6864 case TARGET_F_SETOWN_EX:
6865 ret = F_SETOWN_EX;
6866 break;
6867 #endif
6868 #ifdef F_SETPIPE_SZ
6869 case TARGET_F_SETPIPE_SZ:
6870 ret = F_SETPIPE_SZ;
6871 break;
6872 case TARGET_F_GETPIPE_SZ:
6873 ret = F_GETPIPE_SZ;
6874 break;
6875 #endif
6876 #ifdef F_ADD_SEALS
6877 case TARGET_F_ADD_SEALS:
6878 ret = F_ADD_SEALS;
6879 break;
6880 case TARGET_F_GET_SEALS:
6881 ret = F_GET_SEALS;
6882 break;
6883 #endif
6884 default:
6885 ret = -TARGET_EINVAL;
6886 break;
6889 #if defined(__powerpc64__)
6890 /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
6891 * values the kernel does not support. The glibc fcntl wrapper adjusts
6892 * them to 5, 6 and 7 before making the syscall(). Since we make the
6893 * syscall directly, adjust to what the kernel supports.
6894 */
6895 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6896 ret -= F_GETLK64 - 5;
6898 #endif
6900 return ret;
6903 #define FLOCK_TRANSTBL \
6904 switch (type) { \
6905 TRANSTBL_CONVERT(F_RDLCK); \
6906 TRANSTBL_CONVERT(F_WRLCK); \
6907 TRANSTBL_CONVERT(F_UNLCK); \
6910 static int target_to_host_flock(int type)
6912 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6913 FLOCK_TRANSTBL
6914 #undef TRANSTBL_CONVERT
6915 return -TARGET_EINVAL;
6918 static int host_to_target_flock(int type)
6920 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6921 FLOCK_TRANSTBL
6922 #undef TRANSTBL_CONVERT
6923 /* If we don't know how to convert a value coming
6924 * from the host, we copy it to the target field as-is.
6925 */
6926 return type;
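/*
 * Expansion note added for exposition: FLOCK_TRANSTBL is an X-macro.
 * With the target_to_host definition of TRANSTBL_CONVERT it expands to
 *
 *   switch (type) {
 *   case TARGET_F_RDLCK: return F_RDLCK;
 *   case TARGET_F_WRLCK: return F_WRLCK;
 *   case TARGET_F_UNLCK: return F_UNLCK;
 *   }
 *
 * and the host_to_target definition flips each case/return pair, so the
 * two directions stay in sync from a single list of lock types.
 */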
6929 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6930 abi_ulong target_flock_addr)
6932 struct target_flock *target_fl;
6933 int l_type;
6935 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6936 return -TARGET_EFAULT;
6939 __get_user(l_type, &target_fl->l_type);
6940 l_type = target_to_host_flock(l_type);
6941 if (l_type < 0) {
6942 return l_type;
6944 fl->l_type = l_type;
6945 __get_user(fl->l_whence, &target_fl->l_whence);
6946 __get_user(fl->l_start, &target_fl->l_start);
6947 __get_user(fl->l_len, &target_fl->l_len);
6948 __get_user(fl->l_pid, &target_fl->l_pid);
6949 unlock_user_struct(target_fl, target_flock_addr, 0);
6950 return 0;
6953 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6954 const struct flock64 *fl)
6956 struct target_flock *target_fl;
6957 short l_type;
6959 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6960 return -TARGET_EFAULT;
6963 l_type = host_to_target_flock(fl->l_type);
6964 __put_user(l_type, &target_fl->l_type);
6965 __put_user(fl->l_whence, &target_fl->l_whence);
6966 __put_user(fl->l_start, &target_fl->l_start);
6967 __put_user(fl->l_len, &target_fl->l_len);
6968 __put_user(fl->l_pid, &target_fl->l_pid);
6969 unlock_user_struct(target_fl, target_flock_addr, 1);
6970 return 0;
6973 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6974 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6976 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6977 struct target_oabi_flock64 {
6978 abi_short l_type;
6979 abi_short l_whence;
6980 abi_llong l_start;
6981 abi_llong l_len;
6982 abi_int l_pid;
6983 } QEMU_PACKED;
6985 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6986 abi_ulong target_flock_addr)
6988 struct target_oabi_flock64 *target_fl;
6989 int l_type;
6991 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6992 return -TARGET_EFAULT;
6995 __get_user(l_type, &target_fl->l_type);
6996 l_type = target_to_host_flock(l_type);
6997 if (l_type < 0) {
6998 return l_type;
7000 fl->l_type = l_type;
7001 __get_user(fl->l_whence, &target_fl->l_whence);
7002 __get_user(fl->l_start, &target_fl->l_start);
7003 __get_user(fl->l_len, &target_fl->l_len);
7004 __get_user(fl->l_pid, &target_fl->l_pid);
7005 unlock_user_struct(target_fl, target_flock_addr, 0);
7006 return 0;
7009 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7010 const struct flock64 *fl)
7012 struct target_oabi_flock64 *target_fl;
7013 short l_type;
7015 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7016 return -TARGET_EFAULT;
7019 l_type = host_to_target_flock(fl->l_type);
7020 __put_user(l_type, &target_fl->l_type);
7021 __put_user(fl->l_whence, &target_fl->l_whence);
7022 __put_user(fl->l_start, &target_fl->l_start);
7023 __put_user(fl->l_len, &target_fl->l_len);
7024 __put_user(fl->l_pid, &target_fl->l_pid);
7025 unlock_user_struct(target_fl, target_flock_addr, 1);
7026 return 0;
7028 #endif
7030 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7031 abi_ulong target_flock_addr)
7033 struct target_flock64 *target_fl;
7034 int l_type;
7036 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7037 return -TARGET_EFAULT;
7040 __get_user(l_type, &target_fl->l_type);
7041 l_type = target_to_host_flock(l_type);
7042 if (l_type < 0) {
7043 return l_type;
7045 fl->l_type = l_type;
7046 __get_user(fl->l_whence, &target_fl->l_whence);
7047 __get_user(fl->l_start, &target_fl->l_start);
7048 __get_user(fl->l_len, &target_fl->l_len);
7049 __get_user(fl->l_pid, &target_fl->l_pid);
7050 unlock_user_struct(target_fl, target_flock_addr, 0);
7051 return 0;
7054 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7055 const struct flock64 *fl)
7057 struct target_flock64 *target_fl;
7058 short l_type;
7060 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7061 return -TARGET_EFAULT;
7064 l_type = host_to_target_flock(fl->l_type);
7065 __put_user(l_type, &target_fl->l_type);
7066 __put_user(fl->l_whence, &target_fl->l_whence);
7067 __put_user(fl->l_start, &target_fl->l_start);
7068 __put_user(fl->l_len, &target_fl->l_len);
7069 __put_user(fl->l_pid, &target_fl->l_pid);
7070 unlock_user_struct(target_fl, target_flock_addr, 1);
7071 return 0;
7074 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7076 struct flock64 fl64;
7077 #ifdef F_GETOWN_EX
7078 struct f_owner_ex fox;
7079 struct target_f_owner_ex *target_fox;
7080 #endif
7081 abi_long ret;
7082 int host_cmd = target_to_host_fcntl_cmd(cmd);
7084 if (host_cmd == -TARGET_EINVAL)
7085 return host_cmd;
7087 switch(cmd) {
7088 case TARGET_F_GETLK:
7089 ret = copy_from_user_flock(&fl64, arg);
7090 if (ret) {
7091 return ret;
7093 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7094 if (ret == 0) {
7095 ret = copy_to_user_flock(arg, &fl64);
7097 break;
7099 case TARGET_F_SETLK:
7100 case TARGET_F_SETLKW:
7101 ret = copy_from_user_flock(&fl64, arg);
7102 if (ret) {
7103 return ret;
7105 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7106 break;
7108 case TARGET_F_GETLK64:
7109 case TARGET_F_OFD_GETLK:
7110 ret = copy_from_user_flock64(&fl64, arg);
7111 if (ret) {
7112 return ret;
7114 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7115 if (ret == 0) {
7116 ret = copy_to_user_flock64(arg, &fl64);
7118 break;
7119 case TARGET_F_SETLK64:
7120 case TARGET_F_SETLKW64:
7121 case TARGET_F_OFD_SETLK:
7122 case TARGET_F_OFD_SETLKW:
7123 ret = copy_from_user_flock64(&fl64, arg);
7124 if (ret) {
7125 return ret;
7127 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7128 break;
7130 case TARGET_F_GETFL:
7131 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7132 if (ret >= 0) {
7133 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7135 break;
7137 case TARGET_F_SETFL:
7138 ret = get_errno(safe_fcntl(fd, host_cmd,
7139 target_to_host_bitmask(arg,
7140 fcntl_flags_tbl)));
7141 break;
7143 #ifdef F_GETOWN_EX
7144 case TARGET_F_GETOWN_EX:
7145 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7146 if (ret >= 0) {
7147 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7148 return -TARGET_EFAULT;
7149 target_fox->type = tswap32(fox.type);
7150 target_fox->pid = tswap32(fox.pid);
7151 unlock_user_struct(target_fox, arg, 1);
7153 break;
7154 #endif
7156 #ifdef F_SETOWN_EX
7157 case TARGET_F_SETOWN_EX:
7158 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7159 return -TARGET_EFAULT;
7160 fox.type = tswap32(target_fox->type);
7161 fox.pid = tswap32(target_fox->pid);
7162 unlock_user_struct(target_fox, arg, 0);
7163 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7164 break;
7165 #endif
7167 case TARGET_F_SETSIG:
7168 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7169 break;
7171 case TARGET_F_GETSIG:
7172 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7173 break;
7175 case TARGET_F_SETOWN:
7176 case TARGET_F_GETOWN:
7177 case TARGET_F_SETLEASE:
7178 case TARGET_F_GETLEASE:
7179 case TARGET_F_SETPIPE_SZ:
7180 case TARGET_F_GETPIPE_SZ:
7181 case TARGET_F_ADD_SEALS:
7182 case TARGET_F_GET_SEALS:
7183 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7184 break;
7186 default:
7187 ret = get_errno(safe_fcntl(fd, cmd, arg));
7188 break;
7190 return ret;
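/*
 * Illustration added for exposition: a guest taking a whole-file write
 * lock, which exercises the TARGET_F_SETLKW path above:
 *
 *   #include <fcntl.h>
 *
 *   struct flock fl = {
 *       .l_type   = F_WRLCK,
 *       .l_whence = SEEK_SET,
 *       .l_start  = 0,
 *       .l_len    = 0,          // 0 means "to end of file"
 *   };
 *   fcntl(fd, F_SETLKW, &fl);   // blocks until the lock is granted
 *
 * copy_from_user_flock() unpacks exactly these fields, translates
 * l_type through the FLOCK table, and safe_fcntl() issues the possibly
 * blocking call in a restart-safe way.
 */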
7193 #ifdef USE_UID16
7195 static inline int high2lowuid(int uid)
7197 if (uid > 65535)
7198 return 65534;
7199 else
7200 return uid;
7203 static inline int high2lowgid(int gid)
7205 if (gid > 65535)
7206 return 65534;
7207 else
7208 return gid;
7211 static inline int low2highuid(int uid)
7213 if ((int16_t)uid == -1)
7214 return -1;
7215 else
7216 return uid;
7219 static inline int low2highgid(int gid)
7221 if ((int16_t)gid == -1)
7222 return -1;
7223 else
7224 return gid;
7226 static inline int tswapid(int id)
7228 return tswap16(id);
7231 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7233 #else /* !USE_UID16 */
7234 static inline int high2lowuid(int uid)
7236 return uid;
7238 static inline int high2lowgid(int gid)
7240 return gid;
7242 static inline int low2highuid(int uid)
7244 return uid;
7246 static inline int low2highgid(int gid)
7248 return gid;
7250 static inline int tswapid(int id)
7252 return tswap32(id);
7255 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7257 #endif /* USE_UID16 */
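/*
 * Worked example added for exposition: with USE_UID16 the kernel ABI
 * carries only 16-bit IDs, so high2lowuid() clamps anything that does
 * not fit to the overflow uid 65534 ("nobody" on typical systems):
 *
 *   high2lowuid(1000)   == 1000
 *   high2lowuid(100000) == 65534
 *
 * In the other direction, low2highuid() only special-cases the 16-bit
 * -1 sentinel (0xffff), so "leave unchanged" arguments to setreuid()
 * and friends keep their meaning after widening.
 */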
7259 /* We must do direct syscalls for setting UID/GID, because we want to
7260 * implement the Linux system call semantics of "change only for this thread",
7261 * not the libc/POSIX semantics of "change for all threads in process".
7262 * (See http://ewontfix.com/17/ for more details.)
7263 * We use the 32-bit version of the syscalls if present; if it is not
7264 * then either the host architecture supports 32-bit UIDs natively with
7265 * the standard syscall, or the 16-bit UID is the best we can do.
7266 */
7267 #ifdef __NR_setuid32
7268 #define __NR_sys_setuid __NR_setuid32
7269 #else
7270 #define __NR_sys_setuid __NR_setuid
7271 #endif
7272 #ifdef __NR_setgid32
7273 #define __NR_sys_setgid __NR_setgid32
7274 #else
7275 #define __NR_sys_setgid __NR_setgid
7276 #endif
7277 #ifdef __NR_setresuid32
7278 #define __NR_sys_setresuid __NR_setresuid32
7279 #else
7280 #define __NR_sys_setresuid __NR_setresuid
7281 #endif
7282 #ifdef __NR_setresgid32
7283 #define __NR_sys_setresgid __NR_setresgid32
7284 #else
7285 #define __NR_sys_setresgid __NR_setresgid
7286 #endif
7288 _syscall1(int, sys_setuid, uid_t, uid)
7289 _syscall1(int, sys_setgid, gid_t, gid)
7290 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7291 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
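/*
 * Illustration added for exposition, to make the comment above concrete
 * (a sketch, not a drop-in snippet):
 *
 *   setuid(1000);                    // libc: broadcast to ALL threads
 *                                    // via glibc's internal setxid signal
 *   syscall(__NR_sys_setuid, 1000);  // raw: changes THIS kernel task only
 *
 * Since qemu runs every guest thread on its own host thread, only the
 * raw form reproduces the per-thread semantics of TARGET_NR_setuid.
 */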
7293 void syscall_init(void)
7295 IOCTLEntry *ie;
7296 const argtype *arg_type;
7297 int size;
7299 thunk_init(STRUCT_MAX);
7301 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7302 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7303 #include "syscall_types.h"
7304 #undef STRUCT
7305 #undef STRUCT_SPECIAL
7307 /* we patch the ioctl size if necessary. We rely on the fact that
7308 no ioctl has all the bits at '1' in the size field */
7309 ie = ioctl_entries;
7310 while (ie->target_cmd != 0) {
7311 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7312 TARGET_IOC_SIZEMASK) {
7313 arg_type = ie->arg_type;
7314 if (arg_type[0] != TYPE_PTR) {
7315 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7316 ie->target_cmd);
7317 exit(1);
7319 arg_type++;
7320 size = thunk_type_size(arg_type, 0);
7321 ie->target_cmd = (ie->target_cmd &
7322 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7323 (size << TARGET_IOC_SIZESHIFT);
7326 /* automatic consistency check if same arch */
7327 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7328 (defined(__x86_64__) && defined(TARGET_X86_64))
7329 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7330 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7331 ie->name, ie->target_cmd, ie->host_cmd);
7333 #endif
7334 ie++;
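/*
 * Background note added for exposition: on most architectures an ioctl
 * number packs, from most to least significant bits,
 *
 *   dir:2 | size:14 | type:8 | nr:8
 *
 * e.g. a hypothetical  _IOR('T', 0x13, struct winsize)  encodes
 * dir = read, size = sizeof(struct winsize), type = 'T', nr = 0x13.
 * The loop above exploits this layout: table entries whose size field
 * is all ones are placeholders, and the real size is recomputed from
 * the thunk type so target and host agree on the payload length.
 */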
7338 #ifdef TARGET_NR_truncate64
7339 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7340 abi_long arg2,
7341 abi_long arg3,
7342 abi_long arg4)
7344 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7345 arg2 = arg3;
7346 arg3 = arg4;
7348 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7350 #endif
7352 #ifdef TARGET_NR_ftruncate64
7353 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7354 abi_long arg2,
7355 abi_long arg3,
7356 abi_long arg4)
7358 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7359 arg2 = arg3;
7360 arg3 = arg4;
7362 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7364 #endif
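/*
 * Illustration added for exposition: on 32-bit ABIs the 64-bit offset
 * arrives split across two registers, and on some architectures
 * (regpairs_aligned) the pair must start at an even register index,
 * which is why the code above takes it from arg3/arg4 instead of
 * arg2/arg3.  The reassembly is
 *
 *   off = target_offset64(arg2, arg3);
 *       // == ((uint64_t)high << 32) | low, with which word is "high"
 *       // depending on the target's endianness/ABI
 *
 * so e.g. a guest truncate64() to 4 GiB passes 0x1 in the high word and
 * 0x0 in the low word, reassembled here as 0x100000000.
 */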
7366 #if defined(TARGET_NR_timer_settime) || \
7367 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7368 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7369 abi_ulong target_addr)
7371 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7372 offsetof(struct target_itimerspec,
7373 it_interval)) ||
7374 target_to_host_timespec(&host_its->it_value, target_addr +
7375 offsetof(struct target_itimerspec,
7376 it_value))) {
7377 return -TARGET_EFAULT;
7380 return 0;
7382 #endif
7384 #if defined(TARGET_NR_timer_settime64) || \
7385 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7386 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7387 abi_ulong target_addr)
7389 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7390 offsetof(struct target__kernel_itimerspec,
7391 it_interval)) ||
7392 target_to_host_timespec64(&host_its->it_value, target_addr +
7393 offsetof(struct target__kernel_itimerspec,
7394 it_value))) {
7395 return -TARGET_EFAULT;
7398 return 0;
7400 #endif
7402 #if ((defined(TARGET_NR_timerfd_gettime) || \
7403 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7404 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7405 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7406 struct itimerspec *host_its)
7408 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7409 it_interval),
7410 &host_its->it_interval) ||
7411 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7412 it_value),
7413 &host_its->it_value)) {
7414 return -TARGET_EFAULT;
7416 return 0;
7418 #endif
7420 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7421 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7422 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7423 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7424 struct itimerspec *host_its)
7426 if (host_to_target_timespec64(target_addr +
7427 offsetof(struct target__kernel_itimerspec,
7428 it_interval),
7429 &host_its->it_interval) ||
7430 host_to_target_timespec64(target_addr +
7431 offsetof(struct target__kernel_itimerspec,
7432 it_value),
7433 &host_its->it_value)) {
7434 return -TARGET_EFAULT;
7436 return 0;
7438 #endif
7440 #if defined(TARGET_NR_adjtimex) || \
7441 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7442 static inline abi_long target_to_host_timex(struct timex *host_tx,
7443 abi_long target_addr)
7445 struct target_timex *target_tx;
7447 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7448 return -TARGET_EFAULT;
7451 __get_user(host_tx->modes, &target_tx->modes);
7452 __get_user(host_tx->offset, &target_tx->offset);
7453 __get_user(host_tx->freq, &target_tx->freq);
7454 __get_user(host_tx->maxerror, &target_tx->maxerror);
7455 __get_user(host_tx->esterror, &target_tx->esterror);
7456 __get_user(host_tx->status, &target_tx->status);
7457 __get_user(host_tx->constant, &target_tx->constant);
7458 __get_user(host_tx->precision, &target_tx->precision);
7459 __get_user(host_tx->tolerance, &target_tx->tolerance);
7460 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7461 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7462 __get_user(host_tx->tick, &target_tx->tick);
7463 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7464 __get_user(host_tx->jitter, &target_tx->jitter);
7465 __get_user(host_tx->shift, &target_tx->shift);
7466 __get_user(host_tx->stabil, &target_tx->stabil);
7467 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7468 __get_user(host_tx->calcnt, &target_tx->calcnt);
7469 __get_user(host_tx->errcnt, &target_tx->errcnt);
7470 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7471 __get_user(host_tx->tai, &target_tx->tai);
7473 unlock_user_struct(target_tx, target_addr, 0);
7474 return 0;
7477 static inline abi_long host_to_target_timex(abi_long target_addr,
7478 struct timex *host_tx)
7480 struct target_timex *target_tx;
7482 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7483 return -TARGET_EFAULT;
7486 __put_user(host_tx->modes, &target_tx->modes);
7487 __put_user(host_tx->offset, &target_tx->offset);
7488 __put_user(host_tx->freq, &target_tx->freq);
7489 __put_user(host_tx->maxerror, &target_tx->maxerror);
7490 __put_user(host_tx->esterror, &target_tx->esterror);
7491 __put_user(host_tx->status, &target_tx->status);
7492 __put_user(host_tx->constant, &target_tx->constant);
7493 __put_user(host_tx->precision, &target_tx->precision);
7494 __put_user(host_tx->tolerance, &target_tx->tolerance);
7495 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7496 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7497 __put_user(host_tx->tick, &target_tx->tick);
7498 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7499 __put_user(host_tx->jitter, &target_tx->jitter);
7500 __put_user(host_tx->shift, &target_tx->shift);
7501 __put_user(host_tx->stabil, &target_tx->stabil);
7502 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7503 __put_user(host_tx->calcnt, &target_tx->calcnt);
7504 __put_user(host_tx->errcnt, &target_tx->errcnt);
7505 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7506 __put_user(host_tx->tai, &target_tx->tai);
7508 unlock_user_struct(target_tx, target_addr, 1);
7509 return 0;
7511 #endif
7514 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7515 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7516 abi_long target_addr)
7518 struct target__kernel_timex *target_tx;
7520 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7521 offsetof(struct target__kernel_timex,
7522 time))) {
7523 return -TARGET_EFAULT;
7526 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7527 return -TARGET_EFAULT;
7530 __get_user(host_tx->modes, &target_tx->modes);
7531 __get_user(host_tx->offset, &target_tx->offset);
7532 __get_user(host_tx->freq, &target_tx->freq);
7533 __get_user(host_tx->maxerror, &target_tx->maxerror);
7534 __get_user(host_tx->esterror, &target_tx->esterror);
7535 __get_user(host_tx->status, &target_tx->status);
7536 __get_user(host_tx->constant, &target_tx->constant);
7537 __get_user(host_tx->precision, &target_tx->precision);
7538 __get_user(host_tx->tolerance, &target_tx->tolerance);
7539 __get_user(host_tx->tick, &target_tx->tick);
7540 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7541 __get_user(host_tx->jitter, &target_tx->jitter);
7542 __get_user(host_tx->shift, &target_tx->shift);
7543 __get_user(host_tx->stabil, &target_tx->stabil);
7544 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7545 __get_user(host_tx->calcnt, &target_tx->calcnt);
7546 __get_user(host_tx->errcnt, &target_tx->errcnt);
7547 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7548 __get_user(host_tx->tai, &target_tx->tai);
7550 unlock_user_struct(target_tx, target_addr, 0);
7551 return 0;
7554 static inline abi_long host_to_target_timex64(abi_long target_addr,
7555 struct timex *host_tx)
7557 struct target__kernel_timex *target_tx;
7559 if (copy_to_user_timeval64(target_addr +
7560 offsetof(struct target__kernel_timex, time),
7561 &host_tx->time)) {
7562 return -TARGET_EFAULT;
7565 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7566 return -TARGET_EFAULT;
7569 __put_user(host_tx->modes, &target_tx->modes);
7570 __put_user(host_tx->offset, &target_tx->offset);
7571 __put_user(host_tx->freq, &target_tx->freq);
7572 __put_user(host_tx->maxerror, &target_tx->maxerror);
7573 __put_user(host_tx->esterror, &target_tx->esterror);
7574 __put_user(host_tx->status, &target_tx->status);
7575 __put_user(host_tx->constant, &target_tx->constant);
7576 __put_user(host_tx->precision, &target_tx->precision);
7577 __put_user(host_tx->tolerance, &target_tx->tolerance);
7578 __put_user(host_tx->tick, &target_tx->tick);
7579 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7580 __put_user(host_tx->jitter, &target_tx->jitter);
7581 __put_user(host_tx->shift, &target_tx->shift);
7582 __put_user(host_tx->stabil, &target_tx->stabil);
7583 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7584 __put_user(host_tx->calcnt, &target_tx->calcnt);
7585 __put_user(host_tx->errcnt, &target_tx->errcnt);
7586 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7587 __put_user(host_tx->tai, &target_tx->tai);
7589 unlock_user_struct(target_tx, target_addr, 1);
7590 return 0;
7592 #endif
7594 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7595 #define sigev_notify_thread_id _sigev_un._tid
7596 #endif
7598 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7599 abi_ulong target_addr)
7601 struct target_sigevent *target_sevp;
7603 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7604 return -TARGET_EFAULT;
7607 /* This union is awkward on 64-bit systems because it has a 32-bit
7608 * integer and a pointer in it; we follow the conversion approach
7609 * used for handling sigval types in signal.c so the guest should get
7610 * the correct value back even if we did a 64-bit byteswap and it's
7611 * using the 32-bit integer.
7612 */
7613 host_sevp->sigev_value.sival_ptr =
7614 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7615 host_sevp->sigev_signo =
7616 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7617 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7618 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7620 unlock_user_struct(target_sevp, target_addr, 1);
7621 return 0;
7624 #if defined(TARGET_NR_mlockall)
7625 static inline int target_to_host_mlockall_arg(int arg)
7627 int result = 0;
7629 if (arg & TARGET_MCL_CURRENT) {
7630 result |= MCL_CURRENT;
7632 if (arg & TARGET_MCL_FUTURE) {
7633 result |= MCL_FUTURE;
7635 #ifdef MCL_ONFAULT
7636 if (arg & TARGET_MCL_ONFAULT) {
7637 result |= MCL_ONFAULT;
7639 #endif
7641 return result;
7643 #endif
7645 static inline int target_to_host_msync_arg(abi_long arg)
7647 return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7648 ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7649 ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7650 (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7653 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7654 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7655 defined(TARGET_NR_newfstatat))
7656 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7657 abi_ulong target_addr,
7658 struct stat *host_st)
7660 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7661 if (cpu_env->eabi) {
7662 struct target_eabi_stat64 *target_st;
7664 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7665 return -TARGET_EFAULT;
7666 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7667 __put_user(host_st->st_dev, &target_st->st_dev);
7668 __put_user(host_st->st_ino, &target_st->st_ino);
7669 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7670 __put_user(host_st->st_ino, &target_st->__st_ino);
7671 #endif
7672 __put_user(host_st->st_mode, &target_st->st_mode);
7673 __put_user(host_st->st_nlink, &target_st->st_nlink);
7674 __put_user(host_st->st_uid, &target_st->st_uid);
7675 __put_user(host_st->st_gid, &target_st->st_gid);
7676 __put_user(host_st->st_rdev, &target_st->st_rdev);
7677 __put_user(host_st->st_size, &target_st->st_size);
7678 __put_user(host_st->st_blksize, &target_st->st_blksize);
7679 __put_user(host_st->st_blocks, &target_st->st_blocks);
7680 __put_user(host_st->st_atime, &target_st->target_st_atime);
7681 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7682 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7683 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7684 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7685 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7686 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7687 #endif
7688 unlock_user_struct(target_st, target_addr, 1);
7689 } else
7690 #endif
7692 #if defined(TARGET_HAS_STRUCT_STAT64)
7693 struct target_stat64 *target_st;
7694 #else
7695 struct target_stat *target_st;
7696 #endif
7698 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7699 return -TARGET_EFAULT;
7700 memset(target_st, 0, sizeof(*target_st));
7701 __put_user(host_st->st_dev, &target_st->st_dev);
7702 __put_user(host_st->st_ino, &target_st->st_ino);
7703 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7704 __put_user(host_st->st_ino, &target_st->__st_ino);
7705 #endif
7706 __put_user(host_st->st_mode, &target_st->st_mode);
7707 __put_user(host_st->st_nlink, &target_st->st_nlink);
7708 __put_user(host_st->st_uid, &target_st->st_uid);
7709 __put_user(host_st->st_gid, &target_st->st_gid);
7710 __put_user(host_st->st_rdev, &target_st->st_rdev);
7711 /* XXX: better use of kernel struct */
7712 __put_user(host_st->st_size, &target_st->st_size);
7713 __put_user(host_st->st_blksize, &target_st->st_blksize);
7714 __put_user(host_st->st_blocks, &target_st->st_blocks);
7715 __put_user(host_st->st_atime, &target_st->target_st_atime);
7716 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7717 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7718 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7719 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7720 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7721 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7722 #endif
7723 unlock_user_struct(target_st, target_addr, 1);
7726 return 0;
7728 #endif
7730 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7731 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7732 abi_ulong target_addr)
7734 struct target_statx *target_stx;
7736 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7737 return -TARGET_EFAULT;
7739 memset(target_stx, 0, sizeof(*target_stx));
7741 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7742 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7743 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7744 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7745 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7746 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7747 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7748 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7749 __put_user(host_stx->stx_size, &target_stx->stx_size);
7750 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7751 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7752 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7753 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7754 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7755 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7756 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7757 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7758 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7759 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7760 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7761 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7762 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7763 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7765 unlock_user_struct(target_stx, target_addr, 1);
7767 return 0;
7768 }
7769 #endif
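/*
 * Host futex dispatch. On a 64-bit host, __NR_futex already takes a
 * 64-bit timespec. On a 32-bit host we prefer __NR_futex_time64 when
 * the libc timespec is 64-bit, and fall back to the legacy syscall
 * otherwise. As an illustration (a sketch with made-up names, not
 * code from this file), the guest-side call being serviced is:
 *
 *     syscall(TARGET_NR_futex, &futex_word, FUTEX_WAIT, expected_val,
 *             &timeout, NULL, 0);
 */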
7771 static int do_sys_futex(int *uaddr, int op, int val,
7772 const struct timespec *timeout, int *uaddr2,
7773 int val3)
7774 {
7775 #if HOST_LONG_BITS == 64
7776 #if defined(__NR_futex)
7777 /* time_t is always 64-bit here, so no _time64 variant is defined */
7778 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7780 #endif
7781 #else /* HOST_LONG_BITS == 64 */
7782 #if defined(__NR_futex_time64)
7783 if (sizeof(timeout->tv_sec) == 8) {
7784 /* _time64 variant on a 32-bit arch */
7785 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7786 }
7787 #endif
7788 #if defined(__NR_futex)
7789 /* old function on 32bit arch */
7790 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7791 #endif
7792 #endif /* HOST_LONG_BITS == 64 */
7793 g_assert_not_reached();
7794 }
7796 static int do_safe_futex(int *uaddr, int op, int val,
7797 const struct timespec *timeout, int *uaddr2,
7798 int val3)
7799 {
7800 #if HOST_LONG_BITS == 64
7801 #if defined(__NR_futex)
7803 /* time_t is always 64-bit here, so no _time64 variant is defined */
7803 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7804 #endif
7805 #else /* HOST_LONG_BITS == 64 */
7806 #if defined(__NR_futex_time64)
7807 if (sizeof(timeout->tv_sec) == 8) {
7808 /* _time64 variant on a 32-bit arch */
7809 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7810 val3));
7811 }
7812 #endif
7813 #if defined(__NR_futex)
7814 /* old function on 32bit arch */
7815 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7816 #endif
7817 #endif /* HOST_LONG_BITS == 64 */
7818 return -TARGET_ENOSYS;
7819 }
7821 /* ??? Using host futex calls even when target atomic operations
7822    are not really atomic probably breaks things. But implementing
7823    futexes locally would make it hard to share futexes between
7824    multiple processes, and local futexes would probably be useless
7825    anyway, since guest atomic operations would not work either. */
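/*
 * do_futex() below normalizes the guest arguments before the host
 * call: VAL (and VAL3 for the compare-requeue ops) is byte-swapped
 * because the kernel compares it against guest memory, the 4th
 * argument is treated as either a timespec pointer or a plain count
 * depending on the base op, and uaddr/uaddr2 are translated with
 * g2h() before being handed to do_safe_futex().
 */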
7826 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7827 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7828 int op, int val, target_ulong timeout,
7829 target_ulong uaddr2, int val3)
7830 {
7831 struct timespec ts, *pts = NULL;
7832 void *haddr2 = NULL;
7833 int base_op;
7835 /* We assume FUTEX_* constants are the same on both host and target. */
7836 #ifdef FUTEX_CMD_MASK
7837 base_op = op & FUTEX_CMD_MASK;
7838 #else
7839 base_op = op;
7840 #endif
7841 switch (base_op) {
7842 case FUTEX_WAIT:
7843 case FUTEX_WAIT_BITSET:
7844 val = tswap32(val);
7845 break;
7846 case FUTEX_WAIT_REQUEUE_PI:
7847 val = tswap32(val);
7848 haddr2 = g2h(cpu, uaddr2);
7849 break;
7850 case FUTEX_LOCK_PI:
7851 case FUTEX_LOCK_PI2:
7852 break;
7853 case FUTEX_WAKE:
7854 case FUTEX_WAKE_BITSET:
7855 case FUTEX_TRYLOCK_PI:
7856 case FUTEX_UNLOCK_PI:
7857 timeout = 0;
7858 break;
7859 case FUTEX_FD:
7860 val = target_to_host_signal(val);
7861 timeout = 0;
7862 break;
7863 case FUTEX_CMP_REQUEUE:
7864 case FUTEX_CMP_REQUEUE_PI:
7865 val3 = tswap32(val3);
7866 /* fall through */
7867 case FUTEX_REQUEUE:
7868 case FUTEX_WAKE_OP:
7869 /*
7870  * For these, the 4th argument is not TIMEOUT, but VAL2.
7871  * But the prototype of do_safe_futex takes a pointer, so
7872  * insert casts to satisfy the compiler. We do not need
7873  * to tswap VAL2 since it's not compared to guest memory.
7874  */
7875 pts = (struct timespec *)(uintptr_t)timeout;
7876 timeout = 0;
7877 haddr2 = g2h(cpu, uaddr2);
7878 break;
7879 default:
7880 return -TARGET_ENOSYS;
7881 }
7882 if (timeout) {
7883 pts = &ts;
7884 if (time64
7885 ? target_to_host_timespec64(pts, timeout)
7886 : target_to_host_timespec(pts, timeout)) {
7887 return -TARGET_EFAULT;
7888 }
7889 }
7890 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7891 }
7892 #endif
7894 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7895 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7896 abi_long handle, abi_long mount_id,
7897 abi_long flags)
7898 {
7899 struct file_handle *target_fh;
7900 struct file_handle *fh;
7901 int mid = 0;
7902 abi_long ret;
7903 char *name;
7904 unsigned int size, total_size;
7906 if (get_user_s32(size, handle)) {
7907 return -TARGET_EFAULT;
7908 }
7910 name = lock_user_string(pathname);
7911 if (!name) {
7912 return -TARGET_EFAULT;
7913 }
7915 total_size = sizeof(struct file_handle) + size;
7916 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7917 if (!target_fh) {
7918 unlock_user(name, pathname, 0);
7919 return -TARGET_EFAULT;
7920 }
7922 fh = g_malloc0(total_size);
7923 fh->handle_bytes = size;
7925 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7926 unlock_user(name, pathname, 0);
7928 /* man name_to_handle_at(2):
7929  * Other than the use of the handle_bytes field, the caller should treat
7930  * the file_handle structure as an opaque data type
7931  */
7933 memcpy(target_fh, fh, total_size);
7934 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7935 target_fh->handle_type = tswap32(fh->handle_type);
7936 g_free(fh);
7937 unlock_user(target_fh, handle, total_size);
7939 if (put_user_s32(mid, mount_id)) {
7940 return -TARGET_EFAULT;
7941 }
7943 return ret;
7944 }
7946 #endif
7948 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7949 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7950 abi_long flags)
7951 {
7952 struct file_handle *target_fh;
7953 struct file_handle *fh;
7954 unsigned int size, total_size;
7955 abi_long ret;
7957 if (get_user_s32(size, handle)) {
7958 return -TARGET_EFAULT;
7959 }
7961 total_size = sizeof(struct file_handle) + size;
7962 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7963 if (!target_fh) {
7964 return -TARGET_EFAULT;
7965 }
7967 fh = g_memdup(target_fh, total_size);
7968 fh->handle_bytes = size;
7969 fh->handle_type = tswap32(target_fh->handle_type);
7971 ret = get_errno(open_by_handle_at(mount_fd, fh,
7972 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7974 g_free(fh);
7976 unlock_user(target_fh, handle, total_size);
7978 return ret;
7979 }
7980 #endif
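/*
 * do_signalfd4() below rejects flags outside the target NONBLOCK and
 * CLOEXEC bits, converts the guest sigset and the flag bits to host
 * form, and on success registers an fd translator so data read from
 * the signalfd can be converted back to the target's
 * signalfd_siginfo layout.
 */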
7982 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7984 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7985 {
7986 int host_flags;
7987 target_sigset_t *target_mask;
7988 sigset_t host_mask;
7989 abi_long ret;
7991 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7992 return -TARGET_EINVAL;
7993 }
7994 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7995 return -TARGET_EFAULT;
7996 }
7998 target_to_host_sigset(&host_mask, target_mask);
8000 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8002 ret = get_errno(signalfd(fd, &host_mask, host_flags));
8003 if (ret >= 0) {
8004 fd_trans_register(ret, &target_signalfd_trans);
8005 }
8007 unlock_user_struct(target_mask, mask, 0);
8009 return ret;
8010 }
8011 #endif
8013 /* Map host to target signal numbers for the wait family of syscalls.
8014 Assume all other status bits are the same. */
8015 int host_to_target_waitstatus(int status)
8016 {
8017 if (WIFSIGNALED(status)) {
8018 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8019 }
8020 if (WIFSTOPPED(status)) {
8021 return (host_to_target_signal(WSTOPSIG(status)) << 8)
8022 | (status & 0xff);
8023 }
8024 return status;
8025 }
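/*
 * Example: if a child is stopped or killed by a signal whose number
 * differs between host and target, only the signal bits of the
 * status word are rewritten; the remaining status bits (core dump,
 * exit code) pass through unchanged.
 */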
8027 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8028 {
8029 CPUState *cpu = env_cpu(cpu_env);
8030 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8031 int i;
8033 for (i = 0; i < bprm->argc; i++) {
8034 size_t len = strlen(bprm->argv[i]) + 1;
8036 if (write(fd, bprm->argv[i], len) != len) {
8037 return -1;
8038 }
8039 }
8041 return 0;
8042 }
8044 static int open_self_maps(CPUArchState *cpu_env, int fd)
8045 {
8046 CPUState *cpu = env_cpu(cpu_env);
8047 TaskState *ts = cpu->opaque;
8048 GSList *map_info = read_self_maps();
8049 GSList *s;
8050 int count;
8052 for (s = map_info; s; s = g_slist_next(s)) {
8053 MapInfo *e = (MapInfo *) s->data;
8055 if (h2g_valid(e->start)) {
8056 unsigned long min = e->start;
8057 unsigned long max = e->end;
8058 int flags = page_get_flags(h2g(min));
8059 const char *path;
8061 max = h2g_valid(max - 1) ?
8062 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8064 if (page_check_range(h2g(min), max - min, flags) == -1) {
8065 continue;
8066 }
8068 #ifdef TARGET_HPPA
8069 if (h2g(max) == ts->info->stack_limit) {
8070 #else
8071 if (h2g(min) == ts->info->stack_limit) {
8072 #endif
8073 path = "[stack]";
8074 } else {
8075 path = e->path;
8076 }
8078 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8079 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8080 h2g(min), h2g(max - 1) + 1,
8081 (flags & PAGE_READ) ? 'r' : '-',
8082 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8083 (flags & PAGE_EXEC) ? 'x' : '-',
8084 e->is_priv ? 'p' : 's',
8085 (uint64_t) e->offset, e->dev, e->inode);
8086 if (path) {
8087 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8088 } else {
8089 dprintf(fd, "\n");
8090 }
8091 }
8092 }
8094 free_self_maps(map_info);
8096 #ifdef TARGET_VSYSCALL_PAGE
8097 /*
8098  * We only support execution from the vsyscall page.
8099  * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8100  */
8101 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8102 " --xp 00000000 00:00 0",
8103 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8104 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8105 #endif
8107 return 0;
8108 }
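/*
 * The lines produced above follow the /proc/pid/maps format, e.g.
 * (made-up values for illustration):
 *
 *     00400000-00452000 r-xp 00010000 08:02 173521    /usr/bin/foo
 *
 * with the pathname column padded so it starts at a fixed offset,
 * which is what the "73 - count" field width arranges.
 */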
8110 static int open_self_stat(CPUArchState *cpu_env, int fd)
8111 {
8112 CPUState *cpu = env_cpu(cpu_env);
8113 TaskState *ts = cpu->opaque;
8114 g_autoptr(GString) buf = g_string_new(NULL);
8115 int i;
8117 for (i = 0; i < 44; i++) {
8118 if (i == 0) {
8119 /* pid */
8120 g_string_printf(buf, FMT_pid " ", getpid());
8121 } else if (i == 1) {
8122 /* app name */
8123 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8124 bin = bin ? bin + 1 : ts->bprm->argv[0];
8125 g_string_printf(buf, "(%.15s) ", bin);
8126 } else if (i == 2) {
8127 /* task state */
8128 g_string_assign(buf, "R "); /* we are running right now */
8129 } else if (i == 3) {
8130 /* ppid */
8131 g_string_printf(buf, FMT_pid " ", getppid());
8132 } else if (i == 21) {
8133 /* starttime */
8134 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8135 } else if (i == 27) {
8136 /* stack bottom */
8137 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8138 } else {
8139 /* for the rest, there is MasterCard */
8140 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8141 }
8143 if (write(fd, buf->str, buf->len) != buf->len) {
8144 return -1;
8145 }
8146 }
8148 return 0;
8149 }
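/*
 * Of the 44 space-separated fields emitted above, only pid, comm,
 * state, ppid, starttime and the stack start address carry real
 * values; every other field is reported as 0.
 */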
8151 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8152 {
8153 CPUState *cpu = env_cpu(cpu_env);
8154 TaskState *ts = cpu->opaque;
8155 abi_ulong auxv = ts->info->saved_auxv;
8156 abi_ulong len = ts->info->auxv_len;
8157 char *ptr;
8159 /*
8160  * The auxiliary vector is stored on the target process stack.
8161  * Read in the whole auxv vector and copy it to the file.
8162  */
8163 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8164 if (ptr != NULL) {
8165 while (len > 0) {
8166 ssize_t r;
8167 r = write(fd, ptr, len);
8168 if (r <= 0) {
8169 break;
8170 }
8171 len -= r;
8172 ptr += r;
8173 }
8174 lseek(fd, 0, SEEK_SET);
8175 unlock_user(ptr, auxv, len);
8176 }
8178 return 0;
8179 }
8181 static int is_proc_myself(const char *filename, const char *entry)
8182 {
8183 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8184 filename += strlen("/proc/");
8185 if (!strncmp(filename, "self/", strlen("self/"))) {
8186 filename += strlen("self/");
8187 } else if (*filename >= '1' && *filename <= '9') {
8188 char myself[80];
8189 snprintf(myself, sizeof(myself), "%d/", getpid());
8190 if (!strncmp(filename, myself, strlen(myself))) {
8191 filename += strlen(myself);
8192 } else {
8193 return 0;
8194 }
8195 } else {
8196 return 0;
8197 }
8198 if (!strcmp(filename, entry)) {
8199 return 1;
8200 }
8201 }
8202 return 0;
8203 }
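/*
 * is_proc_myself() matches both the "self" form and the numeric form
 * for our own pid: with pid 1234, "/proc/self/maps" and
 * "/proc/1234/maps" both match entry "maps", while "/proc/4321/maps"
 * does not.
 */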
8205 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8206 const char *fmt, int code)
8207 {
8208 if (logfile) {
8209 CPUState *cs = env_cpu(env);
8211 fprintf(logfile, fmt, code);
8212 fprintf(logfile, "Failing executable: %s\n", exec_path);
8213 cpu_dump_state(cs, logfile, 0);
8214 open_self_maps(env, fileno(logfile));
8215 }
8216 }
8218 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8219 {
8220 /* dump to console */
8221 excp_dump_file(stderr, env, fmt, code);
8223 /* dump to log file */
8224 if (qemu_log_separate()) {
8225 FILE *logfile = qemu_log_trylock();
8227 excp_dump_file(logfile, env, fmt, code);
8228 qemu_log_unlock(logfile);
8229 }
8230 }
8232 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8233 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8234 static int is_proc(const char *filename, const char *entry)
8235 {
8236 return strcmp(filename, entry) == 0;
8237 }
8238 #endif
8240 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8241 static int open_net_route(CPUArchState *cpu_env, int fd)
8242 {
8243 FILE *fp;
8244 char *line = NULL;
8245 size_t len = 0;
8246 ssize_t read;
8248 fp = fopen("/proc/net/route", "r");
8249 if (fp == NULL) {
8250 return -1;
8251 }
8253 /* read header */
8255 read = getline(&line, &len, fp);
8256 dprintf(fd, "%s", line);
8258 /* read routes */
8260 while ((read = getline(&line, &len, fp)) != -1) {
8261 char iface[16];
8262 uint32_t dest, gw, mask;
8263 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8264 int fields;
8266 fields = sscanf(line,
8267 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8268 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8269 &mask, &mtu, &window, &irtt);
8270 if (fields != 11) {
8271 continue;
8272 }
8273 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8274 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8275 metric, tswap32(mask), mtu, window, irtt);
8276 }
8278 free(line);
8279 fclose(fp);
8281 return 0;
8282 }
8283 #endif
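/*
 * open_net_route() is only compiled when host and guest endianness
 * differ: the hex address fields (destination, gateway, mask) in
 * /proc/net/route are in host byte order, so each parsed row is
 * re-emitted with those three fields byte-swapped for the guest.
 */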
8285 #if defined(TARGET_SPARC)
8286 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8287 {
8288 dprintf(fd, "type\t\t: sun4u\n");
8289 return 0;
8290 }
8291 #endif
8293 #if defined(TARGET_HPPA)
8294 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8295 {
8296 int i, num_cpus;
8298 num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8299 for (i = 0; i < num_cpus; i++) {
8300 dprintf(fd, "processor\t: %d\n", i);
8301 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8302 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8303 dprintf(fd, "capabilities\t: os32\n");
8304 dprintf(fd, "model\t\t: 9000/778/B160L - "
8305 "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8306 }
8307 return 0;
8308 }
8309 #endif
8311 #if defined(TARGET_M68K)
8312 static int open_hardware(CPUArchState *cpu_env, int fd)
8313 {
8314 dprintf(fd, "Model:\t\tqemu-m68k\n");
8315 return 0;
8316 }
8317 #endif
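/*
 * do_openat() below intercepts a small set of /proc paths and
 * synthesizes their contents via the fill functions above, writing
 * into a memfd (or, where memfd_create is unavailable, an unlinked
 * temporary file) whose descriptor is returned to the guest instead
 * of a real /proc fd.
 */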
8319 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8320 {
8321 struct fake_open {
8322 const char *filename;
8323 int (*fill)(CPUArchState *cpu_env, int fd);
8324 int (*cmp)(const char *s1, const char *s2);
8325 };
8326 const struct fake_open *fake_open;
8327 static const struct fake_open fakes[] = {
8328 { "maps", open_self_maps, is_proc_myself },
8329 { "stat", open_self_stat, is_proc_myself },
8330 { "auxv", open_self_auxv, is_proc_myself },
8331 { "cmdline", open_self_cmdline, is_proc_myself },
8332 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8333 { "/proc/net/route", open_net_route, is_proc },
8334 #endif
8335 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8336 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8337 #endif
8338 #if defined(TARGET_M68K)
8339 { "/proc/hardware", open_hardware, is_proc },
8340 #endif
8341 { NULL, NULL, NULL }
8342 };
8344 if (is_proc_myself(pathname, "exe")) {
8345 return safe_openat(dirfd, exec_path, flags, mode);
8346 }
8348 for (fake_open = fakes; fake_open->filename; fake_open++) {
8349 if (fake_open->cmp(pathname, fake_open->filename)) {
8350 break;
8351 }
8352 }
8354 if (fake_open->filename) {
8355 const char *tmpdir;
8356 char filename[PATH_MAX];
8357 int fd, r;
8359 fd = memfd_create("qemu-open", 0);
8360 if (fd < 0) {
8361 if (errno != ENOSYS) {
8362 return fd;
8363 }
8364 /* fall back: create an unlinked temporary file to hold the contents */
8365 tmpdir = getenv("TMPDIR");
8366 if (!tmpdir)
8367 tmpdir = "/tmp";
8368 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8369 fd = mkstemp(filename);
8370 if (fd < 0) {
8371 return fd;
8372 }
8373 unlink(filename);
8374 }
8376 if ((r = fake_open->fill(cpu_env, fd))) {
8377 int e = errno;
8378 close(fd);
8379 errno = e;
8380 return r;
8381 }
8382 lseek(fd, 0, SEEK_SET);
8384 return fd;
8385 }
8387 return safe_openat(dirfd, path(pathname), flags, mode);
8388 }
8390 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8391 abi_long pathname, abi_long guest_argp,
8392 abi_long guest_envp, int flags)
8393 {
8394 int ret;
8395 char **argp, **envp;
8396 int argc, envc;
8397 abi_ulong gp;
8398 abi_ulong addr;
8399 char **q;
8400 void *p;
8402 argc = 0;
8404 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8405 if (get_user_ual(addr, gp)) {
8406 return -TARGET_EFAULT;
8408 if (!addr) {
8409 break;
8411 argc++;
8413 envc = 0;
8414 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8415 if (get_user_ual(addr, gp)) {
8416 return -TARGET_EFAULT;
8418 if (!addr) {
8419 break;
8421 envc++;
8424 argp = g_new0(char *, argc + 1);
8425 envp = g_new0(char *, envc + 1);
8427 for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8428 if (get_user_ual(addr, gp)) {
8429 goto execve_efault;
8431 if (!addr) {
8432 break;
8434 *q = lock_user_string(addr);
8435 if (!*q) {
8436 goto execve_efault;
8439 *q = NULL;
8441 for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8442 if (get_user_ual(addr, gp)) {
8443 goto execve_efault;
8445 if (!addr) {
8446 break;
8448 *q = lock_user_string(addr);
8449 if (!*q) {
8450 goto execve_efault;
8453 *q = NULL;
8456 * Although execve() is not an interruptible syscall it is
8457 * a special case where we must use the safe_syscall wrapper:
8458 * if we allow a signal to happen before we make the host
8459 * syscall then we will 'lose' it, because at the point of
8460 * execve the process leaves QEMU's control. So we use the
8461 * safe syscall wrapper to ensure that we either take the
8462 * signal as a guest signal, or else it does not happen
8463 * before the execve completes and makes it the other
8464 * program's problem.
8466 p = lock_user_string(pathname);
8467 if (!p) {
8468 goto execve_efault;
8471 if (is_proc_myself(p, "exe")) {
8472 ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8473 } else {
8474 ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8477 unlock_user(p, pathname, 0);
8479 goto execve_end;
8481 execve_efault:
8482 ret = -TARGET_EFAULT;
8484 execve_end:
8485 for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8486 if (get_user_ual(addr, gp) || !addr) {
8487 break;
8489 unlock_user(*q, addr, 0);
8491 for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8492 if (get_user_ual(addr, gp) || !addr) {
8493 break;
8495 unlock_user(*q, addr, 0);
8498 g_free(argp);
8499 g_free(envp);
8500 return ret;
8501 }
8503 #define TIMER_MAGIC 0x0caf0000
8504 #define TIMER_MAGIC_MASK 0xffff0000
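/*
 * Guest-visible timer IDs are the internal table index OR'ed with
 * TIMER_MAGIC, so for example index 3 is handed out as 0x0caf0003;
 * get_timer_id() below checks the magic and recovers the index.
 */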
8506 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8507 static target_timer_t get_timer_id(abi_long arg)
8508 {
8509 target_timer_t timerid = arg;
8511 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8512 return -TARGET_EINVAL;
8515 timerid &= 0xffff;
8517 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8518 return -TARGET_EINVAL;
8521 return timerid;
8522 }
8524 static int target_to_host_cpu_mask(unsigned long *host_mask,
8525 size_t host_size,
8526 abi_ulong target_addr,
8527 size_t target_size)
8528 {
8529 unsigned target_bits = sizeof(abi_ulong) * 8;
8530 unsigned host_bits = sizeof(*host_mask) * 8;
8531 abi_ulong *target_mask;
8532 unsigned i, j;
8534 assert(host_size >= target_size);
8536 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8537 if (!target_mask) {
8538 return -TARGET_EFAULT;
8540 memset(host_mask, 0, host_size);
8542 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8543 unsigned bit = i * target_bits;
8544 abi_ulong val;
8546 __get_user(val, &target_mask[i]);
8547 for (j = 0; j < target_bits; j++, bit++) {
8548 if (val & (1UL << j)) {
8549 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8554 unlock_user(target_mask, target_addr, 0);
8555 return 0;
8556 }
8558 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8559 size_t host_size,
8560 abi_ulong target_addr,
8561 size_t target_size)
8562 {
8563 unsigned target_bits = sizeof(abi_ulong) * 8;
8564 unsigned host_bits = sizeof(*host_mask) * 8;
8565 abi_ulong *target_mask;
8566 unsigned i, j;
8568 assert(host_size >= target_size);
8570 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8571 if (!target_mask) {
8572 return -TARGET_EFAULT;
8575 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8576 unsigned bit = i * target_bits;
8577 abi_ulong val = 0;
8579 for (j = 0; j < target_bits; j++, bit++) {
8580 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8581 val |= 1UL << j;
8584 __put_user(val, &target_mask[i]);
8587 unlock_user(target_mask, target_addr, target_size);
8588 return 0;
8589 }
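/*
 * The two helpers above re-pack CPU affinity masks bit by bit
 * between abi_ulong-sized guest words and unsigned-long-sized host
 * words, so they stay correct when, for example, a 32-bit guest runs
 * on a 64-bit host and the word size (and hence word layout) differs.
 */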
8591 #ifdef TARGET_NR_getdents
8592 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8593 {
8594 g_autofree void *hdirp = NULL;
8595 void *tdirp;
8596 int hlen, hoff, toff;
8597 int hreclen, treclen;
8598 off64_t prev_diroff = 0;
8600 hdirp = g_try_malloc(count);
8601 if (!hdirp) {
8602 return -TARGET_ENOMEM;
8605 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8606 hlen = sys_getdents(dirfd, hdirp, count);
8607 #else
8608 hlen = sys_getdents64(dirfd, hdirp, count);
8609 #endif
8611 hlen = get_errno(hlen);
8612 if (is_error(hlen)) {
8613 return hlen;
8616 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8617 if (!tdirp) {
8618 return -TARGET_EFAULT;
8621 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8622 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8623 struct linux_dirent *hde = hdirp + hoff;
8624 #else
8625 struct linux_dirent64 *hde = hdirp + hoff;
8626 #endif
8627 struct target_dirent *tde = tdirp + toff;
8628 int namelen;
8629 uint8_t type;
8631 namelen = strlen(hde->d_name);
8632 hreclen = hde->d_reclen;
8633 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8634 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8636 if (toff + treclen > count) {
8637 /*
8638  * If the host struct is smaller than the target struct, or
8639  * requires less alignment and thus packs into less space,
8640  * then the host can return more entries than we can pass
8641  * on to the guest.
8642  */
8643 if (toff == 0) {
8644 toff = -TARGET_EINVAL; /* result buffer is too small */
8645 break;
8647 /*
8648  * Return what we have, resetting the file pointer to the
8649  * location of the first record not returned.
8650  */
8651 lseek64(dirfd, prev_diroff, SEEK_SET);
8652 break;
8655 prev_diroff = hde->d_off;
8656 tde->d_ino = tswapal(hde->d_ino);
8657 tde->d_off = tswapal(hde->d_off);
8658 tde->d_reclen = tswap16(treclen);
8659 memcpy(tde->d_name, hde->d_name, namelen + 1);
8662 * The getdents type is in what was formerly a padding byte at the
8663 * end of the structure.
8665 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8666 type = *((uint8_t *)hde + hreclen - 1);
8667 #else
8668 type = hde->d_type;
8669 #endif
8670 *((uint8_t *)tde + treclen - 1) = type;
8673 unlock_user(tdirp, arg2, toff);
8674 return toff;
8675 }
8676 #endif /* TARGET_NR_getdents */
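/*
 * Note the d_type handling in do_getdents() above: the legacy dirent
 * layout has no d_type field, so the kernel stores the type in the
 * final byte of each padded record, and the target record is built
 * with the same convention.
 */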
8678 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8679 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8680 {
8681 g_autofree void *hdirp = NULL;
8682 void *tdirp;
8683 int hlen, hoff, toff;
8684 int hreclen, treclen;
8685 off64_t prev_diroff = 0;
8687 hdirp = g_try_malloc(count);
8688 if (!hdirp) {
8689 return -TARGET_ENOMEM;
8692 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8693 if (is_error(hlen)) {
8694 return hlen;
8697 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8698 if (!tdirp) {
8699 return -TARGET_EFAULT;
8702 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8703 struct linux_dirent64 *hde = hdirp + hoff;
8704 struct target_dirent64 *tde = tdirp + toff;
8705 int namelen;
8707 namelen = strlen(hde->d_name) + 1;
8708 hreclen = hde->d_reclen;
8709 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8710 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8712 if (toff + treclen > count) {
8713 /*
8714  * If the host struct is smaller than the target struct, or
8715  * requires less alignment and thus packs into less space,
8716  * then the host can return more entries than we can pass
8717  * on to the guest.
8718  */
8719 if (toff == 0) {
8720 toff = -TARGET_EINVAL; /* result buffer is too small */
8721 break;
8723 /*
8724  * Return what we have, resetting the file pointer to the
8725  * location of the first record not returned.
8726  */
8727 lseek64(dirfd, prev_diroff, SEEK_SET);
8728 break;
8731 prev_diroff = hde->d_off;
8732 tde->d_ino = tswap64(hde->d_ino);
8733 tde->d_off = tswap64(hde->d_off);
8734 tde->d_reclen = tswap16(treclen);
8735 tde->d_type = hde->d_type;
8736 memcpy(tde->d_name, hde->d_name, namelen);
8739 unlock_user(tdirp, arg2, toff);
8740 return toff;
8741 }
8742 #endif /* TARGET_NR_getdents64 */
8744 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8745 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8746 #endif
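/*
 * pivot_root has no glibc wrapper, so the _syscall2() macro above
 * expands to a small helper that issues the raw host syscall with
 * the two arguments given.
 */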
8748 /* This is an internal helper for do_syscall, giving it a single
8749  * return point at which actions such as logging of syscall results
8750  * can be performed.
8751  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8752  */
8753 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8754 abi_long arg2, abi_long arg3, abi_long arg4,
8755 abi_long arg5, abi_long arg6, abi_long arg7,
8756 abi_long arg8)
8757 {
8758 CPUState *cpu = env_cpu(cpu_env);
8759 abi_long ret;
8760 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8761 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8762 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8763 || defined(TARGET_NR_statx)
8764 struct stat st;
8765 #endif
8766 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8767 || defined(TARGET_NR_fstatfs)
8768 struct statfs stfs;
8769 #endif
8770 void *p;
8772 switch(num) {
8773 case TARGET_NR_exit:
8774 /* In old applications this may be used to implement _exit(2).
8775 However in threaded applications it is used for thread termination,
8776 and _exit_group is used for application termination.
8777 Do thread termination if we have more than one thread. */
8779 if (block_signals()) {
8780 return -QEMU_ERESTARTSYS;
8783 pthread_mutex_lock(&clone_lock);
8785 if (CPU_NEXT(first_cpu)) {
8786 TaskState *ts = cpu->opaque;
8788 if (ts->child_tidptr) {
8789 put_user_u32(0, ts->child_tidptr);
8790 do_sys_futex(g2h(cpu, ts->child_tidptr),
8791 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8794 object_unparent(OBJECT(cpu));
8795 object_unref(OBJECT(cpu));
8796 /*
8797  * At this point the CPU should be unrealized and removed
8798  * from cpu lists. We can clean up the rest of the thread
8799  * data without the lock held.
8800  */
8802 pthread_mutex_unlock(&clone_lock);
8804 thread_cpu = NULL;
8805 g_free(ts);
8806 rcu_unregister_thread();
8807 pthread_exit(NULL);
8810 pthread_mutex_unlock(&clone_lock);
8811 preexit_cleanup(cpu_env, arg1);
8812 _exit(arg1);
8813 return 0; /* avoid warning */
8814 case TARGET_NR_read:
8815 if (arg2 == 0 && arg3 == 0) {
8816 return get_errno(safe_read(arg1, 0, 0));
8817 } else {
8818 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8819 return -TARGET_EFAULT;
8820 ret = get_errno(safe_read(arg1, p, arg3));
8821 if (ret >= 0 &&
8822 fd_trans_host_to_target_data(arg1)) {
8823 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8825 unlock_user(p, arg2, ret);
8827 return ret;
8828 case TARGET_NR_write:
8829 if (arg2 == 0 && arg3 == 0) {
8830 return get_errno(safe_write(arg1, 0, 0));
8832 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8833 return -TARGET_EFAULT;
8834 if (fd_trans_target_to_host_data(arg1)) {
8835 void *copy = g_malloc(arg3);
8836 memcpy(copy, p, arg3);
8837 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8838 if (ret >= 0) {
8839 ret = get_errno(safe_write(arg1, copy, ret));
8841 g_free(copy);
8842 } else {
8843 ret = get_errno(safe_write(arg1, p, arg3));
8845 unlock_user(p, arg2, 0);
8846 return ret;
8848 #ifdef TARGET_NR_open
8849 case TARGET_NR_open:
8850 if (!(p = lock_user_string(arg1)))
8851 return -TARGET_EFAULT;
8852 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8853 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8854 arg3));
8855 fd_trans_unregister(ret);
8856 unlock_user(p, arg1, 0);
8857 return ret;
8858 #endif
8859 case TARGET_NR_openat:
8860 if (!(p = lock_user_string(arg2)))
8861 return -TARGET_EFAULT;
8862 ret = get_errno(do_openat(cpu_env, arg1, p,
8863 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8864 arg4));
8865 fd_trans_unregister(ret);
8866 unlock_user(p, arg2, 0);
8867 return ret;
8868 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8869 case TARGET_NR_name_to_handle_at:
8870 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8871 return ret;
8872 #endif
8873 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8874 case TARGET_NR_open_by_handle_at:
8875 ret = do_open_by_handle_at(arg1, arg2, arg3);
8876 fd_trans_unregister(ret);
8877 return ret;
8878 #endif
8879 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8880 case TARGET_NR_pidfd_open:
8881 return get_errno(pidfd_open(arg1, arg2));
8882 #endif
8883 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8884 case TARGET_NR_pidfd_send_signal:
8886 siginfo_t uinfo, *puinfo;
8888 if (arg3) {
8889 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8890 if (!p) {
8891 return -TARGET_EFAULT;
8893 target_to_host_siginfo(&uinfo, p);
8894 unlock_user(p, arg3, 0);
8895 puinfo = &uinfo;
8896 } else {
8897 puinfo = NULL;
8899 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8900 puinfo, arg4));
8902 return ret;
8903 #endif
8904 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8905 case TARGET_NR_pidfd_getfd:
8906 return get_errno(pidfd_getfd(arg1, arg2, arg3));
8907 #endif
8908 case TARGET_NR_close:
8909 fd_trans_unregister(arg1);
8910 return get_errno(close(arg1));
8911 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8912 case TARGET_NR_close_range:
8913 ret = get_errno(sys_close_range(arg1, arg2, arg3));
8914 if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8915 abi_long fd, maxfd;
8916 maxfd = MIN(arg2, target_fd_max);
8917 for (fd = arg1; fd < maxfd; fd++) {
8918 fd_trans_unregister(fd);
8921 return ret;
8922 #endif
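/*
 * For close_range we also drop the fd translators of every fd that
 * was actually closed, but not when CLOSE_RANGE_CLOEXEC merely marks
 * the range close-on-exec, since those fds stay open and still need
 * translation.
 */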
8924 case TARGET_NR_brk:
8925 return do_brk(arg1);
8926 #ifdef TARGET_NR_fork
8927 case TARGET_NR_fork:
8928 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8929 #endif
8930 #ifdef TARGET_NR_waitpid
8931 case TARGET_NR_waitpid:
8933 int status;
8934 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8935 if (!is_error(ret) && arg2 && ret
8936 && put_user_s32(host_to_target_waitstatus(status), arg2))
8937 return -TARGET_EFAULT;
8939 return ret;
8940 #endif
8941 #ifdef TARGET_NR_waitid
8942 case TARGET_NR_waitid:
8944 siginfo_t info;
8945 info.si_pid = 0;
8946 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8947 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8948 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8949 return -TARGET_EFAULT;
8950 host_to_target_siginfo(p, &info);
8951 unlock_user(p, arg3, sizeof(target_siginfo_t));
8954 return ret;
8955 #endif
8956 #ifdef TARGET_NR_creat /* not on alpha */
8957 case TARGET_NR_creat:
8958 if (!(p = lock_user_string(arg1)))
8959 return -TARGET_EFAULT;
8960 ret = get_errno(creat(p, arg2));
8961 fd_trans_unregister(ret);
8962 unlock_user(p, arg1, 0);
8963 return ret;
8964 #endif
8965 #ifdef TARGET_NR_link
8966 case TARGET_NR_link:
8968 void * p2;
8969 p = lock_user_string(arg1);
8970 p2 = lock_user_string(arg2);
8971 if (!p || !p2)
8972 ret = -TARGET_EFAULT;
8973 else
8974 ret = get_errno(link(p, p2));
8975 unlock_user(p2, arg2, 0);
8976 unlock_user(p, arg1, 0);
8978 return ret;
8979 #endif
8980 #if defined(TARGET_NR_linkat)
8981 case TARGET_NR_linkat:
8983 void * p2 = NULL;
8984 if (!arg2 || !arg4)
8985 return -TARGET_EFAULT;
8986 p = lock_user_string(arg2);
8987 p2 = lock_user_string(arg4);
8988 if (!p || !p2)
8989 ret = -TARGET_EFAULT;
8990 else
8991 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8992 unlock_user(p, arg2, 0);
8993 unlock_user(p2, arg4, 0);
8995 return ret;
8996 #endif
8997 #ifdef TARGET_NR_unlink
8998 case TARGET_NR_unlink:
8999 if (!(p = lock_user_string(arg1)))
9000 return -TARGET_EFAULT;
9001 ret = get_errno(unlink(p));
9002 unlock_user(p, arg1, 0);
9003 return ret;
9004 #endif
9005 #if defined(TARGET_NR_unlinkat)
9006 case TARGET_NR_unlinkat:
9007 if (!(p = lock_user_string(arg2)))
9008 return -TARGET_EFAULT;
9009 ret = get_errno(unlinkat(arg1, p, arg3));
9010 unlock_user(p, arg2, 0);
9011 return ret;
9012 #endif
9013 case TARGET_NR_execveat:
9014 return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
9015 case TARGET_NR_execve:
9016 return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
9017 case TARGET_NR_chdir:
9018 if (!(p = lock_user_string(arg1)))
9019 return -TARGET_EFAULT;
9020 ret = get_errno(chdir(p));
9021 unlock_user(p, arg1, 0);
9022 return ret;
9023 #ifdef TARGET_NR_time
9024 case TARGET_NR_time:
9026 time_t host_time;
9027 ret = get_errno(time(&host_time));
9028 if (!is_error(ret)
9029 && arg1
9030 && put_user_sal(host_time, arg1))
9031 return -TARGET_EFAULT;
9033 return ret;
9034 #endif
9035 #ifdef TARGET_NR_mknod
9036 case TARGET_NR_mknod:
9037 if (!(p = lock_user_string(arg1)))
9038 return -TARGET_EFAULT;
9039 ret = get_errno(mknod(p, arg2, arg3));
9040 unlock_user(p, arg1, 0);
9041 return ret;
9042 #endif
9043 #if defined(TARGET_NR_mknodat)
9044 case TARGET_NR_mknodat:
9045 if (!(p = lock_user_string(arg2)))
9046 return -TARGET_EFAULT;
9047 ret = get_errno(mknodat(arg1, p, arg3, arg4));
9048 unlock_user(p, arg2, 0);
9049 return ret;
9050 #endif
9051 #ifdef TARGET_NR_chmod
9052 case TARGET_NR_chmod:
9053 if (!(p = lock_user_string(arg1)))
9054 return -TARGET_EFAULT;
9055 ret = get_errno(chmod(p, arg2));
9056 unlock_user(p, arg1, 0);
9057 return ret;
9058 #endif
9059 #ifdef TARGET_NR_lseek
9060 case TARGET_NR_lseek:
9061 return get_errno(lseek(arg1, arg2, arg3));
9062 #endif
9063 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9064 /* Alpha specific */
9065 case TARGET_NR_getxpid:
9066 cpu_env->ir[IR_A4] = getppid();
9067 return get_errno(getpid());
9068 #endif
9069 #ifdef TARGET_NR_getpid
9070 case TARGET_NR_getpid:
9071 return get_errno(getpid());
9072 #endif
9073 case TARGET_NR_mount:
9075 /* need to look at the data field */
9076 void *p2, *p3;
9078 if (arg1) {
9079 p = lock_user_string(arg1);
9080 if (!p) {
9081 return -TARGET_EFAULT;
9083 } else {
9084 p = NULL;
9087 p2 = lock_user_string(arg2);
9088 if (!p2) {
9089 if (arg1) {
9090 unlock_user(p, arg1, 0);
9092 return -TARGET_EFAULT;
9095 if (arg3) {
9096 p3 = lock_user_string(arg3);
9097 if (!p3) {
9098 if (arg1) {
9099 unlock_user(p, arg1, 0);
9101 unlock_user(p2, arg2, 0);
9102 return -TARGET_EFAULT;
9104 } else {
9105 p3 = NULL;
9108 /* FIXME - arg5 should be locked, but it isn't clear how to
9109 * do that since it's not guaranteed to be a NULL-terminated
9110  * string.
9111  */
9112 if (!arg5) {
9113 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9114 } else {
9115 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9117 ret = get_errno(ret);
9119 if (arg1) {
9120 unlock_user(p, arg1, 0);
9122 unlock_user(p2, arg2, 0);
9123 if (arg3) {
9124 unlock_user(p3, arg3, 0);
9127 return ret;
9128 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9129 #if defined(TARGET_NR_umount)
9130 case TARGET_NR_umount:
9131 #endif
9132 #if defined(TARGET_NR_oldumount)
9133 case TARGET_NR_oldumount:
9134 #endif
9135 if (!(p = lock_user_string(arg1)))
9136 return -TARGET_EFAULT;
9137 ret = get_errno(umount(p));
9138 unlock_user(p, arg1, 0);
9139 return ret;
9140 #endif
9141 #ifdef TARGET_NR_stime /* not on alpha */
9142 case TARGET_NR_stime:
9144 struct timespec ts;
9145 ts.tv_nsec = 0;
9146 if (get_user_sal(ts.tv_sec, arg1)) {
9147 return -TARGET_EFAULT;
9149 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9151 #endif
9152 #ifdef TARGET_NR_alarm /* not on alpha */
9153 case TARGET_NR_alarm:
9154 return alarm(arg1);
9155 #endif
9156 #ifdef TARGET_NR_pause /* not on alpha */
9157 case TARGET_NR_pause:
9158 if (!block_signals()) {
9159 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9161 return -TARGET_EINTR;
9162 #endif
9163 #ifdef TARGET_NR_utime
9164 case TARGET_NR_utime:
9166 struct utimbuf tbuf, *host_tbuf;
9167 struct target_utimbuf *target_tbuf;
9168 if (arg2) {
9169 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9170 return -TARGET_EFAULT;
9171 tbuf.actime = tswapal(target_tbuf->actime);
9172 tbuf.modtime = tswapal(target_tbuf->modtime);
9173 unlock_user_struct(target_tbuf, arg2, 0);
9174 host_tbuf = &tbuf;
9175 } else {
9176 host_tbuf = NULL;
9178 if (!(p = lock_user_string(arg1)))
9179 return -TARGET_EFAULT;
9180 ret = get_errno(utime(p, host_tbuf));
9181 unlock_user(p, arg1, 0);
9183 return ret;
9184 #endif
9185 #ifdef TARGET_NR_utimes
9186 case TARGET_NR_utimes:
9188 struct timeval *tvp, tv[2];
9189 if (arg2) {
9190 if (copy_from_user_timeval(&tv[0], arg2)
9191 || copy_from_user_timeval(&tv[1],
9192 arg2 + sizeof(struct target_timeval)))
9193 return -TARGET_EFAULT;
9194 tvp = tv;
9195 } else {
9196 tvp = NULL;
9198 if (!(p = lock_user_string(arg1)))
9199 return -TARGET_EFAULT;
9200 ret = get_errno(utimes(p, tvp));
9201 unlock_user(p, arg1, 0);
9203 return ret;
9204 #endif
9205 #if defined(TARGET_NR_futimesat)
9206 case TARGET_NR_futimesat:
9208 struct timeval *tvp, tv[2];
9209 if (arg3) {
9210 if (copy_from_user_timeval(&tv[0], arg3)
9211 || copy_from_user_timeval(&tv[1],
9212 arg3 + sizeof(struct target_timeval)))
9213 return -TARGET_EFAULT;
9214 tvp = tv;
9215 } else {
9216 tvp = NULL;
9218 if (!(p = lock_user_string(arg2))) {
9219 return -TARGET_EFAULT;
9221 ret = get_errno(futimesat(arg1, path(p), tvp));
9222 unlock_user(p, arg2, 0);
9224 return ret;
9225 #endif
9226 #ifdef TARGET_NR_access
9227 case TARGET_NR_access:
9228 if (!(p = lock_user_string(arg1))) {
9229 return -TARGET_EFAULT;
9231 ret = get_errno(access(path(p), arg2));
9232 unlock_user(p, arg1, 0);
9233 return ret;
9234 #endif
9235 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9236 case TARGET_NR_faccessat:
9237 if (!(p = lock_user_string(arg2))) {
9238 return -TARGET_EFAULT;
9240 ret = get_errno(faccessat(arg1, p, arg3, 0));
9241 unlock_user(p, arg2, 0);
9242 return ret;
9243 #endif
9244 #if defined(TARGET_NR_faccessat2)
9245 case TARGET_NR_faccessat2:
9246 if (!(p = lock_user_string(arg2))) {
9247 return -TARGET_EFAULT;
9249 ret = get_errno(faccessat(arg1, p, arg3, arg4));
9250 unlock_user(p, arg2, 0);
9251 return ret;
9252 #endif
9253 #ifdef TARGET_NR_nice /* not on alpha */
9254 case TARGET_NR_nice:
9255 return get_errno(nice(arg1));
9256 #endif
9257 case TARGET_NR_sync:
9258 sync();
9259 return 0;
9260 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9261 case TARGET_NR_syncfs:
9262 return get_errno(syncfs(arg1));
9263 #endif
9264 case TARGET_NR_kill:
9265 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9266 #ifdef TARGET_NR_rename
9267 case TARGET_NR_rename:
9269 void *p2;
9270 p = lock_user_string(arg1);
9271 p2 = lock_user_string(arg2);
9272 if (!p || !p2)
9273 ret = -TARGET_EFAULT;
9274 else
9275 ret = get_errno(rename(p, p2));
9276 unlock_user(p2, arg2, 0);
9277 unlock_user(p, arg1, 0);
9279 return ret;
9280 #endif
9281 #if defined(TARGET_NR_renameat)
9282 case TARGET_NR_renameat:
9284 void *p2;
9285 p = lock_user_string(arg2);
9286 p2 = lock_user_string(arg4);
9287 if (!p || !p2)
9288 ret = -TARGET_EFAULT;
9289 else
9290 ret = get_errno(renameat(arg1, p, arg3, p2));
9291 unlock_user(p2, arg4, 0);
9292 unlock_user(p, arg2, 0);
9294 return ret;
9295 #endif
9296 #if defined(TARGET_NR_renameat2)
9297 case TARGET_NR_renameat2:
9299 void *p2;
9300 p = lock_user_string(arg2);
9301 p2 = lock_user_string(arg4);
9302 if (!p || !p2) {
9303 ret = -TARGET_EFAULT;
9304 } else {
9305 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9307 unlock_user(p2, arg4, 0);
9308 unlock_user(p, arg2, 0);
9310 return ret;
9311 #endif
9312 #ifdef TARGET_NR_mkdir
9313 case TARGET_NR_mkdir:
9314 if (!(p = lock_user_string(arg1)))
9315 return -TARGET_EFAULT;
9316 ret = get_errno(mkdir(p, arg2));
9317 unlock_user(p, arg1, 0);
9318 return ret;
9319 #endif
9320 #if defined(TARGET_NR_mkdirat)
9321 case TARGET_NR_mkdirat:
9322 if (!(p = lock_user_string(arg2)))
9323 return -TARGET_EFAULT;
9324 ret = get_errno(mkdirat(arg1, p, arg3));
9325 unlock_user(p, arg2, 0);
9326 return ret;
9327 #endif
9328 #ifdef TARGET_NR_rmdir
9329 case TARGET_NR_rmdir:
9330 if (!(p = lock_user_string(arg1)))
9331 return -TARGET_EFAULT;
9332 ret = get_errno(rmdir(p));
9333 unlock_user(p, arg1, 0);
9334 return ret;
9335 #endif
9336 case TARGET_NR_dup:
9337 ret = get_errno(dup(arg1));
9338 if (ret >= 0) {
9339 fd_trans_dup(arg1, ret);
9341 return ret;
9342 #ifdef TARGET_NR_pipe
9343 case TARGET_NR_pipe:
9344 return do_pipe(cpu_env, arg1, 0, 0);
9345 #endif
9346 #ifdef TARGET_NR_pipe2
9347 case TARGET_NR_pipe2:
9348 return do_pipe(cpu_env, arg1,
9349 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9350 #endif
9351 case TARGET_NR_times:
9353 struct target_tms *tmsp;
9354 struct tms tms;
9355 ret = get_errno(times(&tms));
9356 if (arg1) {
9357 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9358 if (!tmsp)
9359 return -TARGET_EFAULT;
9360 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9361 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9362 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9363 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9365 if (!is_error(ret))
9366 ret = host_to_target_clock_t(ret);
9368 return ret;
9369 case TARGET_NR_acct:
9370 if (arg1 == 0) {
9371 ret = get_errno(acct(NULL));
9372 } else {
9373 if (!(p = lock_user_string(arg1))) {
9374 return -TARGET_EFAULT;
9376 ret = get_errno(acct(path(p)));
9377 unlock_user(p, arg1, 0);
9379 return ret;
9380 #ifdef TARGET_NR_umount2
9381 case TARGET_NR_umount2:
9382 if (!(p = lock_user_string(arg1)))
9383 return -TARGET_EFAULT;
9384 ret = get_errno(umount2(p, arg2));
9385 unlock_user(p, arg1, 0);
9386 return ret;
9387 #endif
9388 case TARGET_NR_ioctl:
9389 return do_ioctl(arg1, arg2, arg3);
9390 #ifdef TARGET_NR_fcntl
9391 case TARGET_NR_fcntl:
9392 return do_fcntl(arg1, arg2, arg3);
9393 #endif
9394 case TARGET_NR_setpgid:
9395 return get_errno(setpgid(arg1, arg2));
9396 case TARGET_NR_umask:
9397 return get_errno(umask(arg1));
9398 case TARGET_NR_chroot:
9399 if (!(p = lock_user_string(arg1)))
9400 return -TARGET_EFAULT;
9401 ret = get_errno(chroot(p));
9402 unlock_user(p, arg1, 0);
9403 return ret;
9404 #ifdef TARGET_NR_dup2
9405 case TARGET_NR_dup2:
9406 ret = get_errno(dup2(arg1, arg2));
9407 if (ret >= 0) {
9408 fd_trans_dup(arg1, arg2);
9410 return ret;
9411 #endif
9412 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9413 case TARGET_NR_dup3:
9415 int host_flags;
9417 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9418 return -EINVAL;
9420 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9421 ret = get_errno(dup3(arg1, arg2, host_flags));
9422 if (ret >= 0) {
9423 fd_trans_dup(arg1, arg2);
9425 return ret;
9427 #endif
9428 #ifdef TARGET_NR_getppid /* not on alpha */
9429 case TARGET_NR_getppid:
9430 return get_errno(getppid());
9431 #endif
9432 #ifdef TARGET_NR_getpgrp
9433 case TARGET_NR_getpgrp:
9434 return get_errno(getpgrp());
9435 #endif
9436 case TARGET_NR_setsid:
9437 return get_errno(setsid());
9438 #ifdef TARGET_NR_sigaction
9439 case TARGET_NR_sigaction:
9441 #if defined(TARGET_MIPS)
9442 struct target_sigaction act, oact, *pact, *old_act;
9444 if (arg2) {
9445 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9446 return -TARGET_EFAULT;
9447 act._sa_handler = old_act->_sa_handler;
9448 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9449 act.sa_flags = old_act->sa_flags;
9450 unlock_user_struct(old_act, arg2, 0);
9451 pact = &act;
9452 } else {
9453 pact = NULL;
9456 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9458 if (!is_error(ret) && arg3) {
9459 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9460 return -TARGET_EFAULT;
9461 old_act->_sa_handler = oact._sa_handler;
9462 old_act->sa_flags = oact.sa_flags;
9463 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9464 old_act->sa_mask.sig[1] = 0;
9465 old_act->sa_mask.sig[2] = 0;
9466 old_act->sa_mask.sig[3] = 0;
9467 unlock_user_struct(old_act, arg3, 1);
9469 #else
9470 struct target_old_sigaction *old_act;
9471 struct target_sigaction act, oact, *pact;
9472 if (arg2) {
9473 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9474 return -TARGET_EFAULT;
9475 act._sa_handler = old_act->_sa_handler;
9476 target_siginitset(&act.sa_mask, old_act->sa_mask);
9477 act.sa_flags = old_act->sa_flags;
9478 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9479 act.sa_restorer = old_act->sa_restorer;
9480 #endif
9481 unlock_user_struct(old_act, arg2, 0);
9482 pact = &act;
9483 } else {
9484 pact = NULL;
9486 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9487 if (!is_error(ret) && arg3) {
9488 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9489 return -TARGET_EFAULT;
9490 old_act->_sa_handler = oact._sa_handler;
9491 old_act->sa_mask = oact.sa_mask.sig[0];
9492 old_act->sa_flags = oact.sa_flags;
9493 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9494 old_act->sa_restorer = oact.sa_restorer;
9495 #endif
9496 unlock_user_struct(old_act, arg3, 1);
9498 #endif
9500 return ret;
9501 #endif
9502 case TARGET_NR_rt_sigaction:
9503 {
9504 /*
9505  * For Alpha and SPARC this is a 5 argument syscall, with
9506  * a 'restorer' parameter which must be copied into the
9507  * sa_restorer field of the sigaction struct.
9508  * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9509  * and arg5 is the sigsetsize.
9510  */
9511 #if defined(TARGET_ALPHA)
9512 target_ulong sigsetsize = arg4;
9513 target_ulong restorer = arg5;
9514 #elif defined(TARGET_SPARC)
9515 target_ulong restorer = arg4;
9516 target_ulong sigsetsize = arg5;
9517 #else
9518 target_ulong sigsetsize = arg4;
9519 target_ulong restorer = 0;
9520 #endif
9521 struct target_sigaction *act = NULL;
9522 struct target_sigaction *oact = NULL;
9524 if (sigsetsize != sizeof(target_sigset_t)) {
9525 return -TARGET_EINVAL;
9527 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9528 return -TARGET_EFAULT;
9530 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9531 ret = -TARGET_EFAULT;
9532 } else {
9533 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9534 if (oact) {
9535 unlock_user_struct(oact, arg3, 1);
9538 if (act) {
9539 unlock_user_struct(act, arg2, 0);
9542 return ret;
9543 #ifdef TARGET_NR_sgetmask /* not on alpha */
9544 case TARGET_NR_sgetmask:
9546 sigset_t cur_set;
9547 abi_ulong target_set;
9548 ret = do_sigprocmask(0, NULL, &cur_set);
9549 if (!ret) {
9550 host_to_target_old_sigset(&target_set, &cur_set);
9551 ret = target_set;
9554 return ret;
9555 #endif
9556 #ifdef TARGET_NR_ssetmask /* not on alpha */
9557 case TARGET_NR_ssetmask:
9559 sigset_t set, oset;
9560 abi_ulong target_set = arg1;
9561 target_to_host_old_sigset(&set, &target_set);
9562 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9563 if (!ret) {
9564 host_to_target_old_sigset(&target_set, &oset);
9565 ret = target_set;
9568 return ret;
9569 #endif
9570 #ifdef TARGET_NR_sigprocmask
9571 case TARGET_NR_sigprocmask:
9573 #if defined(TARGET_ALPHA)
9574 sigset_t set, oldset;
9575 abi_ulong mask;
9576 int how;
9578 switch (arg1) {
9579 case TARGET_SIG_BLOCK:
9580 how = SIG_BLOCK;
9581 break;
9582 case TARGET_SIG_UNBLOCK:
9583 how = SIG_UNBLOCK;
9584 break;
9585 case TARGET_SIG_SETMASK:
9586 how = SIG_SETMASK;
9587 break;
9588 default:
9589 return -TARGET_EINVAL;
9591 mask = arg2;
9592 target_to_host_old_sigset(&set, &mask);
9594 ret = do_sigprocmask(how, &set, &oldset);
9595 if (!is_error(ret)) {
9596 host_to_target_old_sigset(&mask, &oldset);
9597 ret = mask;
9598 cpu_env->ir[IR_V0] = 0; /* force no error */
9600 #else
9601 sigset_t set, oldset, *set_ptr;
9602 int how;
9604 if (arg2) {
9605 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9606 if (!p) {
9607 return -TARGET_EFAULT;
9609 target_to_host_old_sigset(&set, p);
9610 unlock_user(p, arg2, 0);
9611 set_ptr = &set;
9612 switch (arg1) {
9613 case TARGET_SIG_BLOCK:
9614 how = SIG_BLOCK;
9615 break;
9616 case TARGET_SIG_UNBLOCK:
9617 how = SIG_UNBLOCK;
9618 break;
9619 case TARGET_SIG_SETMASK:
9620 how = SIG_SETMASK;
9621 break;
9622 default:
9623 return -TARGET_EINVAL;
9625 } else {
9626 how = 0;
9627 set_ptr = NULL;
9629 ret = do_sigprocmask(how, set_ptr, &oldset);
9630 if (!is_error(ret) && arg3) {
9631 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9632 return -TARGET_EFAULT;
9633 host_to_target_old_sigset(p, &oldset);
9634 unlock_user(p, arg3, sizeof(target_sigset_t));
9636 #endif
9638 return ret;
9639 #endif
9640 case TARGET_NR_rt_sigprocmask:
9642 int how = arg1;
9643 sigset_t set, oldset, *set_ptr;
9645 if (arg4 != sizeof(target_sigset_t)) {
9646 return -TARGET_EINVAL;
9649 if (arg2) {
9650 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9651 if (!p) {
9652 return -TARGET_EFAULT;
9654 target_to_host_sigset(&set, p);
9655 unlock_user(p, arg2, 0);
9656 set_ptr = &set;
9657 switch(how) {
9658 case TARGET_SIG_BLOCK:
9659 how = SIG_BLOCK;
9660 break;
9661 case TARGET_SIG_UNBLOCK:
9662 how = SIG_UNBLOCK;
9663 break;
9664 case TARGET_SIG_SETMASK:
9665 how = SIG_SETMASK;
9666 break;
9667 default:
9668 return -TARGET_EINVAL;
9670 } else {
9671 how = 0;
9672 set_ptr = NULL;
9674 ret = do_sigprocmask(how, set_ptr, &oldset);
9675 if (!is_error(ret) && arg3) {
9676 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9677 return -TARGET_EFAULT;
9678 host_to_target_sigset(p, &oldset);
9679 unlock_user(p, arg3, sizeof(target_sigset_t));
9682 return ret;
9683 #ifdef TARGET_NR_sigpending
9684 case TARGET_NR_sigpending:
9686 sigset_t set;
9687 ret = get_errno(sigpending(&set));
9688 if (!is_error(ret)) {
9689 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9690 return -TARGET_EFAULT;
9691 host_to_target_old_sigset(p, &set);
9692 unlock_user(p, arg1, sizeof(target_sigset_t));
9695 return ret;
9696 #endif
9697 case TARGET_NR_rt_sigpending:
9699 sigset_t set;
9701 /* Yes, this check is >, not != like most. We follow the kernel's
9702 * logic and it does it like this because it implements
9703 * NR_sigpending through the same code path, and in that case
9704  * the old_sigset_t is smaller in size.
9705  */
9706 if (arg2 > sizeof(target_sigset_t)) {
9707 return -TARGET_EINVAL;
9710 ret = get_errno(sigpending(&set));
9711 if (!is_error(ret)) {
9712 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9713 return -TARGET_EFAULT;
9714 host_to_target_sigset(p, &set);
9715 unlock_user(p, arg1, sizeof(target_sigset_t));
9718 return ret;
9719 #ifdef TARGET_NR_sigsuspend
9720 case TARGET_NR_sigsuspend:
9722 sigset_t *set;
9724 #if defined(TARGET_ALPHA)
9725 TaskState *ts = cpu->opaque;
9726 /* target_to_host_old_sigset will bswap back */
9727 abi_ulong mask = tswapal(arg1);
9728 set = &ts->sigsuspend_mask;
9729 target_to_host_old_sigset(set, &mask);
9730 #else
9731 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9732 if (ret != 0) {
9733 return ret;
9735 #endif
9736 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9737 finish_sigsuspend_mask(ret);
9739 return ret;
9740 #endif
9741 case TARGET_NR_rt_sigsuspend:
9743 sigset_t *set;
9745 ret = process_sigsuspend_mask(&set, arg1, arg2);
9746 if (ret != 0) {
9747 return ret;
9749 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9750 finish_sigsuspend_mask(ret);
9752 return ret;
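/*
 * For sigsuspend and rt_sigsuspend the guest's temporary mask must
 * be in force while the host call sleeps; process_sigsuspend_mask()
 * and finish_sigsuspend_mask() bracket the call so the task's
 * blocked-signal state is set up and restored consistently.
 */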
9753 #ifdef TARGET_NR_rt_sigtimedwait
9754 case TARGET_NR_rt_sigtimedwait:
9756 sigset_t set;
9757 struct timespec uts, *puts;
9758 siginfo_t uinfo;
9760 if (arg4 != sizeof(target_sigset_t)) {
9761 return -TARGET_EINVAL;
9764 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9765 return -TARGET_EFAULT;
9766 target_to_host_sigset(&set, p);
9767 unlock_user(p, arg1, 0);
9768 if (arg3) {
9769 puts = &uts;
9770 if (target_to_host_timespec(puts, arg3)) {
9771 return -TARGET_EFAULT;
9773 } else {
9774 puts = NULL;
9776 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9777 SIGSET_T_SIZE));
9778 if (!is_error(ret)) {
9779 if (arg2) {
9780 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9782 if (!p) {
9783 return -TARGET_EFAULT;
9785 host_to_target_siginfo(p, &uinfo);
9786 unlock_user(p, arg2, sizeof(target_siginfo_t));
9788 ret = host_to_target_signal(ret);
9791 return ret;
9792 #endif
9793 #ifdef TARGET_NR_rt_sigtimedwait_time64
9794 case TARGET_NR_rt_sigtimedwait_time64:
9796 sigset_t set;
9797 struct timespec uts, *puts;
9798 siginfo_t uinfo;
9800 if (arg4 != sizeof(target_sigset_t)) {
9801 return -TARGET_EINVAL;
9804 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9805 if (!p) {
9806 return -TARGET_EFAULT;
9808 target_to_host_sigset(&set, p);
9809 unlock_user(p, arg1, 0);
9810 if (arg3) {
9811 puts = &uts;
9812 if (target_to_host_timespec64(puts, arg3)) {
9813 return -TARGET_EFAULT;
9815 } else {
9816 puts = NULL;
9818 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9819 SIGSET_T_SIZE));
9820 if (!is_error(ret)) {
9821 if (arg2) {
9822 p = lock_user(VERIFY_WRITE, arg2,
9823 sizeof(target_siginfo_t), 0);
9824 if (!p) {
9825 return -TARGET_EFAULT;
9827 host_to_target_siginfo(p, &uinfo);
9828 unlock_user(p, arg2, sizeof(target_siginfo_t));
9830 ret = host_to_target_signal(ret);
9833 return ret;
9834 #endif
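/*
 * rt_sigtimedwait and rt_sigtimedwait_time64 differ only in the
 * timespec layout read from guest memory (target_to_host_timespec
 * vs. target_to_host_timespec64); both funnel into the same
 * safe_rt_sigtimedwait host call and convert the resulting siginfo
 * and signal number back to target conventions.
 */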
9835 case TARGET_NR_rt_sigqueueinfo:
9837 siginfo_t uinfo;
9839 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9840 if (!p) {
9841 return -TARGET_EFAULT;
9843 target_to_host_siginfo(&uinfo, p);
9844 unlock_user(p, arg3, 0);
9845 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9847 return ret;
9848 case TARGET_NR_rt_tgsigqueueinfo:
9850 siginfo_t uinfo;
9852 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9853 if (!p) {
9854 return -TARGET_EFAULT;
9856 target_to_host_siginfo(&uinfo, p);
9857 unlock_user(p, arg4, 0);
9858 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9860 return ret;
9861 #ifdef TARGET_NR_sigreturn
9862 case TARGET_NR_sigreturn:
9863 if (block_signals()) {
9864 return -QEMU_ERESTARTSYS;
9866 return do_sigreturn(cpu_env);
9867 #endif
9868 case TARGET_NR_rt_sigreturn:
9869 if (block_signals()) {
9870 return -QEMU_ERESTARTSYS;
9872 return do_rt_sigreturn(cpu_env);
9873 case TARGET_NR_sethostname:
9874 if (!(p = lock_user_string(arg1)))
9875 return -TARGET_EFAULT;
9876 ret = get_errno(sethostname(p, arg2));
9877 unlock_user(p, arg1, 0);
9878 return ret;
9879 #ifdef TARGET_NR_setrlimit
9880 case TARGET_NR_setrlimit:
9882 int resource = target_to_host_resource(arg1);
9883 struct target_rlimit *target_rlim;
9884 struct rlimit rlim;
9885 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9886 return -TARGET_EFAULT;
9887 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9888 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9889 unlock_user_struct(target_rlim, arg2, 0);
9890 /*
9891 * If we just passed through resource limit settings for memory then
9892 * they would also apply to QEMU's own allocations, and QEMU will
9893 * crash or hang or die if its allocations fail. Ideally we would
9894 * track the guest allocations in QEMU and apply the limits ourselves.
9895 * For now, just tell the guest the call succeeded but don't actually
9896 * limit anything.
9897 */
9898 if (resource != RLIMIT_AS &&
9899 resource != RLIMIT_DATA &&
9900 resource != RLIMIT_STACK) {
9901 return get_errno(setrlimit(resource, &rlim));
9902 } else {
9903 return 0;
9906 #endif
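/*
 * Illustration (not part of the build): from the guest's point of view,
 * the stubbed memory limits mean setrlimit(RLIMIT_AS, ...) reports
 * success while a later oversized allocation may still succeed. A
 * minimal guest-side sketch, assuming a hypothetical 1 MiB cap:
 */
#if 0
#include <sys/resource.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    struct rlimit rl = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };
    if (setrlimit(RLIMIT_AS, &rl) == 0) {
        /* Under qemu-user this "succeeds" without limiting anything, */
        /* so a 16 MiB allocation can still go through.               */
        void *mem = malloc(16 << 20);
        printf("allocation %s\n", mem ? "succeeded" : "failed");
        free(mem);
    }
    return 0;
}
#endif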
9907 #ifdef TARGET_NR_getrlimit
9908 case TARGET_NR_getrlimit:
9910 int resource = target_to_host_resource(arg1);
9911 struct target_rlimit *target_rlim;
9912 struct rlimit rlim;
9914 ret = get_errno(getrlimit(resource, &rlim));
9915 if (!is_error(ret)) {
9916 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9917 return -TARGET_EFAULT;
9918 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9919 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9920 unlock_user_struct(target_rlim, arg2, 1);
9923 return ret;
9924 #endif
9925 case TARGET_NR_getrusage:
9927 struct rusage rusage;
9928 ret = get_errno(getrusage(arg1, &rusage));
9929 if (!is_error(ret)) {
9930 ret = host_to_target_rusage(arg2, &rusage);
9933 return ret;
9934 #if defined(TARGET_NR_gettimeofday)
9935 case TARGET_NR_gettimeofday:
9937 struct timeval tv;
9938 struct timezone tz;
9940 ret = get_errno(gettimeofday(&tv, &tz));
9941 if (!is_error(ret)) {
9942 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9943 return -TARGET_EFAULT;
9945 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9946 return -TARGET_EFAULT;
9950 return ret;
9951 #endif
9952 #if defined(TARGET_NR_settimeofday)
9953 case TARGET_NR_settimeofday:
9955 struct timeval tv, *ptv = NULL;
9956 struct timezone tz, *ptz = NULL;
9958 if (arg1) {
9959 if (copy_from_user_timeval(&tv, arg1)) {
9960 return -TARGET_EFAULT;
9962 ptv = &tv;
9965 if (arg2) {
9966 if (copy_from_user_timezone(&tz, arg2)) {
9967 return -TARGET_EFAULT;
9969 ptz = &tz;
9972 return get_errno(settimeofday(ptv, ptz));
9974 #endif
9975 #if defined(TARGET_NR_select)
9976 case TARGET_NR_select:
9977 #if defined(TARGET_WANT_NI_OLD_SELECT)
9978 /* some architectures used to have old_select here
9979 * but now return ENOSYS for it.
9980 */
9981 ret = -TARGET_ENOSYS;
9982 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9983 ret = do_old_select(arg1);
9984 #else
9985 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9986 #endif
9987 return ret;
9988 #endif
9989 #ifdef TARGET_NR_pselect6
9990 case TARGET_NR_pselect6:
9991 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9992 #endif
9993 #ifdef TARGET_NR_pselect6_time64
9994 case TARGET_NR_pselect6_time64:
9995 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9996 #endif
9997 #ifdef TARGET_NR_symlink
9998 case TARGET_NR_symlink:
10000 void *p2;
10001 p = lock_user_string(arg1);
10002 p2 = lock_user_string(arg2);
10003 if (!p || !p2)
10004 ret = -TARGET_EFAULT;
10005 else
10006 ret = get_errno(symlink(p, p2));
10007 unlock_user(p2, arg2, 0);
10008 unlock_user(p, arg1, 0);
10010 return ret;
10011 #endif
10012 #if defined(TARGET_NR_symlinkat)
10013 case TARGET_NR_symlinkat:
10015 void *p2;
10016 p = lock_user_string(arg1);
10017 p2 = lock_user_string(arg3);
10018 if (!p || !p2)
10019 ret = -TARGET_EFAULT;
10020 else
10021 ret = get_errno(symlinkat(p, arg2, p2));
10022 unlock_user(p2, arg3, 0);
10023 unlock_user(p, arg1, 0);
10025 return ret;
10026 #endif
10027 #ifdef TARGET_NR_readlink
10028 case TARGET_NR_readlink:
10030 void *p2;
10031 p = lock_user_string(arg1);
10032 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10033 if (!p || !p2) {
10034 ret = -TARGET_EFAULT;
10035 } else if (!arg3) {
10036 /* Short circuit this for the magic exe check. */
10037 ret = -TARGET_EINVAL;
10038 } else if (is_proc_myself((const char *)p, "exe")) {
10039 /*
10040 * Don't worry about sign mismatch as earlier mapping
10041 * logic would have thrown a bad address error.
10042 */
10043 ret = MIN(strlen(exec_path), arg3);
10044 /* We cannot NUL terminate the string. */
10045 memcpy(p2, exec_path, ret);
10046 } else {
10047 ret = get_errno(readlink(path(p), p2, arg3));
10049 unlock_user(p2, arg2, ret);
10050 unlock_user(p, arg1, 0);
10052 return ret;
10053 #endif
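/*
 * Illustration (not part of the build): a guest reading its own binary
 * path hits the is_proc_myself() special case above, so it sees the
 * emulated executable's path rather than the qemu binary's:
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    char buf[4096];
    ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
    if (n >= 0) {
        buf[n] = '\0';          /* readlink does not NUL-terminate */
        printf("%s\n", buf);    /* guest binary, not qemu itself   */
    }
    return 0;
}
#endif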
10054 #if defined(TARGET_NR_readlinkat)
10055 case TARGET_NR_readlinkat:
10057 void *p2;
10058 p = lock_user_string(arg2);
10059 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10060 if (!p || !p2) {
10061 ret = -TARGET_EFAULT;
10062 } else if (!arg4) {
10063 /* Short circuit this for the magic exe check. */
10064 ret = -TARGET_EINVAL;
10065 } else if (is_proc_myself((const char *)p, "exe")) {
10066 /*
10067 * Don't worry about sign mismatch as earlier mapping
10068 * logic would have thrown a bad address error.
10069 */
10070 ret = MIN(strlen(exec_path), arg4);
10071 /* We cannot NUL terminate the string. */
10072 memcpy(p2, exec_path, ret);
10073 } else {
10074 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10076 unlock_user(p2, arg3, ret);
10077 unlock_user(p, arg2, 0);
10079 return ret;
10080 #endif
10081 #ifdef TARGET_NR_swapon
10082 case TARGET_NR_swapon:
10083 if (!(p = lock_user_string(arg1)))
10084 return -TARGET_EFAULT;
10085 ret = get_errno(swapon(p, arg2));
10086 unlock_user(p, arg1, 0);
10087 return ret;
10088 #endif
10089 case TARGET_NR_reboot:
10090 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10091 /* arg4 must be ignored in all other cases */
10092 p = lock_user_string(arg4);
10093 if (!p) {
10094 return -TARGET_EFAULT;
10096 ret = get_errno(reboot(arg1, arg2, arg3, p));
10097 unlock_user(p, arg4, 0);
10098 } else {
10099 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10101 return ret;
10102 #ifdef TARGET_NR_mmap
10103 case TARGET_NR_mmap:
10104 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10105 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10106 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10107 || defined(TARGET_S390X)
10109 abi_ulong *v;
10110 abi_ulong v1, v2, v3, v4, v5, v6;
10111 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10112 return -TARGET_EFAULT;
10113 v1 = tswapal(v[0]);
10114 v2 = tswapal(v[1]);
10115 v3 = tswapal(v[2]);
10116 v4 = tswapal(v[3]);
10117 v5 = tswapal(v[4]);
10118 v6 = tswapal(v[5]);
10119 unlock_user(v, arg1, 0);
10120 ret = get_errno(target_mmap(v1, v2, v3,
10121 target_to_host_bitmask(v4, mmap_flags_tbl),
10122 v5, v6));
10124 #else
10125 /* mmap pointers are always untagged */
10126 ret = get_errno(target_mmap(arg1, arg2, arg3,
10127 target_to_host_bitmask(arg4, mmap_flags_tbl),
10128 arg5,
10129 arg6));
10130 #endif
10131 return ret;
10132 #endif
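/*
 * Illustration (not part of the build): on the 32-bit targets listed
 * above, the legacy sys_mmap receives one pointer to a block of six
 * abi_ulong values instead of six register arguments, which is what
 * the lock_user()/tswapal() unpacking handles. A hypothetical
 * guest-side fragment:
 */
#if 0
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

void *old_style_mmap(void)
{
    unsigned long argblock[6] = {
        0,                           /* addr: let the kernel choose */
        4096,                        /* length                      */
        PROT_READ | PROT_WRITE,      /* prot                        */
        MAP_PRIVATE | MAP_ANONYMOUS, /* flags                       */
        (unsigned long)-1,           /* fd (anonymous mapping)      */
        0                            /* byte offset                 */
    };
    /* one pointer argument, not six registers */
    return (void *)syscall(__NR_mmap, argblock);
}
#endif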
10133 #ifdef TARGET_NR_mmap2
10134 case TARGET_NR_mmap2:
10135 #ifndef MMAP_SHIFT
10136 #define MMAP_SHIFT 12
10137 #endif
10138 ret = target_mmap(arg1, arg2, arg3,
10139 target_to_host_bitmask(arg4, mmap_flags_tbl),
10140 arg5, arg6 << MMAP_SHIFT);
10141 return get_errno(ret);
10142 #endif
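/*
 * Illustration: mmap2 passes the file offset in 4096-byte units, so
 * arg6 << MMAP_SHIFT converts units back to bytes; e.g. arg6 = 0x10
 * maps byte offset 0x10000 (64 KiB). This lets a 32-bit guest address
 * offsets beyond 4 GiB in large files.
 */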
10143 case TARGET_NR_munmap:
10144 arg1 = cpu_untagged_addr(cpu, arg1);
10145 return get_errno(target_munmap(arg1, arg2));
10146 case TARGET_NR_mprotect:
10147 arg1 = cpu_untagged_addr(cpu, arg1);
10149 TaskState *ts = cpu->opaque;
10150 /* Special hack to detect libc making the stack executable. */
10151 if ((arg3 & PROT_GROWSDOWN)
10152 && arg1 >= ts->info->stack_limit
10153 && arg1 <= ts->info->start_stack) {
10154 arg3 &= ~PROT_GROWSDOWN;
10155 arg2 = arg2 + arg1 - ts->info->stack_limit;
10156 arg1 = ts->info->stack_limit;
10159 return get_errno(target_mprotect(arg1, arg2, arg3));
10160 #ifdef TARGET_NR_mremap
10161 case TARGET_NR_mremap:
10162 arg1 = cpu_untagged_addr(cpu, arg1);
10163 /* mremap new_addr (arg5) is always untagged */
10164 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10165 #endif
10166 /* ??? msync/mlock/munlock are broken for softmmu. */
10167 #ifdef TARGET_NR_msync
10168 case TARGET_NR_msync:
10169 return get_errno(msync(g2h(cpu, arg1), arg2,
10170 target_to_host_msync_arg(arg3)));
10171 #endif
10172 #ifdef TARGET_NR_mlock
10173 case TARGET_NR_mlock:
10174 return get_errno(mlock(g2h(cpu, arg1), arg2));
10175 #endif
10176 #ifdef TARGET_NR_munlock
10177 case TARGET_NR_munlock:
10178 return get_errno(munlock(g2h(cpu, arg1), arg2));
10179 #endif
10180 #ifdef TARGET_NR_mlockall
10181 case TARGET_NR_mlockall:
10182 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10183 #endif
10184 #ifdef TARGET_NR_munlockall
10185 case TARGET_NR_munlockall:
10186 return get_errno(munlockall());
10187 #endif
10188 #ifdef TARGET_NR_truncate
10189 case TARGET_NR_truncate:
10190 if (!(p = lock_user_string(arg1)))
10191 return -TARGET_EFAULT;
10192 ret = get_errno(truncate(p, arg2));
10193 unlock_user(p, arg1, 0);
10194 return ret;
10195 #endif
10196 #ifdef TARGET_NR_ftruncate
10197 case TARGET_NR_ftruncate:
10198 return get_errno(ftruncate(arg1, arg2));
10199 #endif
10200 case TARGET_NR_fchmod:
10201 return get_errno(fchmod(arg1, arg2));
10202 #if defined(TARGET_NR_fchmodat)
10203 case TARGET_NR_fchmodat:
10204 if (!(p = lock_user_string(arg2)))
10205 return -TARGET_EFAULT;
10206 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10207 unlock_user(p, arg2, 0);
10208 return ret;
10209 #endif
10210 case TARGET_NR_getpriority:
10211 /* Note that negative values are valid for getpriority, so we must
10212 differentiate based on errno settings. */
10213 errno = 0;
10214 ret = getpriority(arg1, arg2);
10215 if (ret == -1 && errno != 0) {
10216 return -host_to_target_errno(errno);
10218 #ifdef TARGET_ALPHA
10219 /* Return value is the unbiased priority. Signal no error. */
10220 cpu_env->ir[IR_V0] = 0;
10221 #else
10222 /* Return value is a biased priority to avoid negative numbers. */
10223 ret = 20 - ret;
10224 #endif
10225 return ret;
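/*
 * Illustration: the bias maps nice levels -20..19 onto 40..1 so the
 * return value can never be mistaken for a negative errno. E.g. a
 * nice of -5 is returned as 20 - (-5) = 25, and guest libc recovers
 * it as 20 - 25 = -5.
 */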
10226 case TARGET_NR_setpriority:
10227 return get_errno(setpriority(arg1, arg2, arg3));
10228 #ifdef TARGET_NR_statfs
10229 case TARGET_NR_statfs:
10230 if (!(p = lock_user_string(arg1))) {
10231 return -TARGET_EFAULT;
10233 ret = get_errno(statfs(path(p), &stfs));
10234 unlock_user(p, arg1, 0);
10235 convert_statfs:
10236 if (!is_error(ret)) {
10237 struct target_statfs *target_stfs;
10239 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10240 return -TARGET_EFAULT;
10241 __put_user(stfs.f_type, &target_stfs->f_type);
10242 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10243 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10244 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10245 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10246 __put_user(stfs.f_files, &target_stfs->f_files);
10247 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10248 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10249 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10250 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10251 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10252 #ifdef _STATFS_F_FLAGS
10253 __put_user(stfs.f_flags, &target_stfs->f_flags);
10254 #else
10255 __put_user(0, &target_stfs->f_flags);
10256 #endif
10257 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10258 unlock_user_struct(target_stfs, arg2, 1);
10260 return ret;
10261 #endif
10262 #ifdef TARGET_NR_fstatfs
10263 case TARGET_NR_fstatfs:
10264 ret = get_errno(fstatfs(arg1, &stfs));
10265 goto convert_statfs;
10266 #endif
10267 #ifdef TARGET_NR_statfs64
10268 case TARGET_NR_statfs64:
10269 if (!(p = lock_user_string(arg1))) {
10270 return -TARGET_EFAULT;
10272 ret = get_errno(statfs(path(p), &stfs));
10273 unlock_user(p, arg1, 0);
10274 convert_statfs64:
10275 if (!is_error(ret)) {
10276 struct target_statfs64 *target_stfs;
10278 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10279 return -TARGET_EFAULT;
10280 __put_user(stfs.f_type, &target_stfs->f_type);
10281 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10282 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10283 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10284 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10285 __put_user(stfs.f_files, &target_stfs->f_files);
10286 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10287 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10288 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10289 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10290 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10291 #ifdef _STATFS_F_FLAGS
10292 __put_user(stfs.f_flags, &target_stfs->f_flags);
10293 #else
10294 __put_user(0, &target_stfs->f_flags);
10295 #endif
10296 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10297 unlock_user_struct(target_stfs, arg3, 1);
10299 return ret;
10300 case TARGET_NR_fstatfs64:
10301 ret = get_errno(fstatfs(arg1, &stfs));
10302 goto convert_statfs64;
10303 #endif
10304 #ifdef TARGET_NR_socketcall
10305 case TARGET_NR_socketcall:
10306 return do_socketcall(arg1, arg2);
10307 #endif
10308 #ifdef TARGET_NR_accept
10309 case TARGET_NR_accept:
10310 return do_accept4(arg1, arg2, arg3, 0);
10311 #endif
10312 #ifdef TARGET_NR_accept4
10313 case TARGET_NR_accept4:
10314 return do_accept4(arg1, arg2, arg3, arg4);
10315 #endif
10316 #ifdef TARGET_NR_bind
10317 case TARGET_NR_bind:
10318 return do_bind(arg1, arg2, arg3);
10319 #endif
10320 #ifdef TARGET_NR_connect
10321 case TARGET_NR_connect:
10322 return do_connect(arg1, arg2, arg3);
10323 #endif
10324 #ifdef TARGET_NR_getpeername
10325 case TARGET_NR_getpeername:
10326 return do_getpeername(arg1, arg2, arg3);
10327 #endif
10328 #ifdef TARGET_NR_getsockname
10329 case TARGET_NR_getsockname:
10330 return do_getsockname(arg1, arg2, arg3);
10331 #endif
10332 #ifdef TARGET_NR_getsockopt
10333 case TARGET_NR_getsockopt:
10334 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10335 #endif
10336 #ifdef TARGET_NR_listen
10337 case TARGET_NR_listen:
10338 return get_errno(listen(arg1, arg2));
10339 #endif
10340 #ifdef TARGET_NR_recv
10341 case TARGET_NR_recv:
10342 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10343 #endif
10344 #ifdef TARGET_NR_recvfrom
10345 case TARGET_NR_recvfrom:
10346 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10347 #endif
10348 #ifdef TARGET_NR_recvmsg
10349 case TARGET_NR_recvmsg:
10350 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10351 #endif
10352 #ifdef TARGET_NR_send
10353 case TARGET_NR_send:
10354 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10355 #endif
10356 #ifdef TARGET_NR_sendmsg
10357 case TARGET_NR_sendmsg:
10358 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10359 #endif
10360 #ifdef TARGET_NR_sendmmsg
10361 case TARGET_NR_sendmmsg:
10362 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10363 #endif
10364 #ifdef TARGET_NR_recvmmsg
10365 case TARGET_NR_recvmmsg:
10366 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10367 #endif
10368 #ifdef TARGET_NR_sendto
10369 case TARGET_NR_sendto:
10370 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10371 #endif
10372 #ifdef TARGET_NR_shutdown
10373 case TARGET_NR_shutdown:
10374 return get_errno(shutdown(arg1, arg2));
10375 #endif
10376 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10377 case TARGET_NR_getrandom:
10378 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10379 if (!p) {
10380 return -TARGET_EFAULT;
10382 ret = get_errno(getrandom(p, arg2, arg3));
10383 unlock_user(p, arg1, ret);
10384 return ret;
10385 #endif
10386 #ifdef TARGET_NR_socket
10387 case TARGET_NR_socket:
10388 return do_socket(arg1, arg2, arg3);
10389 #endif
10390 #ifdef TARGET_NR_socketpair
10391 case TARGET_NR_socketpair:
10392 return do_socketpair(arg1, arg2, arg3, arg4);
10393 #endif
10394 #ifdef TARGET_NR_setsockopt
10395 case TARGET_NR_setsockopt:
10396 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10397 #endif
10398 #if defined(TARGET_NR_syslog)
10399 case TARGET_NR_syslog:
10401 int len = arg2;
10403 switch (arg1) {
10404 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10405 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10406 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10407 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10408 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10409 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10410 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10411 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10412 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10413 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10414 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10415 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10417 if (len < 0) {
10418 return -TARGET_EINVAL;
10420 if (len == 0) {
10421 return 0;
10423 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10424 if (!p) {
10425 return -TARGET_EFAULT;
10427 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10428 unlock_user(p, arg2, arg3);
10430 return ret;
10431 default:
10432 return -TARGET_EINVAL;
10435 break;
10436 #endif
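/*
 * Illustration (not part of the build): READ_ALL above is the action a
 * guest dmesg uses; a minimal guest-side sketch via glibc's klogctl():
 */
#if 0
#include <sys/klog.h>
#include <stdio.h>

int main(void)
{
    char buf[8192];
    int n = klogctl(3 /* SYSLOG_ACTION_READ_ALL */, buf, sizeof(buf));
    if (n > 0) {
        fwrite(buf, 1, n, stdout);
    }
    return 0;
}
#endif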
10437 case TARGET_NR_setitimer:
10439 struct itimerval value, ovalue, *pvalue;
10441 if (arg2) {
10442 pvalue = &value;
10443 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10444 || copy_from_user_timeval(&pvalue->it_value,
10445 arg2 + sizeof(struct target_timeval)))
10446 return -TARGET_EFAULT;
10447 } else {
10448 pvalue = NULL;
10450 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10451 if (!is_error(ret) && arg3) {
10452 if (copy_to_user_timeval(arg3,
10453 &ovalue.it_interval)
10454 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10455 &ovalue.it_value))
10456 return -TARGET_EFAULT;
10459 return ret;
10460 case TARGET_NR_getitimer:
10462 struct itimerval value;
10464 ret = get_errno(getitimer(arg1, &value));
10465 if (!is_error(ret) && arg2) {
10466 if (copy_to_user_timeval(arg2,
10467 &value.it_interval)
10468 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10469 &value.it_value))
10470 return -TARGET_EFAULT;
10473 return ret;
10474 #ifdef TARGET_NR_stat
10475 case TARGET_NR_stat:
10476 if (!(p = lock_user_string(arg1))) {
10477 return -TARGET_EFAULT;
10479 ret = get_errno(stat(path(p), &st));
10480 unlock_user(p, arg1, 0);
10481 goto do_stat;
10482 #endif
10483 #ifdef TARGET_NR_lstat
10484 case TARGET_NR_lstat:
10485 if (!(p = lock_user_string(arg1))) {
10486 return -TARGET_EFAULT;
10488 ret = get_errno(lstat(path(p), &st));
10489 unlock_user(p, arg1, 0);
10490 goto do_stat;
10491 #endif
10492 #ifdef TARGET_NR_fstat
10493 case TARGET_NR_fstat:
10495 ret = get_errno(fstat(arg1, &st));
10496 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10497 do_stat:
10498 #endif
10499 if (!is_error(ret)) {
10500 struct target_stat *target_st;
10502 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10503 return -TARGET_EFAULT;
10504 memset(target_st, 0, sizeof(*target_st));
10505 __put_user(st.st_dev, &target_st->st_dev);
10506 __put_user(st.st_ino, &target_st->st_ino);
10507 __put_user(st.st_mode, &target_st->st_mode);
10508 __put_user(st.st_uid, &target_st->st_uid);
10509 __put_user(st.st_gid, &target_st->st_gid);
10510 __put_user(st.st_nlink, &target_st->st_nlink);
10511 __put_user(st.st_rdev, &target_st->st_rdev);
10512 __put_user(st.st_size, &target_st->st_size);
10513 __put_user(st.st_blksize, &target_st->st_blksize);
10514 __put_user(st.st_blocks, &target_st->st_blocks);
10515 __put_user(st.st_atime, &target_st->target_st_atime);
10516 __put_user(st.st_mtime, &target_st->target_st_mtime);
10517 __put_user(st.st_ctime, &target_st->target_st_ctime);
10518 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10519 __put_user(st.st_atim.tv_nsec,
10520 &target_st->target_st_atime_nsec);
10521 __put_user(st.st_mtim.tv_nsec,
10522 &target_st->target_st_mtime_nsec);
10523 __put_user(st.st_ctim.tv_nsec,
10524 &target_st->target_st_ctime_nsec);
10525 #endif
10526 unlock_user_struct(target_st, arg2, 1);
10529 return ret;
10530 #endif
10531 case TARGET_NR_vhangup:
10532 return get_errno(vhangup());
10533 #ifdef TARGET_NR_syscall
10534 case TARGET_NR_syscall:
10535 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10536 arg6, arg7, arg8, 0);
10537 #endif
10538 #if defined(TARGET_NR_wait4)
10539 case TARGET_NR_wait4:
10541 int status;
10542 abi_long status_ptr = arg2;
10543 struct rusage rusage, *rusage_ptr;
10544 abi_ulong target_rusage = arg4;
10545 abi_long rusage_err;
10546 if (target_rusage)
10547 rusage_ptr = &rusage;
10548 else
10549 rusage_ptr = NULL;
10550 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10551 if (!is_error(ret)) {
10552 if (status_ptr && ret) {
10553 status = host_to_target_waitstatus(status);
10554 if (put_user_s32(status, status_ptr))
10555 return -TARGET_EFAULT;
10557 if (target_rusage) {
10558 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10559 if (rusage_err) {
10560 ret = rusage_err;
10565 return ret;
10566 #endif
10567 #ifdef TARGET_NR_swapoff
10568 case TARGET_NR_swapoff:
10569 if (!(p = lock_user_string(arg1)))
10570 return -TARGET_EFAULT;
10571 ret = get_errno(swapoff(p));
10572 unlock_user(p, arg1, 0);
10573 return ret;
10574 #endif
10575 case TARGET_NR_sysinfo:
10577 struct target_sysinfo *target_value;
10578 struct sysinfo value;
10579 ret = get_errno(sysinfo(&value));
10580 if (!is_error(ret) && arg1)
10582 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10583 return -TARGET_EFAULT;
10584 __put_user(value.uptime, &target_value->uptime);
10585 __put_user(value.loads[0], &target_value->loads[0]);
10586 __put_user(value.loads[1], &target_value->loads[1]);
10587 __put_user(value.loads[2], &target_value->loads[2]);
10588 __put_user(value.totalram, &target_value->totalram);
10589 __put_user(value.freeram, &target_value->freeram);
10590 __put_user(value.sharedram, &target_value->sharedram);
10591 __put_user(value.bufferram, &target_value->bufferram);
10592 __put_user(value.totalswap, &target_value->totalswap);
10593 __put_user(value.freeswap, &target_value->freeswap);
10594 __put_user(value.procs, &target_value->procs);
10595 __put_user(value.totalhigh, &target_value->totalhigh);
10596 __put_user(value.freehigh, &target_value->freehigh);
10597 __put_user(value.mem_unit, &target_value->mem_unit);
10598 unlock_user_struct(target_value, arg1, 1);
10601 return ret;
10602 #ifdef TARGET_NR_ipc
10603 case TARGET_NR_ipc:
10604 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10605 #endif
10606 #ifdef TARGET_NR_semget
10607 case TARGET_NR_semget:
10608 return get_errno(semget(arg1, arg2, arg3));
10609 #endif
10610 #ifdef TARGET_NR_semop
10611 case TARGET_NR_semop:
10612 return do_semtimedop(arg1, arg2, arg3, 0, false);
10613 #endif
10614 #ifdef TARGET_NR_semtimedop
10615 case TARGET_NR_semtimedop:
10616 return do_semtimedop(arg1, arg2, arg3, arg4, false);
10617 #endif
10618 #ifdef TARGET_NR_semtimedop_time64
10619 case TARGET_NR_semtimedop_time64:
10620 return do_semtimedop(arg1, arg2, arg3, arg4, true);
10621 #endif
10622 #ifdef TARGET_NR_semctl
10623 case TARGET_NR_semctl:
10624 return do_semctl(arg1, arg2, arg3, arg4);
10625 #endif
10626 #ifdef TARGET_NR_msgctl
10627 case TARGET_NR_msgctl:
10628 return do_msgctl(arg1, arg2, arg3);
10629 #endif
10630 #ifdef TARGET_NR_msgget
10631 case TARGET_NR_msgget:
10632 return get_errno(msgget(arg1, arg2));
10633 #endif
10634 #ifdef TARGET_NR_msgrcv
10635 case TARGET_NR_msgrcv:
10636 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10637 #endif
10638 #ifdef TARGET_NR_msgsnd
10639 case TARGET_NR_msgsnd:
10640 return do_msgsnd(arg1, arg2, arg3, arg4);
10641 #endif
10642 #ifdef TARGET_NR_shmget
10643 case TARGET_NR_shmget:
10644 return get_errno(shmget(arg1, arg2, arg3));
10645 #endif
10646 #ifdef TARGET_NR_shmctl
10647 case TARGET_NR_shmctl:
10648 return do_shmctl(arg1, arg2, arg3);
10649 #endif
10650 #ifdef TARGET_NR_shmat
10651 case TARGET_NR_shmat:
10652 return do_shmat(cpu_env, arg1, arg2, arg3);
10653 #endif
10654 #ifdef TARGET_NR_shmdt
10655 case TARGET_NR_shmdt:
10656 return do_shmdt(arg1);
10657 #endif
10658 case TARGET_NR_fsync:
10659 return get_errno(fsync(arg1));
10660 case TARGET_NR_clone:
10661 /* Linux manages to have three different orderings for its
10662 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10663 * match the kernel's CONFIG_CLONE_* settings.
10664 * Microblaze is further special in that it uses a sixth
10665 * implicit argument to clone for the TLS pointer.
10666 */
10667 #if defined(TARGET_MICROBLAZE)
10668 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10669 #elif defined(TARGET_CLONE_BACKWARDS)
10670 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10671 #elif defined(TARGET_CLONE_BACKWARDS2)
10672 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10673 #else
10674 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10675 #endif
10676 return ret;
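/*
 * Illustration: the three guest-register orderings selected above,
 * mapped onto do_fork(env, flags, sp, parent_tidptr, tls, child_tidptr):
 *
 *   default:           flags, sp, parent_tidptr, child_tidptr, tls
 *   CLONE_BACKWARDS:   flags, sp, parent_tidptr, tls, child_tidptr
 *   CLONE_BACKWARDS2:  sp, flags, parent_tidptr, child_tidptr, tls
 *
 * which is why arg4/arg5 (and arg1/arg2 for BACKWARDS2) are swapped
 * before the call.
 */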
10677 #ifdef __NR_exit_group
10678 /* new thread calls */
10679 case TARGET_NR_exit_group:
10680 preexit_cleanup(cpu_env, arg1);
10681 return get_errno(exit_group(arg1));
10682 #endif
10683 case TARGET_NR_setdomainname:
10684 if (!(p = lock_user_string(arg1)))
10685 return -TARGET_EFAULT;
10686 ret = get_errno(setdomainname(p, arg2));
10687 unlock_user(p, arg1, 0);
10688 return ret;
10689 case TARGET_NR_uname:
10690 /* no need to transcode because we use the linux syscall */
10692 struct new_utsname * buf;
10694 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10695 return -TARGET_EFAULT;
10696 ret = get_errno(sys_uname(buf));
10697 if (!is_error(ret)) {
10698 /* Overwrite the native machine name with whatever is being
10699 emulated. */
10700 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10701 sizeof(buf->machine));
10702 /* Allow the user to override the reported release. */
10703 if (qemu_uname_release && *qemu_uname_release) {
10704 g_strlcpy(buf->release, qemu_uname_release,
10705 sizeof(buf->release));
10708 unlock_user_struct(buf, arg1, 1);
10710 return ret;
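/*
 * Illustration: for an aarch64 guest on an x86_64 host this override
 * is why `uname -m` under qemu-aarch64 reports "aarch64"; the release
 * string can likewise be pinned through qemu_uname_release (set e.g.
 * by the -r option / QEMU_UNAME environment variable).
 */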
10711 #ifdef TARGET_I386
10712 case TARGET_NR_modify_ldt:
10713 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10714 #if !defined(TARGET_X86_64)
10715 case TARGET_NR_vm86:
10716 return do_vm86(cpu_env, arg1, arg2);
10717 #endif
10718 #endif
10719 #if defined(TARGET_NR_adjtimex)
10720 case TARGET_NR_adjtimex:
10722 struct timex host_buf;
10724 if (target_to_host_timex(&host_buf, arg1) != 0) {
10725 return -TARGET_EFAULT;
10727 ret = get_errno(adjtimex(&host_buf));
10728 if (!is_error(ret)) {
10729 if (host_to_target_timex(arg1, &host_buf) != 0) {
10730 return -TARGET_EFAULT;
10734 return ret;
10735 #endif
10736 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10737 case TARGET_NR_clock_adjtime:
10739 struct timex htx, *phtx = &htx;
10741 if (target_to_host_timex(phtx, arg2) != 0) {
10742 return -TARGET_EFAULT;
10744 ret = get_errno(clock_adjtime(arg1, phtx));
10745 if (!is_error(ret) && phtx) {
10746 if (host_to_target_timex(arg2, phtx) != 0) {
10747 return -TARGET_EFAULT;
10751 return ret;
10752 #endif
10753 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10754 case TARGET_NR_clock_adjtime64:
10756 struct timex htx;
10758 if (target_to_host_timex64(&htx, arg2) != 0) {
10759 return -TARGET_EFAULT;
10761 ret = get_errno(clock_adjtime(arg1, &htx));
10762 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10763 return -TARGET_EFAULT;
10766 return ret;
10767 #endif
10768 case TARGET_NR_getpgid:
10769 return get_errno(getpgid(arg1));
10770 case TARGET_NR_fchdir:
10771 return get_errno(fchdir(arg1));
10772 case TARGET_NR_personality:
10773 return get_errno(personality(arg1));
10774 #ifdef TARGET_NR__llseek /* Not on alpha */
10775 case TARGET_NR__llseek:
10777 int64_t res;
10778 #if !defined(__NR_llseek)
10779 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10780 if (res == -1) {
10781 ret = get_errno(res);
10782 } else {
10783 ret = 0;
10785 #else
10786 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10787 #endif
10788 if ((ret == 0) && put_user_s64(res, arg4)) {
10789 return -TARGET_EFAULT;
10792 return ret;
10793 #endif
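/*
 * Illustration: _llseek splits the 64-bit offset across two 32-bit
 * registers (arg2 = high word, arg3 = low word); the no-__NR_llseek
 * fallback above reassembles it exactly as the kernel would, e.g.
 * hi = 0x1, lo = 0x80000000 gives ((uint64_t)0x1 << 32) | 0x80000000
 * = 0x180000000 (6 GiB).
 */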
10794 #ifdef TARGET_NR_getdents
10795 case TARGET_NR_getdents:
10796 return do_getdents(arg1, arg2, arg3);
10797 #endif /* TARGET_NR_getdents */
10798 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10799 case TARGET_NR_getdents64:
10800 return do_getdents64(arg1, arg2, arg3);
10801 #endif /* TARGET_NR_getdents64 */
10802 #if defined(TARGET_NR__newselect)
10803 case TARGET_NR__newselect:
10804 return do_select(arg1, arg2, arg3, arg4, arg5);
10805 #endif
10806 #ifdef TARGET_NR_poll
10807 case TARGET_NR_poll:
10808 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10809 #endif
10810 #ifdef TARGET_NR_ppoll
10811 case TARGET_NR_ppoll:
10812 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10813 #endif
10814 #ifdef TARGET_NR_ppoll_time64
10815 case TARGET_NR_ppoll_time64:
10816 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10817 #endif
10818 case TARGET_NR_flock:
10819 /* NOTE: the flock constant seems to be the same for every
10820 Linux platform */
10821 return get_errno(safe_flock(arg1, arg2));
10822 case TARGET_NR_readv:
10824 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10825 if (vec != NULL) {
10826 ret = get_errno(safe_readv(arg1, vec, arg3));
10827 unlock_iovec(vec, arg2, arg3, 1);
10828 } else {
10829 ret = -host_to_target_errno(errno);
10832 return ret;
10833 case TARGET_NR_writev:
10835 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10836 if (vec != NULL) {
10837 ret = get_errno(safe_writev(arg1, vec, arg3));
10838 unlock_iovec(vec, arg2, arg3, 0);
10839 } else {
10840 ret = -host_to_target_errno(errno);
10843 return ret;
10844 #if defined(TARGET_NR_preadv)
10845 case TARGET_NR_preadv:
10847 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10848 if (vec != NULL) {
10849 unsigned long low, high;
10851 target_to_host_low_high(arg4, arg5, &low, &high);
10852 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10853 unlock_iovec(vec, arg2, arg3, 1);
10854 } else {
10855 ret = -host_to_target_errno(errno);
10858 return ret;
10859 #endif
10860 #if defined(TARGET_NR_pwritev)
10861 case TARGET_NR_pwritev:
10863 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10864 if (vec != NULL) {
10865 unsigned long low, high;
10867 target_to_host_low_high(arg4, arg5, &low, &high);
10868 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10869 unlock_iovec(vec, arg2, arg3, 0);
10870 } else {
10871 ret = -host_to_target_errno(errno);
10874 return ret;
10875 #endif
10876 case TARGET_NR_getsid:
10877 return get_errno(getsid(arg1));
10878 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10879 case TARGET_NR_fdatasync:
10880 return get_errno(fdatasync(arg1));
10881 #endif
10882 case TARGET_NR_sched_getaffinity:
10884 unsigned int mask_size;
10885 unsigned long *mask;
10887 /*
10888 * sched_getaffinity needs multiples of ulong, so need to take
10889 * care of mismatches between target ulong and host ulong sizes.
10890 */
10891 if (arg2 & (sizeof(abi_ulong) - 1)) {
10892 return -TARGET_EINVAL;
10894 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10896 mask = alloca(mask_size);
10897 memset(mask, 0, mask_size);
10898 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10900 if (!is_error(ret)) {
10901 if (ret > arg2) {
10902 /* More data returned than the caller's buffer will fit.
10903 * This only happens if sizeof(abi_long) < sizeof(long)
10904 * and the caller passed us a buffer holding an odd number
10905 * of abi_longs. If the host kernel is actually using the
10906 * extra 4 bytes then fail EINVAL; otherwise we can just
10907 * ignore them and only copy the interesting part.
10908 */
10909 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10910 if (numcpus > arg2 * 8) {
10911 return -TARGET_EINVAL;
10913 ret = arg2;
10916 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10917 return -TARGET_EFAULT;
10921 return ret;
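/*
 * Illustration: the masking above rounds the guest size up to a whole
 * number of host unsigned longs. With a 4-byte abi_ulong and an 8-byte
 * host long, a guest buffer of 12 bytes gives mask_size = (12 + 7) &
 * ~7 = 16, and the "ret > arg2" branch deals with the 4 surplus bytes
 * on the way back.
 */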
10922 case TARGET_NR_sched_setaffinity:
10924 unsigned int mask_size;
10925 unsigned long *mask;
10927 /*
10928 * sched_setaffinity needs multiples of ulong, so need to take
10929 * care of mismatches between target ulong and host ulong sizes.
10930 */
10931 if (arg2 & (sizeof(abi_ulong) - 1)) {
10932 return -TARGET_EINVAL;
10934 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10935 mask = alloca(mask_size);
10937 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10938 if (ret) {
10939 return ret;
10942 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10944 case TARGET_NR_getcpu:
10946 unsigned cpu, node;
10947 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10948 arg2 ? &node : NULL,
10949 NULL));
10950 if (is_error(ret)) {
10951 return ret;
10953 if (arg1 && put_user_u32(cpu, arg1)) {
10954 return -TARGET_EFAULT;
10956 if (arg2 && put_user_u32(node, arg2)) {
10957 return -TARGET_EFAULT;
10960 return ret;
10961 case TARGET_NR_sched_setparam:
10963 struct target_sched_param *target_schp;
10964 struct sched_param schp;
10966 if (arg2 == 0) {
10967 return -TARGET_EINVAL;
10969 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10970 return -TARGET_EFAULT;
10972 schp.sched_priority = tswap32(target_schp->sched_priority);
10973 unlock_user_struct(target_schp, arg2, 0);
10974 return get_errno(sys_sched_setparam(arg1, &schp));
10976 case TARGET_NR_sched_getparam:
10978 struct target_sched_param *target_schp;
10979 struct sched_param schp;
10981 if (arg2 == 0) {
10982 return -TARGET_EINVAL;
10984 ret = get_errno(sys_sched_getparam(arg1, &schp));
10985 if (!is_error(ret)) {
10986 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10987 return -TARGET_EFAULT;
10989 target_schp->sched_priority = tswap32(schp.sched_priority);
10990 unlock_user_struct(target_schp, arg2, 1);
10993 return ret;
10994 case TARGET_NR_sched_setscheduler:
10996 struct target_sched_param *target_schp;
10997 struct sched_param schp;
10998 if (arg3 == 0) {
10999 return -TARGET_EINVAL;
11001 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11002 return -TARGET_EFAULT;
11004 schp.sched_priority = tswap32(target_schp->sched_priority);
11005 unlock_user_struct(target_schp, arg3, 0);
11006 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11008 case TARGET_NR_sched_getscheduler:
11009 return get_errno(sys_sched_getscheduler(arg1));
11010 case TARGET_NR_sched_getattr:
11012 struct target_sched_attr *target_scha;
11013 struct sched_attr scha;
11014 if (arg2 == 0) {
11015 return -TARGET_EINVAL;
11017 if (arg3 > sizeof(scha)) {
11018 arg3 = sizeof(scha);
11020 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11021 if (!is_error(ret)) {
11022 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11023 if (!target_scha) {
11024 return -TARGET_EFAULT;
11026 target_scha->size = tswap32(scha.size);
11027 target_scha->sched_policy = tswap32(scha.sched_policy);
11028 target_scha->sched_flags = tswap64(scha.sched_flags);
11029 target_scha->sched_nice = tswap32(scha.sched_nice);
11030 target_scha->sched_priority = tswap32(scha.sched_priority);
11031 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11032 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11033 target_scha->sched_period = tswap64(scha.sched_period);
11034 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11035 target_scha->sched_util_min = tswap32(scha.sched_util_min);
11036 target_scha->sched_util_max = tswap32(scha.sched_util_max);
11038 unlock_user(target_scha, arg2, arg3);
11040 return ret;
11042 case TARGET_NR_sched_setattr:
11044 struct target_sched_attr *target_scha;
11045 struct sched_attr scha;
11046 uint32_t size;
11047 int zeroed;
11048 if (arg2 == 0) {
11049 return -TARGET_EINVAL;
11051 if (get_user_u32(size, arg2)) {
11052 return -TARGET_EFAULT;
11054 if (!size) {
11055 size = offsetof(struct target_sched_attr, sched_util_min);
11057 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11058 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11059 return -TARGET_EFAULT;
11061 return -TARGET_E2BIG;
11064 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11065 if (zeroed < 0) {
11066 return zeroed;
11067 } else if (zeroed == 0) {
11068 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11069 return -TARGET_EFAULT;
11071 return -TARGET_E2BIG;
11073 if (size > sizeof(struct target_sched_attr)) {
11074 size = sizeof(struct target_sched_attr);
11077 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11078 if (!target_scha) {
11079 return -TARGET_EFAULT;
11081 scha.size = size;
11082 scha.sched_policy = tswap32(target_scha->sched_policy);
11083 scha.sched_flags = tswap64(target_scha->sched_flags);
11084 scha.sched_nice = tswap32(target_scha->sched_nice);
11085 scha.sched_priority = tswap32(target_scha->sched_priority);
11086 scha.sched_runtime = tswap64(target_scha->sched_runtime);
11087 scha.sched_deadline = tswap64(target_scha->sched_deadline);
11088 scha.sched_period = tswap64(target_scha->sched_period);
11089 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11090 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11091 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11093 unlock_user(target_scha, arg2, 0);
11094 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
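/*
 * Illustration: the size handling above mirrors the kernel's sched_attr
 * ABI handshake: size 0 selects the original pre-util-clamp layout, a
 * size below that minimum (or with non-zero bytes past the fields QEMU
 * understands) earns E2BIG with the supported size written back, and
 * an oversized struct is silently truncated to the known fields.
 */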
11096 case TARGET_NR_sched_yield:
11097 return get_errno(sched_yield());
11098 case TARGET_NR_sched_get_priority_max:
11099 return get_errno(sched_get_priority_max(arg1));
11100 case TARGET_NR_sched_get_priority_min:
11101 return get_errno(sched_get_priority_min(arg1));
11102 #ifdef TARGET_NR_sched_rr_get_interval
11103 case TARGET_NR_sched_rr_get_interval:
11105 struct timespec ts;
11106 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11107 if (!is_error(ret)) {
11108 ret = host_to_target_timespec(arg2, &ts);
11111 return ret;
11112 #endif
11113 #ifdef TARGET_NR_sched_rr_get_interval_time64
11114 case TARGET_NR_sched_rr_get_interval_time64:
11116 struct timespec ts;
11117 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11118 if (!is_error(ret)) {
11119 ret = host_to_target_timespec64(arg2, &ts);
11122 return ret;
11123 #endif
11124 #if defined(TARGET_NR_nanosleep)
11125 case TARGET_NR_nanosleep:
11127 struct timespec req, rem;
11128 target_to_host_timespec(&req, arg1);
11129 ret = get_errno(safe_nanosleep(&req, &rem));
11130 if (is_error(ret) && arg2) {
11131 host_to_target_timespec(arg2, &rem);
11134 return ret;
11135 #endif
11136 case TARGET_NR_prctl:
11137 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11139 #ifdef TARGET_NR_arch_prctl
11140 case TARGET_NR_arch_prctl:
11141 return do_arch_prctl(cpu_env, arg1, arg2);
11142 #endif
11143 #ifdef TARGET_NR_pread64
11144 case TARGET_NR_pread64:
11145 if (regpairs_aligned(cpu_env, num)) {
11146 arg4 = arg5;
11147 arg5 = arg6;
11149 if (arg2 == 0 && arg3 == 0) {
11150 /* Special-case NULL buffer and zero length, which should succeed */
11151 p = 0;
11152 } else {
11153 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11154 if (!p) {
11155 return -TARGET_EFAULT;
11158 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11159 unlock_user(p, arg2, ret);
11160 return ret;
11161 case TARGET_NR_pwrite64:
11162 if (regpairs_aligned(cpu_env, num)) {
11163 arg4 = arg5;
11164 arg5 = arg6;
11166 if (arg2 == 0 && arg3 == 0) {
11167 /* Special-case NULL buffer and zero length, which should succeed */
11168 p = 0;
11169 } else {
11170 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11171 if (!p) {
11172 return -TARGET_EFAULT;
11175 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11176 unlock_user(p, arg2, 0);
11177 return ret;
11178 #endif
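/*
 * Illustration: on 32-bit ABIs where 64-bit values must sit in aligned
 * register pairs, the offset halves arrive one slot later than usual,
 * which the regpairs_aligned() shuffle above undoes. E.g. such a guest
 * may marshal pread64(fd, buf, len, off) with arg4 as padding and the
 * offset in (arg5, arg6), so arg4/arg5 are re-pointed before
 * target_offset64() reassembles the 64-bit offset.
 */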
11179 case TARGET_NR_getcwd:
11180 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11181 return -TARGET_EFAULT;
11182 ret = get_errno(sys_getcwd1(p, arg2));
11183 unlock_user(p, arg1, ret);
11184 return ret;
11185 case TARGET_NR_capget:
11186 case TARGET_NR_capset:
11188 struct target_user_cap_header *target_header;
11189 struct target_user_cap_data *target_data = NULL;
11190 struct __user_cap_header_struct header;
11191 struct __user_cap_data_struct data[2];
11192 struct __user_cap_data_struct *dataptr = NULL;
11193 int i, target_datalen;
11194 int data_items = 1;
11196 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11197 return -TARGET_EFAULT;
11199 header.version = tswap32(target_header->version);
11200 header.pid = tswap32(target_header->pid);
11202 if (header.version != _LINUX_CAPABILITY_VERSION) {
11203 /* Version 2 and up takes pointer to two user_data structs */
11204 data_items = 2;
11207 target_datalen = sizeof(*target_data) * data_items;
11209 if (arg2) {
11210 if (num == TARGET_NR_capget) {
11211 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11212 } else {
11213 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11215 if (!target_data) {
11216 unlock_user_struct(target_header, arg1, 0);
11217 return -TARGET_EFAULT;
11220 if (num == TARGET_NR_capset) {
11221 for (i = 0; i < data_items; i++) {
11222 data[i].effective = tswap32(target_data[i].effective);
11223 data[i].permitted = tswap32(target_data[i].permitted);
11224 data[i].inheritable = tswap32(target_data[i].inheritable);
11228 dataptr = data;
11231 if (num == TARGET_NR_capget) {
11232 ret = get_errno(capget(&header, dataptr));
11233 } else {
11234 ret = get_errno(capset(&header, dataptr));
11237 /* The kernel always updates version for both capget and capset */
11238 target_header->version = tswap32(header.version);
11239 unlock_user_struct(target_header, arg1, 1);
11241 if (arg2) {
11242 if (num == TARGET_NR_capget) {
11243 for (i = 0; i < data_items; i++) {
11244 target_data[i].effective = tswap32(data[i].effective);
11245 target_data[i].permitted = tswap32(data[i].permitted);
11246 target_data[i].inheritable = tswap32(data[i].inheritable);
11248 unlock_user(target_data, arg2, target_datalen);
11249 } else {
11250 unlock_user(target_data, arg2, 0);
11253 return ret;
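/*
 * Illustration (not part of the build): a guest using the v3 capability
 * ABI takes the data_items == 2 path above. A minimal raw capget
 * sketch as a guest might issue it:
 */
#if 0
#include <linux/capability.h>
#include <sys/syscall.h>
#include <unistd.h>

int guest_capget(struct __user_cap_data_struct data[2])
{
    struct __user_cap_header_struct hdr = {
        .version = _LINUX_CAPABILITY_VERSION_3,
        .pid = 0,                    /* 0 means the calling process */
    };
    return syscall(SYS_capget, &hdr, data);
}
#endif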
11255 case TARGET_NR_sigaltstack:
11256 return do_sigaltstack(arg1, arg2, cpu_env);
11258 #ifdef CONFIG_SENDFILE
11259 #ifdef TARGET_NR_sendfile
11260 case TARGET_NR_sendfile:
11262 off_t *offp = NULL;
11263 off_t off;
11264 if (arg3) {
11265 ret = get_user_sal(off, arg3);
11266 if (is_error(ret)) {
11267 return ret;
11269 offp = &off;
11271 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11272 if (!is_error(ret) && arg3) {
11273 abi_long ret2 = put_user_sal(off, arg3);
11274 if (is_error(ret2)) {
11275 ret = ret2;
11278 return ret;
11280 #endif
11281 #ifdef TARGET_NR_sendfile64
11282 case TARGET_NR_sendfile64:
11284 off_t *offp = NULL;
11285 off_t off;
11286 if (arg3) {
11287 ret = get_user_s64(off, arg3);
11288 if (is_error(ret)) {
11289 return ret;
11291 offp = &off;
11293 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11294 if (!is_error(ret) && arg3) {
11295 abi_long ret2 = put_user_s64(off, arg3);
11296 if (is_error(ret2)) {
11297 ret = ret2;
11300 return ret;
11302 #endif
11303 #endif
11304 #ifdef TARGET_NR_vfork
11305 case TARGET_NR_vfork:
11306 return get_errno(do_fork(cpu_env,
11307 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11308 0, 0, 0, 0));
11309 #endif
11310 #ifdef TARGET_NR_ugetrlimit
11311 case TARGET_NR_ugetrlimit:
11313 struct rlimit rlim;
11314 int resource = target_to_host_resource(arg1);
11315 ret = get_errno(getrlimit(resource, &rlim));
11316 if (!is_error(ret)) {
11317 struct target_rlimit *target_rlim;
11318 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11319 return -TARGET_EFAULT;
11320 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11321 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11322 unlock_user_struct(target_rlim, arg2, 1);
11324 return ret;
11326 #endif
11327 #ifdef TARGET_NR_truncate64
11328 case TARGET_NR_truncate64:
11329 if (!(p = lock_user_string(arg1)))
11330 return -TARGET_EFAULT;
11331 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11332 unlock_user(p, arg1, 0);
11333 return ret;
11334 #endif
11335 #ifdef TARGET_NR_ftruncate64
11336 case TARGET_NR_ftruncate64:
11337 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11338 #endif
11339 #ifdef TARGET_NR_stat64
11340 case TARGET_NR_stat64:
11341 if (!(p = lock_user_string(arg1))) {
11342 return -TARGET_EFAULT;
11344 ret = get_errno(stat(path(p), &st));
11345 unlock_user(p, arg1, 0);
11346 if (!is_error(ret))
11347 ret = host_to_target_stat64(cpu_env, arg2, &st);
11348 return ret;
11349 #endif
11350 #ifdef TARGET_NR_lstat64
11351 case TARGET_NR_lstat64:
11352 if (!(p = lock_user_string(arg1))) {
11353 return -TARGET_EFAULT;
11355 ret = get_errno(lstat(path(p), &st));
11356 unlock_user(p, arg1, 0);
11357 if (!is_error(ret))
11358 ret = host_to_target_stat64(cpu_env, arg2, &st);
11359 return ret;
11360 #endif
11361 #ifdef TARGET_NR_fstat64
11362 case TARGET_NR_fstat64:
11363 ret = get_errno(fstat(arg1, &st));
11364 if (!is_error(ret))
11365 ret = host_to_target_stat64(cpu_env, arg2, &st);
11366 return ret;
11367 #endif
11368 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11369 #ifdef TARGET_NR_fstatat64
11370 case TARGET_NR_fstatat64:
11371 #endif
11372 #ifdef TARGET_NR_newfstatat
11373 case TARGET_NR_newfstatat:
11374 #endif
11375 if (!(p = lock_user_string(arg2))) {
11376 return -TARGET_EFAULT;
11378 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11379 unlock_user(p, arg2, 0);
11380 if (!is_error(ret))
11381 ret = host_to_target_stat64(cpu_env, arg3, &st);
11382 return ret;
11383 #endif
11384 #if defined(TARGET_NR_statx)
11385 case TARGET_NR_statx:
11387 struct target_statx *target_stx;
11388 int dirfd = arg1;
11389 int flags = arg3;
11391 p = lock_user_string(arg2);
11392 if (p == NULL) {
11393 return -TARGET_EFAULT;
11395 #if defined(__NR_statx)
11397 /*
11398 * It is assumed that struct statx is architecture independent.
11399 */
11400 struct target_statx host_stx;
11401 int mask = arg4;
11403 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11404 if (!is_error(ret)) {
11405 if (host_to_target_statx(&host_stx, arg5) != 0) {
11406 unlock_user(p, arg2, 0);
11407 return -TARGET_EFAULT;
11411 if (ret != -TARGET_ENOSYS) {
11412 unlock_user(p, arg2, 0);
11413 return ret;
11416 #endif
11417 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11418 unlock_user(p, arg2, 0);
11420 if (!is_error(ret)) {
11421 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11422 return -TARGET_EFAULT;
11424 memset(target_stx, 0, sizeof(*target_stx));
11425 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11426 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11427 __put_user(st.st_ino, &target_stx->stx_ino);
11428 __put_user(st.st_mode, &target_stx->stx_mode);
11429 __put_user(st.st_uid, &target_stx->stx_uid);
11430 __put_user(st.st_gid, &target_stx->stx_gid);
11431 __put_user(st.st_nlink, &target_stx->stx_nlink);
11432 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11433 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11434 __put_user(st.st_size, &target_stx->stx_size);
11435 __put_user(st.st_blksize, &target_stx->stx_blksize);
11436 __put_user(st.st_blocks, &target_stx->stx_blocks);
11437 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11438 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11439 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11440 unlock_user_struct(target_stx, arg5, 1);
11443 return ret;
11444 #endif
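/*
 * Illustration: when the host has no statx() (or it returns ENOSYS),
 * the fstatat() fallback above can only populate the classic stat
 * fields, so extensions such as STATX_BTIME come back zeroed from the
 * memset; the #if defined(__NR_statx) branch is preferred whenever the
 * host syscall works.
 */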
11445 #ifdef TARGET_NR_lchown
11446 case TARGET_NR_lchown:
11447 if (!(p = lock_user_string(arg1)))
11448 return -TARGET_EFAULT;
11449 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11450 unlock_user(p, arg1, 0);
11451 return ret;
11452 #endif
11453 #ifdef TARGET_NR_getuid
11454 case TARGET_NR_getuid:
11455 return get_errno(high2lowuid(getuid()));
11456 #endif
11457 #ifdef TARGET_NR_getgid
11458 case TARGET_NR_getgid:
11459 return get_errno(high2lowgid(getgid()));
11460 #endif
11461 #ifdef TARGET_NR_geteuid
11462 case TARGET_NR_geteuid:
11463 return get_errno(high2lowuid(geteuid()));
11464 #endif
11465 #ifdef TARGET_NR_getegid
11466 case TARGET_NR_getegid:
11467 return get_errno(high2lowgid(getegid()));
11468 #endif
11469 case TARGET_NR_setreuid:
11470 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11471 case TARGET_NR_setregid:
11472 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11473 case TARGET_NR_getgroups:
11475 int gidsetsize = arg1;
11476 target_id *target_grouplist;
11477 gid_t *grouplist;
11478 int i;
11480 grouplist = alloca(gidsetsize * sizeof(gid_t));
11481 ret = get_errno(getgroups(gidsetsize, grouplist));
11482 if (gidsetsize == 0)
11483 return ret;
11484 if (!is_error(ret)) {
11485 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11486 if (!target_grouplist)
11487 return -TARGET_EFAULT;
11488 for (i = 0; i < ret; i++)
11489 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11490 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11493 return ret;
11494 case TARGET_NR_setgroups:
11496 int gidsetsize = arg1;
11497 target_id *target_grouplist;
11498 gid_t *grouplist = NULL;
11499 int i;
11500 if (gidsetsize) {
11501 grouplist = alloca(gidsetsize * sizeof(gid_t));
11502 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11503 if (!target_grouplist) {
11504 return -TARGET_EFAULT;
11506 for (i = 0; i < gidsetsize; i++) {
11507 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11509 unlock_user(target_grouplist, arg2, 0);
11511 return get_errno(setgroups(gidsetsize, grouplist));
11513 case TARGET_NR_fchown:
11514 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11515 #if defined(TARGET_NR_fchownat)
11516 case TARGET_NR_fchownat:
11517 if (!(p = lock_user_string(arg2)))
11518 return -TARGET_EFAULT;
11519 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11520 low2highgid(arg4), arg5));
11521 unlock_user(p, arg2, 0);
11522 return ret;
11523 #endif
11524 #ifdef TARGET_NR_setresuid
11525 case TARGET_NR_setresuid:
11526 return get_errno(sys_setresuid(low2highuid(arg1),
11527 low2highuid(arg2),
11528 low2highuid(arg3)));
11529 #endif
11530 #ifdef TARGET_NR_getresuid
11531 case TARGET_NR_getresuid:
11533 uid_t ruid, euid, suid;
11534 ret = get_errno(getresuid(&ruid, &euid, &suid));
11535 if (!is_error(ret)) {
11536 if (put_user_id(high2lowuid(ruid), arg1)
11537 || put_user_id(high2lowuid(euid), arg2)
11538 || put_user_id(high2lowuid(suid), arg3))
11539 return -TARGET_EFAULT;
11542 return ret;
11543 #endif
11544 #ifdef TARGET_NR_getresgid
11545 case TARGET_NR_setresgid:
11546 return get_errno(sys_setresgid(low2highgid(arg1),
11547 low2highgid(arg2),
11548 low2highgid(arg3)));
11549 #endif
11550 #ifdef TARGET_NR_getresgid
11551 case TARGET_NR_getresgid:
11553 gid_t rgid, egid, sgid;
11554 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11555 if (!is_error(ret)) {
11556 if (put_user_id(high2lowgid(rgid), arg1)
11557 || put_user_id(high2lowgid(egid), arg2)
11558 || put_user_id(high2lowgid(sgid), arg3))
11559 return -TARGET_EFAULT;
11562 return ret;
11563 #endif
11564 #ifdef TARGET_NR_chown
11565 case TARGET_NR_chown:
11566 if (!(p = lock_user_string(arg1)))
11567 return -TARGET_EFAULT;
11568 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11569 unlock_user(p, arg1, 0);
11570 return ret;
11571 #endif
11572 case TARGET_NR_setuid:
11573 return get_errno(sys_setuid(low2highuid(arg1)));
11574 case TARGET_NR_setgid:
11575 return get_errno(sys_setgid(low2highgid(arg1)));
11576 case TARGET_NR_setfsuid:
11577 return get_errno(setfsuid(arg1));
11578 case TARGET_NR_setfsgid:
11579 return get_errno(setfsgid(arg1));
11581 #ifdef TARGET_NR_lchown32
11582 case TARGET_NR_lchown32:
11583 if (!(p = lock_user_string(arg1)))
11584 return -TARGET_EFAULT;
11585 ret = get_errno(lchown(p, arg2, arg3));
11586 unlock_user(p, arg1, 0);
11587 return ret;
11588 #endif
11589 #ifdef TARGET_NR_getuid32
11590 case TARGET_NR_getuid32:
11591 return get_errno(getuid());
11592 #endif
11594 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11595 /* Alpha specific */
11596 case TARGET_NR_getxuid:
11598 uid_t euid;
11599 euid = geteuid();
11600 cpu_env->ir[IR_A4] = euid;
11602 return get_errno(getuid());
11603 #endif
11604 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11605 /* Alpha specific */
11606 case TARGET_NR_getxgid:
11608 gid_t egid;
11609 egid = getegid();
11610 cpu_env->ir[IR_A4] = egid;
11612 return get_errno(getgid());
11613 #endif
11614 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11615 /* Alpha specific */
11616 case TARGET_NR_osf_getsysinfo:
11617 ret = -TARGET_EOPNOTSUPP;
11618 switch (arg1) {
11619 case TARGET_GSI_IEEE_FP_CONTROL:
11621 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11622 uint64_t swcr = cpu_env->swcr;
11624 swcr &= ~SWCR_STATUS_MASK;
11625 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11627 if (put_user_u64 (swcr, arg2))
11628 return -TARGET_EFAULT;
11629 ret = 0;
11631 break;
11633 /* case GSI_IEEE_STATE_AT_SIGNAL:
11634 -- Not implemented in linux kernel.
11635 case GSI_UACPROC:
11636 -- Retrieves current unaligned access state; not much used.
11637 case GSI_PROC_TYPE:
11638 -- Retrieves implver information; surely not used.
11639 case GSI_GET_HWRPB:
11640 -- Grabs a copy of the HWRPB; surely not used.
11641 */
11642 }
11643 return ret;
11644 #endif
11645 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11646 /* Alpha specific */
11647 case TARGET_NR_osf_setsysinfo:
11648 ret = -TARGET_EOPNOTSUPP;
11649 switch (arg1) {
11650 case TARGET_SSI_IEEE_FP_CONTROL:
11652 uint64_t swcr, fpcr;
11654 if (get_user_u64 (swcr, arg2)) {
11655 return -TARGET_EFAULT;
11659 * The kernel calls swcr_update_status to update the
11660 * status bits from the fpcr at every point that it
11661 * could be queried. Therefore, we store the status
11662 * bits only in FPCR.
11664 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11666 fpcr = cpu_alpha_load_fpcr(cpu_env);
11667 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11668 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11669 cpu_alpha_store_fpcr(cpu_env, fpcr);
11670 ret = 0;
11672 break;
11674 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11676 uint64_t exc, fpcr, fex;
11678 if (get_user_u64(exc, arg2)) {
11679 return -TARGET_EFAULT;
11681 exc &= SWCR_STATUS_MASK;
11682 fpcr = cpu_alpha_load_fpcr(cpu_env);
11684 /* Old exceptions are not signaled. */
11685 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11686 fex = exc & ~fex;
11687 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11688 fex &= (cpu_env)->swcr;
11690 /* Update the hardware fpcr. */
11691 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11692 cpu_alpha_store_fpcr(cpu_env, fpcr);
11694 if (fex) {
11695 int si_code = TARGET_FPE_FLTUNK;
11696 target_siginfo_t info;
11698 if (fex & SWCR_TRAP_ENABLE_DNO) {
11699 si_code = TARGET_FPE_FLTUND;
11701 if (fex & SWCR_TRAP_ENABLE_INE) {
11702 si_code = TARGET_FPE_FLTRES;
11704 if (fex & SWCR_TRAP_ENABLE_UNF) {
11705 si_code = TARGET_FPE_FLTUND;
11707 if (fex & SWCR_TRAP_ENABLE_OVF) {
11708 si_code = TARGET_FPE_FLTOVF;
11710 if (fex & SWCR_TRAP_ENABLE_DZE) {
11711 si_code = TARGET_FPE_FLTDIV;
11713 if (fex & SWCR_TRAP_ENABLE_INV) {
11714 si_code = TARGET_FPE_FLTINV;
11717 info.si_signo = SIGFPE;
11718 info.si_errno = 0;
11719 info.si_code = si_code;
11720 info._sifields._sigfault._addr = (cpu_env)->pc;
11721 queue_signal(cpu_env, info.si_signo,
11722 QEMU_SI_FAULT, &info);
11724 ret = 0;
11726 break;
11728 /* case SSI_NVPAIRS:
11729 -- Used with SSIN_UACPROC to enable unaligned accesses.
11730 case SSI_IEEE_STATE_AT_SIGNAL:
11731 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11732 -- Not implemented in linux kernel
11733 */
11734 }
11735 return ret;
11736 #endif
11737 #ifdef TARGET_NR_osf_sigprocmask
11738 /* Alpha specific. */
11739 case TARGET_NR_osf_sigprocmask:
11741 abi_ulong mask;
11742 int how;
11743 sigset_t set, oldset;
11745 switch(arg1) {
11746 case TARGET_SIG_BLOCK:
11747 how = SIG_BLOCK;
11748 break;
11749 case TARGET_SIG_UNBLOCK:
11750 how = SIG_UNBLOCK;
11751 break;
11752 case TARGET_SIG_SETMASK:
11753 how = SIG_SETMASK;
11754 break;
11755 default:
11756 return -TARGET_EINVAL;
11758 mask = arg2;
11759 target_to_host_old_sigset(&set, &mask);
11760 ret = do_sigprocmask(how, &set, &oldset);
11761 if (!ret) {
11762 host_to_target_old_sigset(&mask, &oldset);
11763 ret = mask;
11766 return ret;
11767 #endif
11769 #ifdef TARGET_NR_getgid32
11770 case TARGET_NR_getgid32:
11771 return get_errno(getgid());
11772 #endif
11773 #ifdef TARGET_NR_geteuid32
11774 case TARGET_NR_geteuid32:
11775 return get_errno(geteuid());
11776 #endif
11777 #ifdef TARGET_NR_getegid32
11778 case TARGET_NR_getegid32:
11779 return get_errno(getegid());
11780 #endif
11781 #ifdef TARGET_NR_setreuid32
11782 case TARGET_NR_setreuid32:
11783 return get_errno(setreuid(arg1, arg2));
11784 #endif
11785 #ifdef TARGET_NR_setregid32
11786 case TARGET_NR_setregid32:
11787 return get_errno(setregid(arg1, arg2));
11788 #endif
11789 #ifdef TARGET_NR_getgroups32
11790 case TARGET_NR_getgroups32:
11791 {
11792 int gidsetsize = arg1;
11793 uint32_t *target_grouplist;
11794 gid_t *grouplist;
11795 int i;
11797 grouplist = alloca(gidsetsize * sizeof(gid_t));
11798 ret = get_errno(getgroups(gidsetsize, grouplist));
11799 if (gidsetsize == 0)
11800 return ret;
11801 if (!is_error(ret)) {
11802 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11803 if (!target_grouplist) {
11804 return -TARGET_EFAULT;
11805 }
11806 for(i = 0;i < ret; i++)
11807 target_grouplist[i] = tswap32(grouplist[i]);
11808 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11809 }
11810 }
11811 return ret;
11812 #endif
11813 #ifdef TARGET_NR_setgroups32
11814 case TARGET_NR_setgroups32:
11815 {
11816 int gidsetsize = arg1;
11817 uint32_t *target_grouplist;
11818 gid_t *grouplist;
11819 int i;
11821 grouplist = alloca(gidsetsize * sizeof(gid_t));
11822 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11823 if (!target_grouplist) {
11824 return -TARGET_EFAULT;
11825 }
11826 for(i = 0;i < gidsetsize; i++)
11827 grouplist[i] = tswap32(target_grouplist[i]);
11828 unlock_user(target_grouplist, arg2, 0);
11829 return get_errno(setgroups(gidsetsize, grouplist));
11830 }
11831 #endif
11832 #ifdef TARGET_NR_fchown32
11833 case TARGET_NR_fchown32:
11834 return get_errno(fchown(arg1, arg2, arg3));
11835 #endif
11836 #ifdef TARGET_NR_setresuid32
11837 case TARGET_NR_setresuid32:
11838 return get_errno(sys_setresuid(arg1, arg2, arg3));
11839 #endif
11840 #ifdef TARGET_NR_getresuid32
11841 case TARGET_NR_getresuid32:
11842 {
11843 uid_t ruid, euid, suid;
11844 ret = get_errno(getresuid(&ruid, &euid, &suid));
11845 if (!is_error(ret)) {
11846 if (put_user_u32(ruid, arg1)
11847 || put_user_u32(euid, arg2)
11848 || put_user_u32(suid, arg3))
11849 return -TARGET_EFAULT;
11850 }
11851 }
11852 return ret;
11853 #endif
11854 #ifdef TARGET_NR_setresgid32
11855 case TARGET_NR_setresgid32:
11856 return get_errno(sys_setresgid(arg1, arg2, arg3));
11857 #endif
11858 #ifdef TARGET_NR_getresgid32
11859 case TARGET_NR_getresgid32:
11860 {
11861 gid_t rgid, egid, sgid;
11862 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11863 if (!is_error(ret)) {
11864 if (put_user_u32(rgid, arg1)
11865 || put_user_u32(egid, arg2)
11866 || put_user_u32(sgid, arg3))
11867 return -TARGET_EFAULT;
11868 }
11869 }
11870 return ret;
11871 #endif
11872 #ifdef TARGET_NR_chown32
11873 case TARGET_NR_chown32:
11874 if (!(p = lock_user_string(arg1)))
11875 return -TARGET_EFAULT;
11876 ret = get_errno(chown(p, arg2, arg3));
11877 unlock_user(p, arg1, 0);
11878 return ret;
11879 #endif
11880 #ifdef TARGET_NR_setuid32
11881 case TARGET_NR_setuid32:
11882 return get_errno(sys_setuid(arg1));
11883 #endif
11884 #ifdef TARGET_NR_setgid32
11885 case TARGET_NR_setgid32:
11886 return get_errno(sys_setgid(arg1));
11887 #endif
11888 #ifdef TARGET_NR_setfsuid32
11889 case TARGET_NR_setfsuid32:
11890 return get_errno(setfsuid(arg1));
11891 #endif
11892 #ifdef TARGET_NR_setfsgid32
11893 case TARGET_NR_setfsgid32:
11894 return get_errno(setfsgid(arg1));
11895 #endif
11896 #ifdef TARGET_NR_mincore
11897 case TARGET_NR_mincore:
11898 {
11899 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11900 if (!a) {
11901 return -TARGET_ENOMEM;
11902 }
11903 p = lock_user_string(arg3);
11904 if (!p) {
11905 ret = -TARGET_EFAULT;
11906 } else {
11907 ret = get_errno(mincore(a, arg2, p));
11908 unlock_user(p, arg3, ret);
11909 }
11910 unlock_user(a, arg1, 0);
11911 }
11912 return ret;
11913 #endif
11914 #ifdef TARGET_NR_arm_fadvise64_64
11915 case TARGET_NR_arm_fadvise64_64:
11916 /* arm_fadvise64_64 looks like fadvise64_64 but
11917 * with different argument order: fd, advice, offset, len
11918 * rather than the usual fd, offset, len, advice.
11919 * Note that offset and len are both 64-bit so appear as
11920 * pairs of 32-bit registers.
11921 */
11922 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11923 target_offset64(arg5, arg6), arg2);
11924 return -host_to_target_errno(ret);
11925 #endif
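/*
 * Illustrative example (editorial, not in the original): for the reordered
 * ABI above, a guest
 *
 *     syscall(__NR_arm_fadvise64_64, fd, POSIX_FADV_DONTNEED,
 *             off_pair, len_pair);
 *
 * arrives with the advice in arg2, so the host call made above becomes
 * posix_fadvise(fd, off, len, POSIX_FADV_DONTNEED), with off and len
 * reassembled from their 32-bit register pairs by target_offset64().
 */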
11927 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11929 #ifdef TARGET_NR_fadvise64_64
11930 case TARGET_NR_fadvise64_64:
11931 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11932 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11933 ret = arg2;
11934 arg2 = arg3;
11935 arg3 = arg4;
11936 arg4 = arg5;
11937 arg5 = arg6;
11938 arg6 = ret;
11939 #else
11940 /* 6 args: fd, offset (high, low), len (high, low), advice */
11941 if (regpairs_aligned(cpu_env, num)) {
11942 /* offset is in (3,4), len in (5,6) and advice in 7 */
11943 arg2 = arg3;
11944 arg3 = arg4;
11945 arg4 = arg5;
11946 arg5 = arg6;
11947 arg6 = arg7;
11948 }
11949 #endif
11950 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11951 target_offset64(arg4, arg5), arg6);
11952 return -host_to_target_errno(ret);
11953 #endif
11955 #ifdef TARGET_NR_fadvise64
11956 case TARGET_NR_fadvise64:
11957 /* 5 args: fd, offset (high, low), len, advice */
11958 if (regpairs_aligned(cpu_env, num)) {
11959 /* offset is in (3,4), len in 5 and advice in 6 */
11960 arg2 = arg3;
11961 arg3 = arg4;
11962 arg4 = arg5;
11963 arg5 = arg6;
11964 }
11965 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11966 return -host_to_target_errno(ret);
11967 #endif
11969 #else /* not a 32-bit ABI */
11970 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11971 #ifdef TARGET_NR_fadvise64_64
11972 case TARGET_NR_fadvise64_64:
11973 #endif
11974 #ifdef TARGET_NR_fadvise64
11975 case TARGET_NR_fadvise64:
11976 #endif
11977 #ifdef TARGET_S390X
11978 switch (arg4) {
11979 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11980 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11981 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11982 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11983 default: break;
11984 }
11985 #endif
11986 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11987 #endif
11988 #endif /* end of 64-bit ABI fadvise handling */
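/*
 * Background sketch (editorial, assumptions flagged): several 32-bit ABIs
 * require a 64-bit syscall argument to start in an even register pair, so
 * an unused padding register may precede it; regpairs_aligned() reports
 * whether the target does this, which is why the argN shuffling above
 * differs per target. Conceptually target_offset64() computes:
 *
 *     uint64_t off = ((uint64_t)high_word << 32) | low_word;
 *
 * where which half is "high" depends on the target's endianness.
 */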
11990 #ifdef TARGET_NR_madvise
11991 case TARGET_NR_madvise:
11992 return target_madvise(arg1, arg2, arg3);
11993 #endif
11994 #ifdef TARGET_NR_fcntl64
11995 case TARGET_NR_fcntl64:
11996 {
11997 int cmd;
11998 struct flock64 fl;
11999 from_flock64_fn *copyfrom = copy_from_user_flock64;
12000 to_flock64_fn *copyto = copy_to_user_flock64;
12002 #ifdef TARGET_ARM
12003 if (!cpu_env->eabi) {
12004 copyfrom = copy_from_user_oabi_flock64;
12005 copyto = copy_to_user_oabi_flock64;
12006 }
12007 #endif
12009 cmd = target_to_host_fcntl_cmd(arg2);
12010 if (cmd == -TARGET_EINVAL) {
12011 return cmd;
12012 }
12014 switch(arg2) {
12015 case TARGET_F_GETLK64:
12016 ret = copyfrom(&fl, arg3);
12017 if (ret) {
12018 break;
12019 }
12020 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12021 if (ret == 0) {
12022 ret = copyto(arg3, &fl);
12023 }
12024 break;
12026 case TARGET_F_SETLK64:
12027 case TARGET_F_SETLKW64:
12028 ret = copyfrom(&fl, arg3);
12029 if (ret) {
12030 break;
12031 }
12032 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12033 break;
12034 default:
12035 ret = do_fcntl(arg1, arg2, arg3);
12036 break;
12037 }
12038 return ret;
12039 }
12040 #endif
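/*
 * Editorial note on the OABI/EABI split above (hedged): ARM's old ABI lays
 * out struct flock64 without the alignment padding that EABI inserts (its
 * 64-bit fields are only 4-byte aligned), so separate copy helpers are
 * needed even though the guest-side call looks identical, e.g.:
 *
 *     struct flock64 fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *     fcntl(fd, F_SETLK64, &fl);
 */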
12041 #ifdef TARGET_NR_cacheflush
12042 case TARGET_NR_cacheflush:
12043 /* self-modifying code is handled automatically, so nothing needed */
12044 return 0;
12045 #endif
12046 #ifdef TARGET_NR_getpagesize
12047 case TARGET_NR_getpagesize:
12048 return TARGET_PAGE_SIZE;
12049 #endif
12050 case TARGET_NR_gettid:
12051 return get_errno(sys_gettid());
12052 #ifdef TARGET_NR_readahead
12053 case TARGET_NR_readahead:
12054 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12055 if (regpairs_aligned(cpu_env, num)) {
12056 arg2 = arg3;
12057 arg3 = arg4;
12058 arg4 = arg5;
12060 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12061 #else
12062 ret = get_errno(readahead(arg1, arg2, arg3));
12063 #endif
12064 return ret;
12065 #endif
12066 #ifdef CONFIG_ATTR
12067 #ifdef TARGET_NR_setxattr
12068 case TARGET_NR_listxattr:
12069 case TARGET_NR_llistxattr:
12071 void *p, *b = 0;
12072 if (arg2) {
12073 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12074 if (!b) {
12075 return -TARGET_EFAULT;
12078 p = lock_user_string(arg1);
12079 if (p) {
12080 if (num == TARGET_NR_listxattr) {
12081 ret = get_errno(listxattr(p, b, arg3));
12082 } else {
12083 ret = get_errno(llistxattr(p, b, arg3));
12085 } else {
12086 ret = -TARGET_EFAULT;
12088 unlock_user(p, arg1, 0);
12089 unlock_user(b, arg2, arg3);
12090 return ret;
12092 case TARGET_NR_flistxattr:
12094 void *b = 0;
12095 if (arg2) {
12096 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12097 if (!b) {
12098 return -TARGET_EFAULT;
12101 ret = get_errno(flistxattr(arg1, b, arg3));
12102 unlock_user(b, arg2, arg3);
12103 return ret;
12105 case TARGET_NR_setxattr:
12106 case TARGET_NR_lsetxattr:
12108 void *p, *n, *v = 0;
12109 if (arg3) {
12110 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12111 if (!v) {
12112 return -TARGET_EFAULT;
12115 p = lock_user_string(arg1);
12116 n = lock_user_string(arg2);
12117 if (p && n) {
12118 if (num == TARGET_NR_setxattr) {
12119 ret = get_errno(setxattr(p, n, v, arg4, arg5));
12120 } else {
12121 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12123 } else {
12124 ret = -TARGET_EFAULT;
12126 unlock_user(p, arg1, 0);
12127 unlock_user(n, arg2, 0);
12128 unlock_user(v, arg3, 0);
12130 return ret;
12131 case TARGET_NR_fsetxattr:
12133 void *n, *v = 0;
12134 if (arg3) {
12135 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12136 if (!v) {
12137 return -TARGET_EFAULT;
12140 n = lock_user_string(arg2);
12141 if (n) {
12142 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12143 } else {
12144 ret = -TARGET_EFAULT;
12146 unlock_user(n, arg2, 0);
12147 unlock_user(v, arg3, 0);
12149 return ret;
12150 case TARGET_NR_getxattr:
12151 case TARGET_NR_lgetxattr:
12153 void *p, *n, *v = 0;
12154 if (arg3) {
12155 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12156 if (!v) {
12157 return -TARGET_EFAULT;
12160 p = lock_user_string(arg1);
12161 n = lock_user_string(arg2);
12162 if (p && n) {
12163 if (num == TARGET_NR_getxattr) {
12164 ret = get_errno(getxattr(p, n, v, arg4));
12165 } else {
12166 ret = get_errno(lgetxattr(p, n, v, arg4));
12168 } else {
12169 ret = -TARGET_EFAULT;
12171 unlock_user(p, arg1, 0);
12172 unlock_user(n, arg2, 0);
12173 unlock_user(v, arg3, arg4);
12175 return ret;
12176 case TARGET_NR_fgetxattr:
12178 void *n, *v = 0;
12179 if (arg3) {
12180 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12181 if (!v) {
12182 return -TARGET_EFAULT;
12185 n = lock_user_string(arg2);
12186 if (n) {
12187 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12188 } else {
12189 ret = -TARGET_EFAULT;
12191 unlock_user(n, arg2, 0);
12192 unlock_user(v, arg3, arg4);
12194 return ret;
12195 case TARGET_NR_removexattr:
12196 case TARGET_NR_lremovexattr:
12198 void *p, *n;
12199 p = lock_user_string(arg1);
12200 n = lock_user_string(arg2);
12201 if (p && n) {
12202 if (num == TARGET_NR_removexattr) {
12203 ret = get_errno(removexattr(p, n));
12204 } else {
12205 ret = get_errno(lremovexattr(p, n));
12207 } else {
12208 ret = -TARGET_EFAULT;
12210 unlock_user(p, arg1, 0);
12211 unlock_user(n, arg2, 0);
12213 return ret;
12214 case TARGET_NR_fremovexattr:
12216 void *n;
12217 n = lock_user_string(arg2);
12218 if (n) {
12219 ret = get_errno(fremovexattr(arg1, n));
12220 } else {
12221 ret = -TARGET_EFAULT;
12223 unlock_user(n, arg2, 0);
12225 return ret;
12226 #endif
12227 #endif /* CONFIG_ATTR */
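/*
 * Illustrative guest-side use of the xattr family forwarded above (hedged
 * sketch, plain libc API):
 *
 *     setxattr("/tmp/f", "user.origin", "qemu", 4, 0);
 *     char buf[64];
 *     ssize_t n = getxattr("/tmp/f", "user.origin", buf, sizeof(buf));
 *
 * Names and values go through lock_user*() first, so an unmapped guest
 * pointer surfaces as -TARGET_EFAULT instead of crashing QEMU.
 */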
12228 #ifdef TARGET_NR_set_thread_area
12229 case TARGET_NR_set_thread_area:
12230 #if defined(TARGET_MIPS)
12231 cpu_env->active_tc.CP0_UserLocal = arg1;
12232 return 0;
12233 #elif defined(TARGET_CRIS)
12234 if (arg1 & 0xff)
12235 ret = -TARGET_EINVAL;
12236 else {
12237 cpu_env->pregs[PR_PID] = arg1;
12238 ret = 0;
12240 return ret;
12241 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12242 return do_set_thread_area(cpu_env, arg1);
12243 #elif defined(TARGET_M68K)
12245 TaskState *ts = cpu->opaque;
12246 ts->tp_value = arg1;
12247 return 0;
12249 #else
12250 return -TARGET_ENOSYS;
12251 #endif
12252 #endif
12253 #ifdef TARGET_NR_get_thread_area
12254 case TARGET_NR_get_thread_area:
12255 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12256 return do_get_thread_area(cpu_env, arg1);
12257 #elif defined(TARGET_M68K)
12259 TaskState *ts = cpu->opaque;
12260 return ts->tp_value;
12262 #else
12263 return -TARGET_ENOSYS;
12264 #endif
12265 #endif
12266 #ifdef TARGET_NR_getdomainname
12267 case TARGET_NR_getdomainname:
12268 return -TARGET_ENOSYS;
12269 #endif
12271 #ifdef TARGET_NR_clock_settime
12272 case TARGET_NR_clock_settime:
12274 struct timespec ts;
12276 ret = target_to_host_timespec(&ts, arg2);
12277 if (!is_error(ret)) {
12278 ret = get_errno(clock_settime(arg1, &ts));
12280 return ret;
12282 #endif
12283 #ifdef TARGET_NR_clock_settime64
12284 case TARGET_NR_clock_settime64:
12286 struct timespec ts;
12288 ret = target_to_host_timespec64(&ts, arg2);
12289 if (!is_error(ret)) {
12290 ret = get_errno(clock_settime(arg1, &ts));
12292 return ret;
12294 #endif
12295 #ifdef TARGET_NR_clock_gettime
12296 case TARGET_NR_clock_gettime:
12298 struct timespec ts;
12299 ret = get_errno(clock_gettime(arg1, &ts));
12300 if (!is_error(ret)) {
12301 ret = host_to_target_timespec(arg2, &ts);
12303 return ret;
12305 #endif
12306 #ifdef TARGET_NR_clock_gettime64
12307 case TARGET_NR_clock_gettime64:
12309 struct timespec ts;
12310 ret = get_errno(clock_gettime(arg1, &ts));
12311 if (!is_error(ret)) {
12312 ret = host_to_target_timespec64(arg2, &ts);
12314 return ret;
12316 #endif
12317 #ifdef TARGET_NR_clock_getres
12318 case TARGET_NR_clock_getres:
12320 struct timespec ts;
12321 ret = get_errno(clock_getres(arg1, &ts));
12322 if (!is_error(ret)) {
12323 host_to_target_timespec(arg2, &ts);
12325 return ret;
12327 #endif
12328 #ifdef TARGET_NR_clock_getres_time64
12329 case TARGET_NR_clock_getres_time64:
12331 struct timespec ts;
12332 ret = get_errno(clock_getres(arg1, &ts));
12333 if (!is_error(ret)) {
12334 host_to_target_timespec64(arg2, &ts);
12336 return ret;
12338 #endif
12339 #ifdef TARGET_NR_clock_nanosleep
12340 case TARGET_NR_clock_nanosleep:
12341 {
12342 struct timespec ts;
12343 if (target_to_host_timespec(&ts, arg3)) {
12344 return -TARGET_EFAULT;
12345 }
12346 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12347 &ts, arg4 ? &ts : NULL));
12348 /*
12349 * if the call is interrupted by a signal handler, it fails
12350 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12351 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12352 */
12353 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12354 host_to_target_timespec(arg4, &ts)) {
12355 return -TARGET_EFAULT;
12356 }
12358 return ret;
12359 }
12360 #endif
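/*
 * Illustrative guest pattern served by the code above (hedged sketch):
 *
 *     struct timespec req = { .tv_sec = 5 }, rem;
 *     while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) == EINTR) {
 *         req = rem;   // resume with the unslept remainder
 *     }
 *
 * which is why the remainder is copied back only in the interrupted,
 * non-TIMER_ABSTIME case.
 */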
12361 #ifdef TARGET_NR_clock_nanosleep_time64
12362 case TARGET_NR_clock_nanosleep_time64:
12364 struct timespec ts;
12366 if (target_to_host_timespec64(&ts, arg3)) {
12367 return -TARGET_EFAULT;
12370 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12371 &ts, arg4 ? &ts : NULL));
12373 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12374 host_to_target_timespec64(arg4, &ts)) {
12375 return -TARGET_EFAULT;
12377 return ret;
12379 #endif
12381 #if defined(TARGET_NR_set_tid_address)
12382 case TARGET_NR_set_tid_address:
12384 TaskState *ts = cpu->opaque;
12385 ts->child_tidptr = arg1;
12386 /* do not call host set_tid_address() syscall, instead return tid() */
12387 return get_errno(sys_gettid());
12389 #endif
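/*
 * Editorial note (hedged): child_tidptr implements the
 * CLONE_CHILD_CLEARTID contract; when a thread exits, the word at that
 * guest address is zeroed and futex-woken, which pthread_join() waits on.
 * QEMU keeps the pointer in the TaskState and does the clear/wake itself
 * on guest-thread exit, so forwarding set_tid_address() to the host with
 * a guest pointer would be wrong.
 */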
12391 case TARGET_NR_tkill:
12392 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12394 case TARGET_NR_tgkill:
12395 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12396 target_to_host_signal(arg3)));
12398 #ifdef TARGET_NR_set_robust_list
12399 case TARGET_NR_set_robust_list:
12400 case TARGET_NR_get_robust_list:
12401 /* The ABI for supporting robust futexes has userspace pass
12402 * the kernel a pointer to a linked list which is updated by
12403 * userspace after the syscall; the list is walked by the kernel
12404 * when the thread exits. Since the linked list in QEMU guest
12405 * memory isn't a valid linked list for the host and we have
12406 * no way to reliably intercept the thread-death event, we can't
12407 * support these. Silently return ENOSYS so that guest userspace
12408 * falls back to a non-robust futex implementation (which should
12409 * be OK except in the corner case of the guest crashing while
12410 * holding a mutex that is shared with another process via
12411 shared memory).
12412 */
12413 return -TARGET_ENOSYS;
12414 #endif
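/*
 * Editorial sketch of the ABI declined above (hedged): at startup glibc
 * registers one list head per thread, roughly
 *
 *     static __thread struct robust_list_head head;
 *     head.list.next = &head.list;
 *     syscall(__NR_set_robust_list, &head, sizeof(head));
 *
 * and the kernel walks that guest-virtual list when the thread dies,
 * which is precisely the event QEMU cannot intercept reliably.
 */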
12416 #if defined(TARGET_NR_utimensat)
12417 case TARGET_NR_utimensat:
12419 struct timespec *tsp, ts[2];
12420 if (!arg3) {
12421 tsp = NULL;
12422 } else {
12423 if (target_to_host_timespec(ts, arg3)) {
12424 return -TARGET_EFAULT;
12426 if (target_to_host_timespec(ts + 1, arg3 +
12427 sizeof(struct target_timespec))) {
12428 return -TARGET_EFAULT;
12430 tsp = ts;
12432 if (!arg2)
12433 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12434 else {
12435 if (!(p = lock_user_string(arg2))) {
12436 return -TARGET_EFAULT;
12438 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12439 unlock_user(p, arg2, 0);
12442 return ret;
12443 #endif
12444 #ifdef TARGET_NR_utimensat_time64
12445 case TARGET_NR_utimensat_time64:
12447 struct timespec *tsp, ts[2];
12448 if (!arg3) {
12449 tsp = NULL;
12450 } else {
12451 if (target_to_host_timespec64(ts, arg3)) {
12452 return -TARGET_EFAULT;
12454 if (target_to_host_timespec64(ts + 1, arg3 +
12455 sizeof(struct target__kernel_timespec))) {
12456 return -TARGET_EFAULT;
12458 tsp = ts;
12460 if (!arg2)
12461 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12462 else {
12463 p = lock_user_string(arg2);
12464 if (!p) {
12465 return -TARGET_EFAULT;
12467 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12468 unlock_user(p, arg2, 0);
12471 return ret;
12472 #endif
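/*
 * Illustrative guest call served by the code above (hedged):
 *
 *     struct timespec ts[2] = {
 *         { .tv_nsec = UTIME_NOW },     // atime := now
 *         { .tv_nsec = UTIME_OMIT },    // leave mtime unchanged
 *     };
 *     utimensat(AT_FDCWD, "file", ts, 0);
 *
 * A NULL times pointer (arg3 == 0 above) means "set both stamps to now".
 */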
12473 #ifdef TARGET_NR_futex
12474 case TARGET_NR_futex:
12475 return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12476 #endif
12477 #ifdef TARGET_NR_futex_time64
12478 case TARGET_NR_futex_time64:
12479 return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12480 #endif
12481 #ifdef CONFIG_INOTIFY
12482 #if defined(TARGET_NR_inotify_init)
12483 case TARGET_NR_inotify_init:
12484 ret = get_errno(inotify_init());
12485 if (ret >= 0) {
12486 fd_trans_register(ret, &target_inotify_trans);
12488 return ret;
12489 #endif
12490 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12491 case TARGET_NR_inotify_init1:
12492 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12493 fcntl_flags_tbl)));
12494 if (ret >= 0) {
12495 fd_trans_register(ret, &target_inotify_trans);
12497 return ret;
12498 #endif
12499 #if defined(TARGET_NR_inotify_add_watch)
12500 case TARGET_NR_inotify_add_watch:
12501 p = lock_user_string(arg2);
12502 ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12503 unlock_user(p, arg2, 0);
12504 return ret;
12505 #endif
12506 #if defined(TARGET_NR_inotify_rm_watch)
12507 case TARGET_NR_inotify_rm_watch:
12508 return get_errno(inotify_rm_watch(arg1, arg2));
12509 #endif
12510 #endif
12512 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12513 case TARGET_NR_mq_open:
12515 struct mq_attr posix_mq_attr;
12516 struct mq_attr *pposix_mq_attr;
12517 int host_flags;
12519 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12520 pposix_mq_attr = NULL;
12521 if (arg4) {
12522 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12523 return -TARGET_EFAULT;
12525 pposix_mq_attr = &posix_mq_attr;
12527 p = lock_user_string(arg1 - 1);
12528 if (!p) {
12529 return -TARGET_EFAULT;
12531 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12532 unlock_user (p, arg1, 0);
12534 return ret;
12536 case TARGET_NR_mq_unlink:
12537 p = lock_user_string(arg1 - 1);
12538 if (!p) {
12539 return -TARGET_EFAULT;
12541 ret = get_errno(mq_unlink(p));
12542 unlock_user (p, arg1, 0);
12543 return ret;
12545 #ifdef TARGET_NR_mq_timedsend
12546 case TARGET_NR_mq_timedsend:
12548 struct timespec ts;
12550 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12551 if (arg5 != 0) {
12552 if (target_to_host_timespec(&ts, arg5)) {
12553 return -TARGET_EFAULT;
12555 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12556 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12557 return -TARGET_EFAULT;
12559 } else {
12560 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12562 unlock_user (p, arg2, arg3);
12564 return ret;
12565 #endif
12566 #ifdef TARGET_NR_mq_timedsend_time64
12567 case TARGET_NR_mq_timedsend_time64:
12569 struct timespec ts;
12571 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12572 if (arg5 != 0) {
12573 if (target_to_host_timespec64(&ts, arg5)) {
12574 return -TARGET_EFAULT;
12576 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12577 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12578 return -TARGET_EFAULT;
12580 } else {
12581 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12583 unlock_user(p, arg2, arg3);
12585 return ret;
12586 #endif
12588 #ifdef TARGET_NR_mq_timedreceive
12589 case TARGET_NR_mq_timedreceive:
12591 struct timespec ts;
12592 unsigned int prio;
12594 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12595 if (arg5 != 0) {
12596 if (target_to_host_timespec(&ts, arg5)) {
12597 return -TARGET_EFAULT;
12599 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12600 &prio, &ts));
12601 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12602 return -TARGET_EFAULT;
12604 } else {
12605 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12606 &prio, NULL));
12608 unlock_user (p, arg2, arg3);
12609 if (arg4 != 0)
12610 put_user_u32(prio, arg4);
12612 return ret;
12613 #endif
12614 #ifdef TARGET_NR_mq_timedreceive_time64
12615 case TARGET_NR_mq_timedreceive_time64:
12617 struct timespec ts;
12618 unsigned int prio;
12620 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12621 if (arg5 != 0) {
12622 if (target_to_host_timespec64(&ts, arg5)) {
12623 return -TARGET_EFAULT;
12625 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12626 &prio, &ts));
12627 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12628 return -TARGET_EFAULT;
12630 } else {
12631 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12632 &prio, NULL));
12634 unlock_user(p, arg2, arg3);
12635 if (arg4 != 0) {
12636 put_user_u32(prio, arg4);
12639 return ret;
12640 #endif
12642 /* Not implemented for now... */
12643 /* case TARGET_NR_mq_notify: */
12644 /* break; */
12646 case TARGET_NR_mq_getsetattr:
12647 {
12648 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12649 ret = 0;
12650 if (arg2 != 0) {
12651 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12652 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12653 &posix_mq_attr_out));
12654 } else if (arg3 != 0) {
12655 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12656 }
12657 if (ret == 0 && arg3 != 0) {
12658 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12659 }
12660 }
12661 return ret;
12662 #endif
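/*
 * Editorial notes on the mq_* block above (hedged):
 *
 * lock_user_string(arg1 - 1) backs up one byte because guest libc passes
 * the queue name to the kernel with its leading '/' stripped in place;
 * the byte before arg1 is still that '/', and the host mq_open() (a libc
 * call, not a raw syscall) wants it back. Illustrative guest usage:
 *
 *     mqd_t q = mq_open("/demo", O_CREAT | O_RDWR, 0600, NULL);
 *     mq_send(q, "hi", 2, 0);
 */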
12664 #ifdef CONFIG_SPLICE
12665 #ifdef TARGET_NR_tee
12666 case TARGET_NR_tee:
12668 ret = get_errno(tee(arg1,arg2,arg3,arg4));
12670 return ret;
12671 #endif
12672 #ifdef TARGET_NR_splice
12673 case TARGET_NR_splice:
12675 loff_t loff_in, loff_out;
12676 loff_t *ploff_in = NULL, *ploff_out = NULL;
12677 if (arg2) {
12678 if (get_user_u64(loff_in, arg2)) {
12679 return -TARGET_EFAULT;
12681 ploff_in = &loff_in;
12683 if (arg4) {
12684 if (get_user_u64(loff_out, arg4)) {
12685 return -TARGET_EFAULT;
12687 ploff_out = &loff_out;
12689 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12690 if (arg2) {
12691 if (put_user_u64(loff_in, arg2)) {
12692 return -TARGET_EFAULT;
12695 if (arg4) {
12696 if (put_user_u64(loff_out, arg4)) {
12697 return -TARGET_EFAULT;
12701 return ret;
12702 #endif
12703 #ifdef TARGET_NR_vmsplice
12704 case TARGET_NR_vmsplice:
12706 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12707 if (vec != NULL) {
12708 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12709 unlock_iovec(vec, arg2, arg3, 0);
12710 } else {
12711 ret = -host_to_target_errno(errno);
12714 return ret;
12715 #endif
12716 #endif /* CONFIG_SPLICE */
12717 #ifdef CONFIG_EVENTFD
12718 #if defined(TARGET_NR_eventfd)
12719 case TARGET_NR_eventfd:
12720 ret = get_errno(eventfd(arg1, 0));
12721 if (ret >= 0) {
12722 fd_trans_register(ret, &target_eventfd_trans);
12724 return ret;
12725 #endif
12726 #if defined(TARGET_NR_eventfd2)
12727 case TARGET_NR_eventfd2:
12729 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12730 if (arg2 & TARGET_O_NONBLOCK) {
12731 host_flags |= O_NONBLOCK;
12733 if (arg2 & TARGET_O_CLOEXEC) {
12734 host_flags |= O_CLOEXEC;
12736 ret = get_errno(eventfd(arg1, host_flags));
12737 if (ret >= 0) {
12738 fd_trans_register(ret, &target_eventfd_trans);
12740 return ret;
12742 #endif
12743 #endif /* CONFIG_EVENTFD */
12744 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12745 case TARGET_NR_fallocate:
12746 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12747 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12748 target_offset64(arg5, arg6)));
12749 #else
12750 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12751 #endif
12752 return ret;
12753 #endif
12754 #if defined(CONFIG_SYNC_FILE_RANGE)
12755 #if defined(TARGET_NR_sync_file_range)
12756 case TARGET_NR_sync_file_range:
12757 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12758 #if defined(TARGET_MIPS)
12759 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12760 target_offset64(arg5, arg6), arg7));
12761 #else
12762 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12763 target_offset64(arg4, arg5), arg6));
12764 #endif /* !TARGET_MIPS */
12765 #else
12766 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12767 #endif
12768 return ret;
12769 #endif
12770 #if defined(TARGET_NR_sync_file_range2) || \
12771 defined(TARGET_NR_arm_sync_file_range)
12772 #if defined(TARGET_NR_sync_file_range2)
12773 case TARGET_NR_sync_file_range2:
12774 #endif
12775 #if defined(TARGET_NR_arm_sync_file_range)
12776 case TARGET_NR_arm_sync_file_range:
12777 #endif
12778 /* This is like sync_file_range but the arguments are reordered */
12779 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12780 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12781 target_offset64(arg5, arg6), arg2));
12782 #else
12783 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12784 #endif
12785 return ret;
12786 #endif
12787 #endif
12788 #if defined(TARGET_NR_signalfd4)
12789 case TARGET_NR_signalfd4:
12790 return do_signalfd4(arg1, arg2, arg4);
12791 #endif
12792 #if defined(TARGET_NR_signalfd)
12793 case TARGET_NR_signalfd:
12794 return do_signalfd4(arg1, arg2, 0);
12795 #endif
12796 #if defined(CONFIG_EPOLL)
12797 #if defined(TARGET_NR_epoll_create)
12798 case TARGET_NR_epoll_create:
12799 return get_errno(epoll_create(arg1));
12800 #endif
12801 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12802 case TARGET_NR_epoll_create1:
12803 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12804 #endif
12805 #if defined(TARGET_NR_epoll_ctl)
12806 case TARGET_NR_epoll_ctl:
12807 {
12808 struct epoll_event ep;
12809 struct epoll_event *epp = 0;
12810 if (arg4) {
12811 if (arg2 != EPOLL_CTL_DEL) {
12812 struct target_epoll_event *target_ep;
12813 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12814 return -TARGET_EFAULT;
12815 }
12816 ep.events = tswap32(target_ep->events);
12817 /*
12818 * The epoll_data_t union is just opaque data to the kernel,
12819 * so we transfer all 64 bits across and need not worry what
12820 * actual data type it is.
12821 */
12822 ep.data.u64 = tswap64(target_ep->data.u64);
12823 unlock_user_struct(target_ep, arg4, 0);
12824 }
12825 /*
12826 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12827 * non-null pointer, even though this argument is ignored.
12828 */
12830 epp = &ep;
12831 }
12832 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12833 }
12834 #endif
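/*
 * Editorial note (hedged): tswap32/tswap64 byte-swap only when guest and
 * host endianness differ, so for a big-endian guest on a little-endian
 * host
 *
 *     ep.events = tswap32(target_ep->events);
 *
 * converts the guest-order event mask back into host order; on
 * same-endian pairs the tswap calls are effectively plain loads.
 */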
12836 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12837 #if defined(TARGET_NR_epoll_wait)
12838 case TARGET_NR_epoll_wait:
12839 #endif
12840 #if defined(TARGET_NR_epoll_pwait)
12841 case TARGET_NR_epoll_pwait:
12842 #endif
12844 struct target_epoll_event *target_ep;
12845 struct epoll_event *ep;
12846 int epfd = arg1;
12847 int maxevents = arg3;
12848 int timeout = arg4;
12850 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12851 return -TARGET_EINVAL;
12854 target_ep = lock_user(VERIFY_WRITE, arg2,
12855 maxevents * sizeof(struct target_epoll_event), 1);
12856 if (!target_ep) {
12857 return -TARGET_EFAULT;
12860 ep = g_try_new(struct epoll_event, maxevents);
12861 if (!ep) {
12862 unlock_user(target_ep, arg2, 0);
12863 return -TARGET_ENOMEM;
12866 switch (num) {
12867 #if defined(TARGET_NR_epoll_pwait)
12868 case TARGET_NR_epoll_pwait:
12870 sigset_t *set = NULL;
12872 if (arg5) {
12873 ret = process_sigsuspend_mask(&set, arg5, arg6);
12874 if (ret != 0) {
12875 break;
12879 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12880 set, SIGSET_T_SIZE));
12882 if (set) {
12883 finish_sigsuspend_mask(ret);
12885 break;
12887 #endif
12888 #if defined(TARGET_NR_epoll_wait)
12889 case TARGET_NR_epoll_wait:
12890 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12891 NULL, 0));
12892 break;
12893 #endif
12894 default:
12895 ret = -TARGET_ENOSYS;
12897 if (!is_error(ret)) {
12898 int i;
12899 for (i = 0; i < ret; i++) {
12900 target_ep[i].events = tswap32(ep[i].events);
12901 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12903 unlock_user(target_ep, arg2,
12904 ret * sizeof(struct target_epoll_event));
12905 } else {
12906 unlock_user(target_ep, arg2, 0);
12908 g_free(ep);
12909 return ret;
12911 #endif
12912 #endif
12913 #ifdef TARGET_NR_prlimit64
12914 case TARGET_NR_prlimit64:
12916 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12917 struct target_rlimit64 *target_rnew, *target_rold;
12918 struct host_rlimit64 rnew, rold, *rnewp = 0;
12919 int resource = target_to_host_resource(arg2);
12921 if (arg3 && (resource != RLIMIT_AS &&
12922 resource != RLIMIT_DATA &&
12923 resource != RLIMIT_STACK)) {
12924 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12925 return -TARGET_EFAULT;
12927 __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
12928 __get_user(rnew.rlim_max, &target_rnew->rlim_max);
12929 unlock_user_struct(target_rnew, arg3, 0);
12930 rnewp = &rnew;
12933 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12934 if (!is_error(ret) && arg4) {
12935 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12936 return -TARGET_EFAULT;
12938 __put_user(rold.rlim_cur, &target_rold->rlim_cur);
12939 __put_user(rold.rlim_max, &target_rold->rlim_max);
12940 unlock_user_struct(target_rold, arg4, 1);
12942 return ret;
12944 #endif
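/*
 * Editorial note (hedged): the new-limit pointer is deliberately dropped
 * for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK, since prlimit64() here
 * runs against the host QEMU process and letting the guest shrink those
 * could break the emulator itself; old limits are still returned, e.g.:
 *
 *     struct rlimit64 old;
 *     prlimit64(pid, RLIMIT_NOFILE, NULL, &old);    // query-only use
 */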
12945 #ifdef TARGET_NR_gethostname
12946 case TARGET_NR_gethostname:
12948 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12949 if (name) {
12950 ret = get_errno(gethostname(name, arg2));
12951 unlock_user(name, arg1, arg2);
12952 } else {
12953 ret = -TARGET_EFAULT;
12955 return ret;
12957 #endif
12958 #ifdef TARGET_NR_atomic_cmpxchg_32
12959 case TARGET_NR_atomic_cmpxchg_32:
12960 {
12961 /* should use start_exclusive from main.c */
12962 abi_ulong mem_value;
12963 if (get_user_u32(mem_value, arg6)) {
12964 target_siginfo_t info;
12965 info.si_signo = SIGSEGV;
12966 info.si_errno = 0;
12967 info.si_code = TARGET_SEGV_MAPERR;
12968 info._sifields._sigfault._addr = arg6;
12969 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12970 ret = 0xdeadbeef;
12971 }
12973 if (mem_value == arg2)
12974 put_user_u32(arg1, arg6);
12975 return mem_value;
12976 }
12977 #endif
12978 #ifdef TARGET_NR_atomic_barrier
12979 case TARGET_NR_atomic_barrier:
12980 /* Like the kernel implementation and the
12981 qemu arm barrier, no-op this? */
12982 return 0;
12983 #endif
12985 #ifdef TARGET_NR_timer_create
12986 case TARGET_NR_timer_create:
12988 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12990 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12992 int clkid = arg1;
12993 int timer_index = next_free_host_timer();
12995 if (timer_index < 0) {
12996 ret = -TARGET_EAGAIN;
12997 } else {
12998 timer_t *phtimer = g_posix_timers + timer_index;
13000 if (arg2) {
13001 phost_sevp = &host_sevp;
13002 ret = target_to_host_sigevent(phost_sevp, arg2);
13003 if (ret != 0) {
13004 free_host_timer_slot(timer_index);
13005 return ret;
13009 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13010 if (ret) {
13011 free_host_timer_slot(timer_index);
13012 } else {
13013 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13014 timer_delete(*phtimer);
13015 free_host_timer_slot(timer_index);
13016 return -TARGET_EFAULT;
13020 return ret;
13022 #endif
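/*
 * Editorial sketch of the timer-id scheme used above (names from this
 * file): guest-visible ids are TIMER_MAGIC | index, where index selects a
 * slot in g_posix_timers[]; get_timer_id() validates the magic and the
 * range, roughly:
 *
 *     if ((arg & TIMER_MAGIC_MASK) != TIMER_MAGIC) return -TARGET_EINVAL;
 *     timerid = arg & 0xffff;
 *
 * so a stale or forged id fails cleanly instead of indexing out of bounds.
 */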
13024 #ifdef TARGET_NR_timer_settime
13025 case TARGET_NR_timer_settime:
13027 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13028 * struct itimerspec * old_value */
13029 target_timer_t timerid = get_timer_id(arg1);
13031 if (timerid < 0) {
13032 ret = timerid;
13033 } else if (arg3 == 0) {
13034 ret = -TARGET_EINVAL;
13035 } else {
13036 timer_t htimer = g_posix_timers[timerid];
13037 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13039 if (target_to_host_itimerspec(&hspec_new, arg3)) {
13040 return -TARGET_EFAULT;
13042 ret = get_errno(
13043 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13044 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13045 return -TARGET_EFAULT;
13048 return ret;
13050 #endif
13052 #ifdef TARGET_NR_timer_settime64
13053 case TARGET_NR_timer_settime64:
13055 target_timer_t timerid = get_timer_id(arg1);
13057 if (timerid < 0) {
13058 ret = timerid;
13059 } else if (arg3 == 0) {
13060 ret = -TARGET_EINVAL;
13061 } else {
13062 timer_t htimer = g_posix_timers[timerid];
13063 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13065 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13066 return -TARGET_EFAULT;
13068 ret = get_errno(
13069 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13070 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13071 return -TARGET_EFAULT;
13074 return ret;
13076 #endif
13078 #ifdef TARGET_NR_timer_gettime
13079 case TARGET_NR_timer_gettime:
13081 /* args: timer_t timerid, struct itimerspec *curr_value */
13082 target_timer_t timerid = get_timer_id(arg1);
13084 if (timerid < 0) {
13085 ret = timerid;
13086 } else if (!arg2) {
13087 ret = -TARGET_EFAULT;
13088 } else {
13089 timer_t htimer = g_posix_timers[timerid];
13090 struct itimerspec hspec;
13091 ret = get_errno(timer_gettime(htimer, &hspec));
13093 if (host_to_target_itimerspec(arg2, &hspec)) {
13094 ret = -TARGET_EFAULT;
13097 return ret;
13099 #endif
13101 #ifdef TARGET_NR_timer_gettime64
13102 case TARGET_NR_timer_gettime64:
13104 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13105 target_timer_t timerid = get_timer_id(arg1);
13107 if (timerid < 0) {
13108 ret = timerid;
13109 } else if (!arg2) {
13110 ret = -TARGET_EFAULT;
13111 } else {
13112 timer_t htimer = g_posix_timers[timerid];
13113 struct itimerspec hspec;
13114 ret = get_errno(timer_gettime(htimer, &hspec));
13116 if (host_to_target_itimerspec64(arg2, &hspec)) {
13117 ret = -TARGET_EFAULT;
13120 return ret;
13122 #endif
13124 #ifdef TARGET_NR_timer_getoverrun
13125 case TARGET_NR_timer_getoverrun:
13127 /* args: timer_t timerid */
13128 target_timer_t timerid = get_timer_id(arg1);
13130 if (timerid < 0) {
13131 ret = timerid;
13132 } else {
13133 timer_t htimer = g_posix_timers[timerid];
13134 ret = get_errno(timer_getoverrun(htimer));
13136 return ret;
13138 #endif
13140 #ifdef TARGET_NR_timer_delete
13141 case TARGET_NR_timer_delete:
13143 /* args: timer_t timerid */
13144 target_timer_t timerid = get_timer_id(arg1);
13146 if (timerid < 0) {
13147 ret = timerid;
13148 } else {
13149 timer_t htimer = g_posix_timers[timerid];
13150 ret = get_errno(timer_delete(htimer));
13151 free_host_timer_slot(timerid);
13153 return ret;
13155 #endif
13157 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13158 case TARGET_NR_timerfd_create:
13159 ret = get_errno(timerfd_create(arg1,
13160 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13161 if (ret >= 0) {
13162 fd_trans_register(ret, &target_timerfd_trans);
13164 return ret;
13165 #endif
13167 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13168 case TARGET_NR_timerfd_gettime:
13170 struct itimerspec its_curr;
13172 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13174 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13175 return -TARGET_EFAULT;
13178 return ret;
13179 #endif
13181 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13182 case TARGET_NR_timerfd_gettime64:
13184 struct itimerspec its_curr;
13186 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13188 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13189 return -TARGET_EFAULT;
13192 return ret;
13193 #endif
13195 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13196 case TARGET_NR_timerfd_settime:
13198 struct itimerspec its_new, its_old, *p_new;
13200 if (arg3) {
13201 if (target_to_host_itimerspec(&its_new, arg3)) {
13202 return -TARGET_EFAULT;
13204 p_new = &its_new;
13205 } else {
13206 p_new = NULL;
13209 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13211 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13212 return -TARGET_EFAULT;
13215 return ret;
13216 #endif
13218 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13219 case TARGET_NR_timerfd_settime64:
13221 struct itimerspec its_new, its_old, *p_new;
13223 if (arg3) {
13224 if (target_to_host_itimerspec64(&its_new, arg3)) {
13225 return -TARGET_EFAULT;
13227 p_new = &its_new;
13228 } else {
13229 p_new = NULL;
13232 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13234 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13235 return -TARGET_EFAULT;
13238 return ret;
13239 #endif
13241 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13242 case TARGET_NR_ioprio_get:
13243 return get_errno(ioprio_get(arg1, arg2));
13244 #endif
13246 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13247 case TARGET_NR_ioprio_set:
13248 return get_errno(ioprio_set(arg1, arg2, arg3));
13249 #endif
13251 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13252 case TARGET_NR_setns:
13253 return get_errno(setns(arg1, arg2));
13254 #endif
13255 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13256 case TARGET_NR_unshare:
13257 return get_errno(unshare(arg1));
13258 #endif
13259 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13260 case TARGET_NR_kcmp:
13261 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13262 #endif
13263 #ifdef TARGET_NR_swapcontext
13264 case TARGET_NR_swapcontext:
13265 /* PowerPC specific. */
13266 return do_swapcontext(cpu_env, arg1, arg2, arg3);
13267 #endif
13268 #ifdef TARGET_NR_memfd_create
13269 case TARGET_NR_memfd_create:
13270 p = lock_user_string(arg1);
13271 if (!p) {
13272 return -TARGET_EFAULT;
13274 ret = get_errno(memfd_create(p, arg2));
13275 fd_trans_unregister(ret);
13276 unlock_user(p, arg1, 0);
13277 return ret;
13278 #endif
13279 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13280 case TARGET_NR_membarrier:
13281 return get_errno(membarrier(arg1, arg2));
13282 #endif
13284 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13285 case TARGET_NR_copy_file_range:
13287 loff_t inoff, outoff;
13288 loff_t *pinoff = NULL, *poutoff = NULL;
13290 if (arg2) {
13291 if (get_user_u64(inoff, arg2)) {
13292 return -TARGET_EFAULT;
13294 pinoff = &inoff;
13296 if (arg4) {
13297 if (get_user_u64(outoff, arg4)) {
13298 return -TARGET_EFAULT;
13300 poutoff = &outoff;
13302 /* Do not sign-extend the count parameter. */
13303 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13304 (abi_ulong)arg5, arg6));
13305 if (!is_error(ret) && ret > 0) {
13306 if (arg2) {
13307 if (put_user_u64(inoff, arg2)) {
13308 return -TARGET_EFAULT;
13311 if (arg4) {
13312 if (put_user_u64(outoff, arg4)) {
13313 return -TARGET_EFAULT;
13318 return ret;
13319 #endif
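/*
 * Editorial note on the cast above (hedged): abi_long is signed, so on a
 * 32-bit guest a large count such as 0xfffff000 would sign-extend into a
 * huge 64-bit value; casting through abi_ulong keeps it zero-extended:
 *
 *     (uint64_t)(abi_long)0xfffff000u   ->  0xfffffffffffff000
 *     (uint64_t)(abi_ulong)0xfffff000u  ->  0x00000000fffff000
 */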
13321 #if defined(TARGET_NR_pivot_root)
13322 case TARGET_NR_pivot_root:
13323 {
13324 void *p2;
13325 p = lock_user_string(arg1); /* new_root */
13326 p2 = lock_user_string(arg2); /* put_old */
13327 if (!p || !p2) {
13328 ret = -TARGET_EFAULT;
13329 } else {
13330 ret = get_errno(pivot_root(p, p2));
13331 }
13332 unlock_user(p2, arg2, 0);
13333 unlock_user(p, arg1, 0);
13334 }
13335 return ret;
13336 #endif
13338 default:
13339 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13340 return -TARGET_ENOSYS;
13341 }
13342 return ret;
13343 }
13345 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13346 abi_long arg2, abi_long arg3, abi_long arg4,
13347 abi_long arg5, abi_long arg6, abi_long arg7,
13348 abi_long arg8)
13349 {
13350 CPUState *cpu = env_cpu(cpu_env);
13351 abi_long ret;
13353 #ifdef DEBUG_ERESTARTSYS
13354 /* Debug-only code for exercising the syscall-restart code paths
13355 * in the per-architecture cpu main loops: restart every syscall
13356 * the guest makes once before letting it through.
13357 */
13358 {
13359 static bool flag;
13360 flag = !flag;
13361 if (flag) {
13362 return -QEMU_ERESTARTSYS;
13363 }
13364 }
13365 #endif
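/*
 * Editorial note: the static flag above alternates on every entry, so each
 * syscall invocation is first bounced back with -QEMU_ERESTARTSYS (forcing
 * the cpu main loop to re-issue it) and then allowed through on the retry.
 */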
13367 record_syscall_start(cpu, num, arg1,
13368 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13370 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13371 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13374 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13375 arg5, arg6, arg7, arg8);
13377 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13378 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13379 arg3, arg4, arg5, arg6);
13382 record_syscall_return(cpu, num, ret);
13383 return ret;
13384 }