linux-user/syscall.c
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <elf.h>
#include <endian.h>
#include <grp.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <sched.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "tcg/tcg.h"
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6

#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
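/*
 * For example, the declaration
 *
 *     _syscall2(int, capget, struct __user_cap_header_struct *, header,
 *               struct __user_cap_data_struct *, data);
 *
 * expands to a static wrapper equivalent to
 *
 *     static int capget(struct __user_cap_header_struct *header,
 *                       struct __user_cap_data_struct *data)
 *     {
 *         return syscall(__NR_capget, header, data);
 *     }
 *
 * i.e. each _syscallN use below defines a direct host-syscall stub that
 * bypasses any glibc wrapper of the same name.
 */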
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};

_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}

static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;
    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}
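/*
 * check_zeroed_user() supports the kernel convention for growable
 * structs: for example, with ksize == 8 and usize == 12, the four guest
 * bytes past the part QEMU understands must all be zero. The function
 * returns 1 in that case, 0 if any of them is non-zero, and
 * -TARGET_EFAULT if the guest memory cannot be read.
 */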
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
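/*
 * Analogous to the _syscallN family above, but routed through
 * safe_syscall(), which is designed so that a guest signal arriving
 * just before the host syscall is entered surfaces as a restartable
 * error instead of a lost wakeup. For example,
 *
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * defines safe_read(fd, buff, count), which the emulation code calls
 * instead of the libc read() wherever the guest may block.
 */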
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
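/*
 * The variadic forwarding means the usual libc calling patterns keep
 * working: e.g. both safe_ioctl(fd, FIOCLEX) with no third argument and
 * safe_ioctl(fd, FIONBIO, &on) with a pointer are valid, exactly as
 * they would be for ioctl(2) itself.
 */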
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
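/*
 * For example, a host value of (SOCK_DGRAM | SOCK_NONBLOCK) comes back
 * as (TARGET_SOCK_DGRAM | TARGET_SOCK_NONBLOCK): the low nibble is the
 * socket type and the remaining bits are creation flags, which may use
 * different bit positions on the target architecture.
 */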
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
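/*
 * Worked example: with brk_page the page-aligned top of the reserved
 * heap region, a request that stays within brk_page just moves
 * target_brk (zeroing the newly exposed bytes), while a larger request
 * mmaps fresh anonymous pages at brk_page and only commits the new
 * break if the kernel placed the mapping exactly there.
 */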
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
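/*
 * Layout example: for a 32-bit guest (TARGET_ABI_BITS == 32) and
 * n == 40, nw == 2 abi_ulongs are copied, and descriptor k lives at
 * bit (k % 32) of word (k / 32) in the guest bitmap. The host fd_set
 * may use a different word size, which is why the bits are moved one
 * at a time rather than with a memcpy.
 */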
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
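/*
 * For example, on an Alpha host (HOST_HZ == 1024) emulating a target
 * with TARGET_HZ == 100, a host value of 2048 ticks is reported to the
 * guest as 2048 * 100 / 1024 = 200 ticks; the int64_t cast guards the
 * multiplication against overflow.
 */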
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
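/*
 * Note that select() is emulated on top of the host pselect6 with a
 * NULL sigmask: the guest timeval is widened to a timespec for the call
 * and narrowed back afterwards, so the guest sees the "time left"
 * update semantics it expects from select(2).
 */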
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
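/*
 * The kernel ABI for pselect6 packs the sigmask argument as a pair
 * { const sigset_t *ss; size_t ss_len; } pointed to by the sixth
 * argument, which is why arg6 is dereferenced as two abi_ulongs above
 * instead of being passed through as a plain sigset_t pointer.
 */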
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
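/*
 * In the plain poll() path the guest's millisecond timeout is converted
 * to a timespec for the host ppoll: e.g. arg3 == 1500 becomes
 * { .tv_sec = 1, .tv_nsec = 500000000 }, while a negative value keeps
 * poll's "block forever" meaning by passing a NULL timeout.
 */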
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
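/*
 * For example, MIPS's legacy pipe() returns the read end as the syscall
 * result and the write end in v1 (gpr[3]) rather than storing both
 * descriptors through the user pointer; the #if chain above reproduces
 * each such target ABI quirk, and only the generic path writes to
 * pipedes.
 */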
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
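/*
 * Example of the AF_UNIX fixup: a guest that passes a length covering
 * strlen(sun_path) but not the terminating NUL has supplied an
 * unterminated path; if the last supplied byte is non-zero and the byte
 * just past it is zero, len is bumped by one so the host kernel sees a
 * properly terminated sun_path.
 */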
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1824 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1825 struct msghdr *msgh)
1827 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1828 abi_long msg_controllen;
1829 abi_ulong target_cmsg_addr;
1830 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1831 socklen_t space = 0;
1833 msg_controllen = tswapal(target_msgh->msg_controllen);
1834 if (msg_controllen < sizeof (struct target_cmsghdr))
1835 goto the_end;
1836 target_cmsg_addr = tswapal(target_msgh->msg_control);
1837 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1838 target_cmsg_start = target_cmsg;
1839 if (!target_cmsg)
1840 return -TARGET_EFAULT;
1842 while (cmsg && target_cmsg) {
1843 void *data = CMSG_DATA(cmsg);
1844 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1846 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1847 int tgt_len, tgt_space;
1849 /* We never copy a half-header but may copy half-data;
1850 * this is Linux's behaviour in put_cmsg(). Note that
1851 * truncation here is a guest problem (which we report
1852 * to the guest via the CTRUNC bit), unlike truncation
1853 * in target_to_host_cmsg, which is a QEMU bug.
1854 */
1855 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1856 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1857 break;
1860 if (cmsg->cmsg_level == SOL_SOCKET) {
1861 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1862 } else {
1863 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1865 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1867 /* Payload types which need a different size of payload on
1868 * the target must adjust tgt_len here.
1869 */
1870 tgt_len = len;
1871 switch (cmsg->cmsg_level) {
1872 case SOL_SOCKET:
1873 switch (cmsg->cmsg_type) {
1874 case SO_TIMESTAMP:
1875 tgt_len = sizeof(struct target_timeval);
1876 break;
1877 default:
1878 break;
1880 break;
1881 default:
1882 break;
1885 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1886 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1887 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1890 /* We must now copy-and-convert len bytes of payload
1891 * into tgt_len bytes of destination space. Bear in mind
1892 * that in both source and destination we may be dealing
1893 * with a truncated value!
1894 */
1895 switch (cmsg->cmsg_level) {
1896 case SOL_SOCKET:
1897 switch (cmsg->cmsg_type) {
1898 case SCM_RIGHTS:
1900 int *fd = (int *)data;
1901 int *target_fd = (int *)target_data;
1902 int i, numfds = tgt_len / sizeof(int);
1904 for (i = 0; i < numfds; i++) {
1905 __put_user(fd[i], target_fd + i);
1907 break;
1909 case SO_TIMESTAMP:
1911 struct timeval *tv = (struct timeval *)data;
1912 struct target_timeval *target_tv =
1913 (struct target_timeval *)target_data;
1915 if (len != sizeof(struct timeval) ||
1916 tgt_len != sizeof(struct target_timeval)) {
1917 goto unimplemented;
1920 /* copy struct timeval to target */
1921 __put_user(tv->tv_sec, &target_tv->tv_sec);
1922 __put_user(tv->tv_usec, &target_tv->tv_usec);
1923 break;
1925 case SCM_CREDENTIALS:
1927 struct ucred *cred = (struct ucred *)data;
1928 struct target_ucred *target_cred =
1929 (struct target_ucred *)target_data;
1931 __put_user(cred->pid, &target_cred->pid);
1932 __put_user(cred->uid, &target_cred->uid);
1933 __put_user(cred->gid, &target_cred->gid);
1934 break;
1936 default:
1937 goto unimplemented;
1939 break;
1941 case SOL_IP:
1942 switch (cmsg->cmsg_type) {
1943 case IP_TTL:
1945 uint32_t *v = (uint32_t *)data;
1946 uint32_t *t_int = (uint32_t *)target_data;
1948 if (len != sizeof(uint32_t) ||
1949 tgt_len != sizeof(uint32_t)) {
1950 goto unimplemented;
1952 __put_user(*v, t_int);
1953 break;
1955 case IP_RECVERR:
1957 struct errhdr_t {
1958 struct sock_extended_err ee;
1959 struct sockaddr_in offender;
1960 };
1961 struct errhdr_t *errh = (struct errhdr_t *)data;
1962 struct errhdr_t *target_errh =
1963 (struct errhdr_t *)target_data;
1965 if (len != sizeof(struct errhdr_t) ||
1966 tgt_len != sizeof(struct errhdr_t)) {
1967 goto unimplemented;
1969 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1970 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1971 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1972 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1973 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1974 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1975 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1976 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1977 (void *) &errh->offender, sizeof(errh->offender));
1978 break;
1980 default:
1981 goto unimplemented;
1983 break;
1985 case SOL_IPV6:
1986 switch (cmsg->cmsg_type) {
1987 case IPV6_HOPLIMIT:
1989 uint32_t *v = (uint32_t *)data;
1990 uint32_t *t_int = (uint32_t *)target_data;
1992 if (len != sizeof(uint32_t) ||
1993 tgt_len != sizeof(uint32_t)) {
1994 goto unimplemented;
1996 __put_user(*v, t_int);
1997 break;
1999 case IPV6_RECVERR:
2001 struct errhdr6_t {
2002 struct sock_extended_err ee;
2003 struct sockaddr_in6 offender;
2004 };
2005 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2006 struct errhdr6_t *target_errh =
2007 (struct errhdr6_t *)target_data;
2009 if (len != sizeof(struct errhdr6_t) ||
2010 tgt_len != sizeof(struct errhdr6_t)) {
2011 goto unimplemented;
2013 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2014 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2015 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2016 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2017 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2018 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2019 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2020 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2021 (void *) &errh->offender, sizeof(errh->offender));
2022 break;
2024 default:
2025 goto unimplemented;
2027 break;
2029 default:
2030 unimplemented:
2031 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2032 cmsg->cmsg_level, cmsg->cmsg_type);
2033 memcpy(target_data, data, MIN(len, tgt_len));
2034 if (tgt_len > len) {
2035 memset(target_data + len, 0, tgt_len - len);
2039 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2040 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2041 if (msg_controllen < tgt_space) {
2042 tgt_space = msg_controllen;
2044 msg_controllen -= tgt_space;
2045 space += tgt_space;
2046 cmsg = CMSG_NXTHDR(msgh, cmsg);
2047 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2048 target_cmsg_start);
2050 unlock_user(target_cmsg, target_cmsg_addr, space);
2051 the_end:
2052 target_msgh->msg_controllen = tswapal(space);
2053 return 0;
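/*
 * Worked example (illustrative): a host SO_TIMESTAMP cmsg carries a
 * 16-byte struct timeval on a 64-bit host; for a 32-bit target the
 * SO_TIMESTAMP case above requires tgt_len == sizeof(struct
 * target_timeval) == 8, so tv_sec and tv_usec are narrowed individually
 * via __put_user() rather than memcpy'd, and TARGET_CMSG_SPACE(8) is
 * consumed from msg_controllen.
 */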
2056 /* do_setsockopt() Must return target values and target errnos. */
2057 static abi_long do_setsockopt(int sockfd, int level, int optname,
2058 abi_ulong optval_addr, socklen_t optlen)
2060 abi_long ret;
2061 int val;
2062 struct ip_mreqn *ip_mreq;
2063 struct ip_mreq_source *ip_mreq_source;
2065 switch(level) {
2066 case SOL_TCP:
2067 case SOL_UDP:
2068 /* TCP and UDP options all take an 'int' value. */
2069 if (optlen < sizeof(uint32_t))
2070 return -TARGET_EINVAL;
2072 if (get_user_u32(val, optval_addr))
2073 return -TARGET_EFAULT;
2074 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2075 break;
2076 case SOL_IP:
2077 switch(optname) {
2078 case IP_TOS:
2079 case IP_TTL:
2080 case IP_HDRINCL:
2081 case IP_ROUTER_ALERT:
2082 case IP_RECVOPTS:
2083 case IP_RETOPTS:
2084 case IP_PKTINFO:
2085 case IP_MTU_DISCOVER:
2086 case IP_RECVERR:
2087 case IP_RECVTTL:
2088 case IP_RECVTOS:
2089 #ifdef IP_FREEBIND
2090 case IP_FREEBIND:
2091 #endif
2092 case IP_MULTICAST_TTL:
2093 case IP_MULTICAST_LOOP:
2094 val = 0;
2095 if (optlen >= sizeof(uint32_t)) {
2096 if (get_user_u32(val, optval_addr))
2097 return -TARGET_EFAULT;
2098 } else if (optlen >= 1) {
2099 if (get_user_u8(val, optval_addr))
2100 return -TARGET_EFAULT;
2102 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2103 break;
2104 case IP_ADD_MEMBERSHIP:
2105 case IP_DROP_MEMBERSHIP:
2106 if (optlen < sizeof (struct target_ip_mreq) ||
2107 optlen > sizeof (struct target_ip_mreqn))
2108 return -TARGET_EINVAL;
2110 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2111 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2112 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2113 break;
2115 case IP_BLOCK_SOURCE:
2116 case IP_UNBLOCK_SOURCE:
2117 case IP_ADD_SOURCE_MEMBERSHIP:
2118 case IP_DROP_SOURCE_MEMBERSHIP:
2119 if (optlen != sizeof (struct target_ip_mreq_source))
2120 return -TARGET_EINVAL;
2122 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2123 if (!ip_mreq_source) {
2124 return -TARGET_EFAULT;
2126 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2127 unlock_user (ip_mreq_source, optval_addr, 0);
2128 break;
2130 default:
2131 goto unimplemented;
2133 break;
2134 case SOL_IPV6:
2135 switch (optname) {
2136 case IPV6_MTU_DISCOVER:
2137 case IPV6_MTU:
2138 case IPV6_V6ONLY:
2139 case IPV6_RECVPKTINFO:
2140 case IPV6_UNICAST_HOPS:
2141 case IPV6_MULTICAST_HOPS:
2142 case IPV6_MULTICAST_LOOP:
2143 case IPV6_RECVERR:
2144 case IPV6_RECVHOPLIMIT:
2145 case IPV6_2292HOPLIMIT:
2146 case IPV6_CHECKSUM:
2147 case IPV6_ADDRFORM:
2148 case IPV6_2292PKTINFO:
2149 case IPV6_RECVTCLASS:
2150 case IPV6_RECVRTHDR:
2151 case IPV6_2292RTHDR:
2152 case IPV6_RECVHOPOPTS:
2153 case IPV6_2292HOPOPTS:
2154 case IPV6_RECVDSTOPTS:
2155 case IPV6_2292DSTOPTS:
2156 case IPV6_TCLASS:
2157 case IPV6_ADDR_PREFERENCES:
2158 #ifdef IPV6_RECVPATHMTU
2159 case IPV6_RECVPATHMTU:
2160 #endif
2161 #ifdef IPV6_TRANSPARENT
2162 case IPV6_TRANSPARENT:
2163 #endif
2164 #ifdef IPV6_FREEBIND
2165 case IPV6_FREEBIND:
2166 #endif
2167 #ifdef IPV6_RECVORIGDSTADDR
2168 case IPV6_RECVORIGDSTADDR:
2169 #endif
2170 val = 0;
2171 if (optlen < sizeof(uint32_t)) {
2172 return -TARGET_EINVAL;
2174 if (get_user_u32(val, optval_addr)) {
2175 return -TARGET_EFAULT;
2177 ret = get_errno(setsockopt(sockfd, level, optname,
2178 &val, sizeof(val)));
2179 break;
2180 case IPV6_PKTINFO:
2182 struct in6_pktinfo pki;
2184 if (optlen < sizeof(pki)) {
2185 return -TARGET_EINVAL;
2188 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2189 return -TARGET_EFAULT;
2192 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2194 ret = get_errno(setsockopt(sockfd, level, optname,
2195 &pki, sizeof(pki)));
2196 break;
2198 case IPV6_ADD_MEMBERSHIP:
2199 case IPV6_DROP_MEMBERSHIP:
2201 struct ipv6_mreq ipv6mreq;
2203 if (optlen < sizeof(ipv6mreq)) {
2204 return -TARGET_EINVAL;
2207 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2208 return -TARGET_EFAULT;
2211 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2213 ret = get_errno(setsockopt(sockfd, level, optname,
2214 &ipv6mreq, sizeof(ipv6mreq)));
2215 break;
2217 default:
2218 goto unimplemented;
2220 break;
2221 case SOL_ICMPV6:
2222 switch (optname) {
2223 case ICMPV6_FILTER:
2225 struct icmp6_filter icmp6f;
2227 if (optlen > sizeof(icmp6f)) {
2228 optlen = sizeof(icmp6f);
2231 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2232 return -TARGET_EFAULT;
2235 for (val = 0; val < 8; val++) {
2236 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2239 ret = get_errno(setsockopt(sockfd, level, optname,
2240 &icmp6f, optlen));
2241 break;
2243 default:
2244 goto unimplemented;
2246 break;
2247 case SOL_RAW:
2248 switch (optname) {
2249 case ICMP_FILTER:
2250 case IPV6_CHECKSUM:
2251 /* these take a u32 value */
2252 if (optlen < sizeof(uint32_t)) {
2253 return -TARGET_EINVAL;
2256 if (get_user_u32(val, optval_addr)) {
2257 return -TARGET_EFAULT;
2259 ret = get_errno(setsockopt(sockfd, level, optname,
2260 &val, sizeof(val)));
2261 break;
2263 default:
2264 goto unimplemented;
2266 break;
2267 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2268 case SOL_ALG:
2269 switch (optname) {
2270 case ALG_SET_KEY:
2272 char *alg_key = g_malloc(optlen);
2274 if (!alg_key) {
2275 return -TARGET_ENOMEM;
2277 if (copy_from_user(alg_key, optval_addr, optlen)) {
2278 g_free(alg_key);
2279 return -TARGET_EFAULT;
2281 ret = get_errno(setsockopt(sockfd, level, optname,
2282 alg_key, optlen));
2283 g_free(alg_key);
2284 break;
2286 case ALG_SET_AEAD_AUTHSIZE:
2288 ret = get_errno(setsockopt(sockfd, level, optname,
2289 NULL, optlen));
2290 break;
2292 default:
2293 goto unimplemented;
2295 break;
2296 #endif
2297 case TARGET_SOL_SOCKET:
2298 switch (optname) {
2299 case TARGET_SO_RCVTIMEO:
2301 struct timeval tv;
2303 optname = SO_RCVTIMEO;
2305 set_timeout:
2306 if (optlen != sizeof(struct target_timeval)) {
2307 return -TARGET_EINVAL;
2310 if (copy_from_user_timeval(&tv, optval_addr)) {
2311 return -TARGET_EFAULT;
2314 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2315 &tv, sizeof(tv)));
2316 return ret;
2318 case TARGET_SO_SNDTIMEO:
2319 optname = SO_SNDTIMEO;
2320 goto set_timeout;
2321 case TARGET_SO_ATTACH_FILTER:
2323 struct target_sock_fprog *tfprog;
2324 struct target_sock_filter *tfilter;
2325 struct sock_fprog fprog;
2326 struct sock_filter *filter;
2327 int i;
2329 if (optlen != sizeof(*tfprog)) {
2330 return -TARGET_EINVAL;
2332 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2333 return -TARGET_EFAULT;
2335 if (!lock_user_struct(VERIFY_READ, tfilter,
2336 tswapal(tfprog->filter), 0)) {
2337 unlock_user_struct(tfprog, optval_addr, 1);
2338 return -TARGET_EFAULT;
2341 fprog.len = tswap16(tfprog->len);
2342 filter = g_try_new(struct sock_filter, fprog.len);
2343 if (filter == NULL) {
2344 unlock_user_struct(tfilter, tfprog->filter, 1);
2345 unlock_user_struct(tfprog, optval_addr, 1);
2346 return -TARGET_ENOMEM;
2348 for (i = 0; i < fprog.len; i++) {
2349 filter[i].code = tswap16(tfilter[i].code);
2350 filter[i].jt = tfilter[i].jt;
2351 filter[i].jf = tfilter[i].jf;
2352 filter[i].k = tswap32(tfilter[i].k);
2354 fprog.filter = filter;
2356 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2357 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2358 g_free(filter);
2360 unlock_user_struct(tfilter, tfprog->filter, 1);
2361 unlock_user_struct(tfprog, optval_addr, 1);
2362 return ret;
2364 case TARGET_SO_BINDTODEVICE:
2366 char *dev_ifname, *addr_ifname;
2368 if (optlen > IFNAMSIZ - 1) {
2369 optlen = IFNAMSIZ - 1;
2371 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2372 if (!dev_ifname) {
2373 return -TARGET_EFAULT;
2375 optname = SO_BINDTODEVICE;
2376 addr_ifname = alloca(IFNAMSIZ);
2377 memcpy(addr_ifname, dev_ifname, optlen);
2378 addr_ifname[optlen] = 0;
2379 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2380 addr_ifname, optlen));
2381 unlock_user (dev_ifname, optval_addr, 0);
2382 return ret;
2384 case TARGET_SO_LINGER:
2386 struct linger lg;
2387 struct target_linger *tlg;
2389 if (optlen != sizeof(struct target_linger)) {
2390 return -TARGET_EINVAL;
2392 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2393 return -TARGET_EFAULT;
2395 __get_user(lg.l_onoff, &tlg->l_onoff);
2396 __get_user(lg.l_linger, &tlg->l_linger);
2397 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2398 &lg, sizeof(lg)));
2399 unlock_user_struct(tlg, optval_addr, 0);
2400 return ret;
2402 /* Options with 'int' argument. */
2403 case TARGET_SO_DEBUG:
2404 optname = SO_DEBUG;
2405 break;
2406 case TARGET_SO_REUSEADDR:
2407 optname = SO_REUSEADDR;
2408 break;
2409 #ifdef SO_REUSEPORT
2410 case TARGET_SO_REUSEPORT:
2411 optname = SO_REUSEPORT;
2412 break;
2413 #endif
2414 case TARGET_SO_TYPE:
2415 optname = SO_TYPE;
2416 break;
2417 case TARGET_SO_ERROR:
2418 optname = SO_ERROR;
2419 break;
2420 case TARGET_SO_DONTROUTE:
2421 optname = SO_DONTROUTE;
2422 break;
2423 case TARGET_SO_BROADCAST:
2424 optname = SO_BROADCAST;
2425 break;
2426 case TARGET_SO_SNDBUF:
2427 optname = SO_SNDBUF;
2428 break;
2429 case TARGET_SO_SNDBUFFORCE:
2430 optname = SO_SNDBUFFORCE;
2431 break;
2432 case TARGET_SO_RCVBUF:
2433 optname = SO_RCVBUF;
2434 break;
2435 case TARGET_SO_RCVBUFFORCE:
2436 optname = SO_RCVBUFFORCE;
2437 break;
2438 case TARGET_SO_KEEPALIVE:
2439 optname = SO_KEEPALIVE;
2440 break;
2441 case TARGET_SO_OOBINLINE:
2442 optname = SO_OOBINLINE;
2443 break;
2444 case TARGET_SO_NO_CHECK:
2445 optname = SO_NO_CHECK;
2446 break;
2447 case TARGET_SO_PRIORITY:
2448 optname = SO_PRIORITY;
2449 break;
2450 #ifdef SO_BSDCOMPAT
2451 case TARGET_SO_BSDCOMPAT:
2452 optname = SO_BSDCOMPAT;
2453 break;
2454 #endif
2455 case TARGET_SO_PASSCRED:
2456 optname = SO_PASSCRED;
2457 break;
2458 case TARGET_SO_PASSSEC:
2459 optname = SO_PASSSEC;
2460 break;
2461 case TARGET_SO_TIMESTAMP:
2462 optname = SO_TIMESTAMP;
2463 break;
2464 case TARGET_SO_RCVLOWAT:
2465 optname = SO_RCVLOWAT;
2466 break;
2467 default:
2468 goto unimplemented;
2470 if (optlen < sizeof(uint32_t))
2471 return -TARGET_EINVAL;
2473 if (get_user_u32(val, optval_addr))
2474 return -TARGET_EFAULT;
2475 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2476 break;
2477 #ifdef SOL_NETLINK
2478 case SOL_NETLINK:
2479 switch (optname) {
2480 case NETLINK_PKTINFO:
2481 case NETLINK_ADD_MEMBERSHIP:
2482 case NETLINK_DROP_MEMBERSHIP:
2483 case NETLINK_BROADCAST_ERROR:
2484 case NETLINK_NO_ENOBUFS:
2485 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2486 case NETLINK_LISTEN_ALL_NSID:
2487 case NETLINK_CAP_ACK:
2488 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2489 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2490 case NETLINK_EXT_ACK:
2491 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2493 case NETLINK_GET_STRICT_CHK:
2494 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2495 break;
2496 default:
2497 goto unimplemented;
2499 val = 0;
2500 if (optlen < sizeof(uint32_t)) {
2501 return -TARGET_EINVAL;
2503 if (get_user_u32(val, optval_addr)) {
2504 return -TARGET_EFAULT;
2506 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2507 sizeof(val)));
2508 break;
2509 #endif /* SOL_NETLINK */
2510 default:
2511 unimplemented:
2512 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2513 level, optname);
2514 ret = -TARGET_ENOPROTOOPT;
2516 return ret;
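/*
 * Usage sketch (assumed guest-side code, illustrative only): a guest
 *
 *     struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * takes the TARGET_SO_RCVTIMEO/set_timeout path above, where
 * copy_from_user_timeval() rebuilds a host struct timeval before the
 * real setsockopt() is issued.
 */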
2519 /* do_getsockopt() Must return target values and target errnos. */
2520 static abi_long do_getsockopt(int sockfd, int level, int optname,
2521 abi_ulong optval_addr, abi_ulong optlen)
2523 abi_long ret;
2524 int len, val;
2525 socklen_t lv;
2527 switch(level) {
2528 case TARGET_SOL_SOCKET:
2529 level = SOL_SOCKET;
2530 switch (optname) {
2531 /* These don't just return a single integer */
2532 case TARGET_SO_PEERNAME:
2533 goto unimplemented;
2534 case TARGET_SO_RCVTIMEO: {
2535 struct timeval tv;
2536 socklen_t tvlen;
2538 optname = SO_RCVTIMEO;
2540 get_timeout:
2541 if (get_user_u32(len, optlen)) {
2542 return -TARGET_EFAULT;
2544 if (len < 0) {
2545 return -TARGET_EINVAL;
2548 tvlen = sizeof(tv);
2549 ret = get_errno(getsockopt(sockfd, level, optname,
2550 &tv, &tvlen));
2551 if (ret < 0) {
2552 return ret;
2554 if (len > sizeof(struct target_timeval)) {
2555 len = sizeof(struct target_timeval);
2557 if (copy_to_user_timeval(optval_addr, &tv)) {
2558 return -TARGET_EFAULT;
2560 if (put_user_u32(len, optlen)) {
2561 return -TARGET_EFAULT;
2563 break;
2565 case TARGET_SO_SNDTIMEO:
2566 optname = SO_SNDTIMEO;
2567 goto get_timeout;
2568 case TARGET_SO_PEERCRED: {
2569 struct ucred cr;
2570 socklen_t crlen;
2571 struct target_ucred *tcr;
2573 if (get_user_u32(len, optlen)) {
2574 return -TARGET_EFAULT;
2576 if (len < 0) {
2577 return -TARGET_EINVAL;
2580 crlen = sizeof(cr);
2581 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2582 &cr, &crlen));
2583 if (ret < 0) {
2584 return ret;
2586 if (len > crlen) {
2587 len = crlen;
2589 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2590 return -TARGET_EFAULT;
2592 __put_user(cr.pid, &tcr->pid);
2593 __put_user(cr.uid, &tcr->uid);
2594 __put_user(cr.gid, &tcr->gid);
2595 unlock_user_struct(tcr, optval_addr, 1);
2596 if (put_user_u32(len, optlen)) {
2597 return -TARGET_EFAULT;
2599 break;
2601 case TARGET_SO_PEERSEC: {
2602 char *name;
2604 if (get_user_u32(len, optlen)) {
2605 return -TARGET_EFAULT;
2607 if (len < 0) {
2608 return -TARGET_EINVAL;
2610 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2611 if (!name) {
2612 return -TARGET_EFAULT;
2614 lv = len;
2615 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2616 name, &lv));
2617 if (put_user_u32(lv, optlen)) {
2618 ret = -TARGET_EFAULT;
2620 unlock_user(name, optval_addr, lv);
2621 break;
2623 case TARGET_SO_LINGER:
2625 struct linger lg;
2626 socklen_t lglen;
2627 struct target_linger *tlg;
2629 if (get_user_u32(len, optlen)) {
2630 return -TARGET_EFAULT;
2632 if (len < 0) {
2633 return -TARGET_EINVAL;
2636 lglen = sizeof(lg);
2637 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2638 &lg, &lglen));
2639 if (ret < 0) {
2640 return ret;
2642 if (len > lglen) {
2643 len = lglen;
2645 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2646 return -TARGET_EFAULT;
2648 __put_user(lg.l_onoff, &tlg->l_onoff);
2649 __put_user(lg.l_linger, &tlg->l_linger);
2650 unlock_user_struct(tlg, optval_addr, 1);
2651 if (put_user_u32(len, optlen)) {
2652 return -TARGET_EFAULT;
2654 break;
2656 /* Options with 'int' argument. */
2657 case TARGET_SO_DEBUG:
2658 optname = SO_DEBUG;
2659 goto int_case;
2660 case TARGET_SO_REUSEADDR:
2661 optname = SO_REUSEADDR;
2662 goto int_case;
2663 #ifdef SO_REUSEPORT
2664 case TARGET_SO_REUSEPORT:
2665 optname = SO_REUSEPORT;
2666 goto int_case;
2667 #endif
2668 case TARGET_SO_TYPE:
2669 optname = SO_TYPE;
2670 goto int_case;
2671 case TARGET_SO_ERROR:
2672 optname = SO_ERROR;
2673 goto int_case;
2674 case TARGET_SO_DONTROUTE:
2675 optname = SO_DONTROUTE;
2676 goto int_case;
2677 case TARGET_SO_BROADCAST:
2678 optname = SO_BROADCAST;
2679 goto int_case;
2680 case TARGET_SO_SNDBUF:
2681 optname = SO_SNDBUF;
2682 goto int_case;
2683 case TARGET_SO_RCVBUF:
2684 optname = SO_RCVBUF;
2685 goto int_case;
2686 case TARGET_SO_KEEPALIVE:
2687 optname = SO_KEEPALIVE;
2688 goto int_case;
2689 case TARGET_SO_OOBINLINE:
2690 optname = SO_OOBINLINE;
2691 goto int_case;
2692 case TARGET_SO_NO_CHECK:
2693 optname = SO_NO_CHECK;
2694 goto int_case;
2695 case TARGET_SO_PRIORITY:
2696 optname = SO_PRIORITY;
2697 goto int_case;
2698 #ifdef SO_BSDCOMPAT
2699 case TARGET_SO_BSDCOMPAT:
2700 optname = SO_BSDCOMPAT;
2701 goto int_case;
2702 #endif
2703 case TARGET_SO_PASSCRED:
2704 optname = SO_PASSCRED;
2705 goto int_case;
2706 case TARGET_SO_TIMESTAMP:
2707 optname = SO_TIMESTAMP;
2708 goto int_case;
2709 case TARGET_SO_RCVLOWAT:
2710 optname = SO_RCVLOWAT;
2711 goto int_case;
2712 case TARGET_SO_ACCEPTCONN:
2713 optname = SO_ACCEPTCONN;
2714 goto int_case;
2715 case TARGET_SO_PROTOCOL:
2716 optname = SO_PROTOCOL;
2717 goto int_case;
2718 case TARGET_SO_DOMAIN:
2719 optname = SO_DOMAIN;
2720 goto int_case;
2721 default:
2722 goto int_case;
2724 break;
2725 case SOL_TCP:
2726 case SOL_UDP:
2727 /* TCP and UDP options all take an 'int' value. */
2728 int_case:
2729 if (get_user_u32(len, optlen))
2730 return -TARGET_EFAULT;
2731 if (len < 0)
2732 return -TARGET_EINVAL;
2733 lv = sizeof(lv);
2734 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2735 if (ret < 0)
2736 return ret;
2737 if (optname == SO_TYPE) {
2738 val = host_to_target_sock_type(val);
2740 if (len > lv)
2741 len = lv;
2742 if (len == 4) {
2743 if (put_user_u32(val, optval_addr))
2744 return -TARGET_EFAULT;
2745 } else {
2746 if (put_user_u8(val, optval_addr))
2747 return -TARGET_EFAULT;
2749 if (put_user_u32(len, optlen))
2750 return -TARGET_EFAULT;
2751 break;
2752 case SOL_IP:
2753 switch(optname) {
2754 case IP_TOS:
2755 case IP_TTL:
2756 case IP_HDRINCL:
2757 case IP_ROUTER_ALERT:
2758 case IP_RECVOPTS:
2759 case IP_RETOPTS:
2760 case IP_PKTINFO:
2761 case IP_MTU_DISCOVER:
2762 case IP_RECVERR:
2763 case IP_RECVTOS:
2764 #ifdef IP_FREEBIND
2765 case IP_FREEBIND:
2766 #endif
2767 case IP_MULTICAST_TTL:
2768 case IP_MULTICAST_LOOP:
2769 if (get_user_u32(len, optlen))
2770 return -TARGET_EFAULT;
2771 if (len < 0)
2772 return -TARGET_EINVAL;
2773 lv = sizeof(lv);
2774 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2775 if (ret < 0)
2776 return ret;
2777 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2778 len = 1;
2779 if (put_user_u32(len, optlen)
2780 || put_user_u8(val, optval_addr))
2781 return -TARGET_EFAULT;
2782 } else {
2783 if (len > sizeof(int))
2784 len = sizeof(int);
2785 if (put_user_u32(len, optlen)
2786 || put_user_u32(val, optval_addr))
2787 return -TARGET_EFAULT;
2789 break;
2790 default:
2791 ret = -TARGET_ENOPROTOOPT;
2792 break;
2794 break;
2795 case SOL_IPV6:
2796 switch (optname) {
2797 case IPV6_MTU_DISCOVER:
2798 case IPV6_MTU:
2799 case IPV6_V6ONLY:
2800 case IPV6_RECVPKTINFO:
2801 case IPV6_UNICAST_HOPS:
2802 case IPV6_MULTICAST_HOPS:
2803 case IPV6_MULTICAST_LOOP:
2804 case IPV6_RECVERR:
2805 case IPV6_RECVHOPLIMIT:
2806 case IPV6_2292HOPLIMIT:
2807 case IPV6_CHECKSUM:
2808 case IPV6_ADDRFORM:
2809 case IPV6_2292PKTINFO:
2810 case IPV6_RECVTCLASS:
2811 case IPV6_RECVRTHDR:
2812 case IPV6_2292RTHDR:
2813 case IPV6_RECVHOPOPTS:
2814 case IPV6_2292HOPOPTS:
2815 case IPV6_RECVDSTOPTS:
2816 case IPV6_2292DSTOPTS:
2817 case IPV6_TCLASS:
2818 case IPV6_ADDR_PREFERENCES:
2819 #ifdef IPV6_RECVPATHMTU
2820 case IPV6_RECVPATHMTU:
2821 #endif
2822 #ifdef IPV6_TRANSPARENT
2823 case IPV6_TRANSPARENT:
2824 #endif
2825 #ifdef IPV6_FREEBIND
2826 case IPV6_FREEBIND:
2827 #endif
2828 #ifdef IPV6_RECVORIGDSTADDR
2829 case IPV6_RECVORIGDSTADDR:
2830 #endif
2831 if (get_user_u32(len, optlen))
2832 return -TARGET_EFAULT;
2833 if (len < 0)
2834 return -TARGET_EINVAL;
2835 lv = sizeof(lv);
2836 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2837 if (ret < 0)
2838 return ret;
2839 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2840 len = 1;
2841 if (put_user_u32(len, optlen)
2842 || put_user_u8(val, optval_addr))
2843 return -TARGET_EFAULT;
2844 } else {
2845 if (len > sizeof(int))
2846 len = sizeof(int);
2847 if (put_user_u32(len, optlen)
2848 || put_user_u32(val, optval_addr))
2849 return -TARGET_EFAULT;
2851 break;
2852 default:
2853 ret = -TARGET_ENOPROTOOPT;
2854 break;
2856 break;
2857 #ifdef SOL_NETLINK
2858 case SOL_NETLINK:
2859 switch (optname) {
2860 case NETLINK_PKTINFO:
2861 case NETLINK_BROADCAST_ERROR:
2862 case NETLINK_NO_ENOBUFS:
2863 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2864 case NETLINK_LISTEN_ALL_NSID:
2865 case NETLINK_CAP_ACK:
2866 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2867 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2868 case NETLINK_EXT_ACK:
2869 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2871 case NETLINK_GET_STRICT_CHK:
2872 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2873 if (get_user_u32(len, optlen)) {
2874 return -TARGET_EFAULT;
2876 if (len != sizeof(val)) {
2877 return -TARGET_EINVAL;
2879 lv = len;
2880 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2881 if (ret < 0) {
2882 return ret;
2884 if (put_user_u32(lv, optlen)
2885 || put_user_u32(val, optval_addr)) {
2886 return -TARGET_EFAULT;
2888 break;
2889 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2890 case NETLINK_LIST_MEMBERSHIPS:
2892 uint32_t *results;
2893 int i;
2894 if (get_user_u32(len, optlen)) {
2895 return -TARGET_EFAULT;
2897 if (len < 0) {
2898 return -TARGET_EINVAL;
2900 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2901 if (!results && len > 0) {
2902 return -TARGET_EFAULT;
2904 lv = len;
2905 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2906 if (ret < 0) {
2907 unlock_user(results, optval_addr, 0);
2908 return ret;
2910 /* swap host endianness to target endianness. */
2911 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2912 results[i] = tswap32(results[i]);
2914 if (put_user_u32(lv, optlen)) {
2915 return -TARGET_EFAULT;
2917 unlock_user(results, optval_addr, 0);
2918 break;
2920 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2921 default:
2922 goto unimplemented;
2924 break;
2925 #endif /* SOL_NETLINK */
2926 default:
2927 unimplemented:
2928 qemu_log_mask(LOG_UNIMP,
2929 "getsockopt level=%d optname=%d not yet supported\n",
2930 level, optname);
2931 ret = -TARGET_EOPNOTSUPP;
2932 break;
2934 return ret;
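/*
 * Usage sketch (assumed guest-side code): a guest
 *
 *     int v;
 *     socklen_t l = sizeof(v);
 *     getsockopt(fd, SOL_SOCKET, SO_TYPE, &v, &l);
 *
 * lands in the int_case path above, and host_to_target_sock_type()
 * converts the host SOCK_* encoding back to the target's before the
 * value is stored with put_user_u32().
 */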
2937 /* Convert target low/high pair representing file offset into the host
2938 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2939 * as the kernel doesn't handle them either.
2940 */
2941 static void target_to_host_low_high(abi_ulong tlow,
2942 abi_ulong thigh,
2943 unsigned long *hlow,
2944 unsigned long *hhigh)
2946 uint64_t off = tlow |
2947 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2948 TARGET_LONG_BITS / 2;
2950 *hlow = off;
2951 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
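/*
 * Worked example (illustrative): with a 32-bit target on a 64-bit host,
 * tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef; *hlow receives the whole 64-bit value and
 * *hhigh becomes 0, matching the host kernel's low/high convention. The
 * split shifts avoid undefined behaviour when a single shift count would
 * equal the type width.
 */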
2954 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2955 abi_ulong count, int copy)
2957 struct target_iovec *target_vec;
2958 struct iovec *vec;
2959 abi_ulong total_len, max_len;
2960 int i;
2961 int err = 0;
2962 bool bad_address = false;
2964 if (count == 0) {
2965 errno = 0;
2966 return NULL;
2968 if (count > IOV_MAX) {
2969 errno = EINVAL;
2970 return NULL;
2973 vec = g_try_new0(struct iovec, count);
2974 if (vec == NULL) {
2975 errno = ENOMEM;
2976 return NULL;
2979 target_vec = lock_user(VERIFY_READ, target_addr,
2980 count * sizeof(struct target_iovec), 1);
2981 if (target_vec == NULL) {
2982 err = EFAULT;
2983 goto fail2;
2986 /* ??? If host page size > target page size, this will result in a
2987 value larger than what we can actually support. */
2988 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2989 total_len = 0;
2991 for (i = 0; i < count; i++) {
2992 abi_ulong base = tswapal(target_vec[i].iov_base);
2993 abi_long len = tswapal(target_vec[i].iov_len);
2995 if (len < 0) {
2996 err = EINVAL;
2997 goto fail;
2998 } else if (len == 0) {
2999 /* Zero length pointer is ignored. */
3000 vec[i].iov_base = 0;
3001 } else {
3002 vec[i].iov_base = lock_user(type, base, len, copy);
3003 /* If the first buffer pointer is bad, this is a fault. But
3004 * subsequent bad buffers will result in a partial write; this
3005 * is realized by filling the vector with null pointers and
3006 * zero lengths. */
3007 if (!vec[i].iov_base) {
3008 if (i == 0) {
3009 err = EFAULT;
3010 goto fail;
3011 } else {
3012 bad_address = true;
3015 if (bad_address) {
3016 len = 0;
3018 if (len > max_len - total_len) {
3019 len = max_len - total_len;
3022 vec[i].iov_len = len;
3023 total_len += len;
3026 unlock_user(target_vec, target_addr, 0);
3027 return vec;
3029 fail:
3030 while (--i >= 0) {
3031 if (tswapal(target_vec[i].iov_len) > 0) {
3032 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3035 unlock_user(target_vec, target_addr, 0);
3036 fail2:
3037 g_free(vec);
3038 errno = err;
3039 return NULL;
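/*
 * Minimal usage sketch (assumption: mirrors the readv/writev-style
 * callers elsewhere in this file; names are illustrative):
 *
 *     struct iovec *vec = lock_iovec(VERIFY_READ, target_iov, count, 1);
 *     if (vec != NULL) {
 *         ret = get_errno(safe_writev(fd, vec, count));
 *         unlock_iovec(vec, target_iov, count, 0);
 *     } else {
 *         ret = -host_to_target_errno(errno);
 *     }
 */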
3042 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3043 abi_ulong count, int copy)
3045 struct target_iovec *target_vec;
3046 int i;
3048 target_vec = lock_user(VERIFY_READ, target_addr,
3049 count * sizeof(struct target_iovec), 1);
3050 if (target_vec) {
3051 for (i = 0; i < count; i++) {
3052 abi_ulong base = tswapal(target_vec[i].iov_base);
3053 abi_long len = tswapal(target_vec[i].iov_len);
3054 if (len < 0) {
3055 break;
3057 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3059 unlock_user(target_vec, target_addr, 0);
3062 g_free(vec);
3065 static inline int target_to_host_sock_type(int *type)
3067 int host_type = 0;
3068 int target_type = *type;
3070 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3071 case TARGET_SOCK_DGRAM:
3072 host_type = SOCK_DGRAM;
3073 break;
3074 case TARGET_SOCK_STREAM:
3075 host_type = SOCK_STREAM;
3076 break;
3077 default:
3078 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3079 break;
3081 if (target_type & TARGET_SOCK_CLOEXEC) {
3082 #if defined(SOCK_CLOEXEC)
3083 host_type |= SOCK_CLOEXEC;
3084 #else
3085 return -TARGET_EINVAL;
3086 #endif
3088 if (target_type & TARGET_SOCK_NONBLOCK) {
3089 #if defined(SOCK_NONBLOCK)
3090 host_type |= SOCK_NONBLOCK;
3091 #elif !defined(O_NONBLOCK)
3092 return -TARGET_EINVAL;
3093 #endif
3095 *type = host_type;
3096 return 0;
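/*
 * Illustrative example: a guest socket(AF_INET, SOCK_STREAM |
 * TARGET_SOCK_NONBLOCK, 0) maps to host SOCK_STREAM | SOCK_NONBLOCK when
 * the host defines it; when only O_NONBLOCK exists, the bit is left out
 * here and sock_flags_fixup() below applies it with fcntl(F_SETFL)
 * instead.
 */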
3099 /* Try to emulate socket type flags after socket creation. */
3100 static int sock_flags_fixup(int fd, int target_type)
3102 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3103 if (target_type & TARGET_SOCK_NONBLOCK) {
3104 int flags = fcntl(fd, F_GETFL);
3105 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3106 close(fd);
3107 return -TARGET_EINVAL;
3110 #endif
3111 return fd;
3114 /* do_socket() Must return target values and target errnos. */
3115 static abi_long do_socket(int domain, int type, int protocol)
3117 int target_type = type;
3118 int ret;
3120 ret = target_to_host_sock_type(&type);
3121 if (ret) {
3122 return ret;
3125 if (domain == PF_NETLINK && !(
3126 #ifdef CONFIG_RTNETLINK
3127 protocol == NETLINK_ROUTE ||
3128 #endif
3129 protocol == NETLINK_KOBJECT_UEVENT ||
3130 protocol == NETLINK_AUDIT)) {
3131 return -TARGET_EPROTONOSUPPORT;
3134 if (domain == AF_PACKET ||
3135 (domain == AF_INET && type == SOCK_PACKET)) {
3136 protocol = tswap16(protocol);
3139 ret = get_errno(socket(domain, type, protocol));
3140 if (ret >= 0) {
3141 ret = sock_flags_fixup(ret, target_type);
3142 if (type == SOCK_PACKET) {
3143 /* Handle an obsolete case:
3144 * if socket type is SOCK_PACKET, bind by name
3145 */
3146 fd_trans_register(ret, &target_packet_trans);
3147 } else if (domain == PF_NETLINK) {
3148 switch (protocol) {
3149 #ifdef CONFIG_RTNETLINK
3150 case NETLINK_ROUTE:
3151 fd_trans_register(ret, &target_netlink_route_trans);
3152 break;
3153 #endif
3154 case NETLINK_KOBJECT_UEVENT:
3155 /* nothing to do: messages are strings */
3156 break;
3157 case NETLINK_AUDIT:
3158 fd_trans_register(ret, &target_netlink_audit_trans);
3159 break;
3160 default:
3161 g_assert_not_reached();
3165 return ret;
3168 /* do_bind() Must return target values and target errnos. */
3169 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3170 socklen_t addrlen)
3172 void *addr;
3173 abi_long ret;
3175 if ((int)addrlen < 0) {
3176 return -TARGET_EINVAL;
3179 addr = alloca(addrlen+1);
3181 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3182 if (ret)
3183 return ret;
3185 return get_errno(bind(sockfd, addr, addrlen));
3188 /* do_connect() Must return target values and target errnos. */
3189 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3190 socklen_t addrlen)
3192 void *addr;
3193 abi_long ret;
3195 if ((int)addrlen < 0) {
3196 return -TARGET_EINVAL;
3199 addr = alloca(addrlen+1);
3201 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3202 if (ret)
3203 return ret;
3205 return get_errno(safe_connect(sockfd, addr, addrlen));
3208 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3209 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3210 int flags, int send)
3212 abi_long ret, len;
3213 struct msghdr msg;
3214 abi_ulong count;
3215 struct iovec *vec;
3216 abi_ulong target_vec;
3218 if (msgp->msg_name) {
3219 msg.msg_namelen = tswap32(msgp->msg_namelen);
3220 msg.msg_name = alloca(msg.msg_namelen+1);
3221 ret = target_to_host_sockaddr(fd, msg.msg_name,
3222 tswapal(msgp->msg_name),
3223 msg.msg_namelen);
3224 if (ret == -TARGET_EFAULT) {
3225 /* For connected sockets msg_name and msg_namelen must
3226 * be ignored, so returning EFAULT immediately is wrong.
3227 * Instead, pass a bad msg_name to the host kernel, and
3228 * let it decide whether to return EFAULT or not.
3229 */
3230 msg.msg_name = (void *)-1;
3231 } else if (ret) {
3232 goto out2;
3234 } else {
3235 msg.msg_name = NULL;
3236 msg.msg_namelen = 0;
3238 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3239 msg.msg_control = alloca(msg.msg_controllen);
3240 memset(msg.msg_control, 0, msg.msg_controllen);
3242 msg.msg_flags = tswap32(msgp->msg_flags);
3244 count = tswapal(msgp->msg_iovlen);
3245 target_vec = tswapal(msgp->msg_iov);
3247 if (count > IOV_MAX) {
3248 /* sendmsg/recvmsg returns a different errno for this condition than
3249 * readv/writev, so we must catch it here before lock_iovec() does.
3250 */
3251 ret = -TARGET_EMSGSIZE;
3252 goto out2;
3255 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3256 target_vec, count, send);
3257 if (vec == NULL) {
3258 ret = -host_to_target_errno(errno);
3259 goto out2;
3261 msg.msg_iovlen = count;
3262 msg.msg_iov = vec;
3264 if (send) {
3265 if (fd_trans_target_to_host_data(fd)) {
3266 void *host_msg;
3268 host_msg = g_malloc(msg.msg_iov->iov_len);
3269 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3270 ret = fd_trans_target_to_host_data(fd)(host_msg,
3271 msg.msg_iov->iov_len);
3272 if (ret >= 0) {
3273 msg.msg_iov->iov_base = host_msg;
3274 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3276 g_free(host_msg);
3277 } else {
3278 ret = target_to_host_cmsg(&msg, msgp);
3279 if (ret == 0) {
3280 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3283 } else {
3284 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3285 if (!is_error(ret)) {
3286 len = ret;
3287 if (fd_trans_host_to_target_data(fd)) {
3288 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3289 MIN(msg.msg_iov->iov_len, len));
3290 } else {
3291 ret = host_to_target_cmsg(msgp, &msg);
3293 if (!is_error(ret)) {
3294 msgp->msg_namelen = tswap32(msg.msg_namelen);
3295 msgp->msg_flags = tswap32(msg.msg_flags);
3296 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3297 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3298 msg.msg_name, msg.msg_namelen);
3299 if (ret) {
3300 goto out;
3304 ret = len;
3309 out:
3310 unlock_iovec(vec, target_vec, count, !send);
3311 out2:
3312 return ret;
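/*
 * Note: msg_controllen is sized at twice the target's value above because
 * host control messages can be wider than the target's (e.g. a 64-bit
 * host timeval vs a 32-bit target one); the overflow comment in
 * target_to_host_cmsg() relies on exactly this "twice the size of the
 * target buffer" slack.
 */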
3315 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3316 int flags, int send)
3318 abi_long ret;
3319 struct target_msghdr *msgp;
3321 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3322 msgp,
3323 target_msg,
3324 send ? 1 : 0)) {
3325 return -TARGET_EFAULT;
3327 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3328 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3329 return ret;
3332 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3333 * so it might not have this *mmsg-specific flag either.
3334 */
3335 #ifndef MSG_WAITFORONE
3336 #define MSG_WAITFORONE 0x10000
3337 #endif
3339 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3340 unsigned int vlen, unsigned int flags,
3341 int send)
3343 struct target_mmsghdr *mmsgp;
3344 abi_long ret = 0;
3345 int i;
3347 if (vlen > UIO_MAXIOV) {
3348 vlen = UIO_MAXIOV;
3351 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3352 if (!mmsgp) {
3353 return -TARGET_EFAULT;
3356 for (i = 0; i < vlen; i++) {
3357 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3358 if (is_error(ret)) {
3359 break;
3361 mmsgp[i].msg_len = tswap32(ret);
3362 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3363 if (flags & MSG_WAITFORONE) {
3364 flags |= MSG_DONTWAIT;
3368 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3370 /* Return number of datagrams sent if we sent any at all;
3371 * otherwise return the error.
3372 */
3373 if (i) {
3374 return i;
3376 return ret;
3379 /* do_accept4() Must return target values and target errnos. */
3380 static abi_long do_accept4(int fd, abi_ulong target_addr,
3381 abi_ulong target_addrlen_addr, int flags)
3383 socklen_t addrlen, ret_addrlen;
3384 void *addr;
3385 abi_long ret;
3386 int host_flags;
3388 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3390 if (target_addr == 0) {
3391 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3394 /* Linux returns EFAULT if the addrlen pointer is invalid */
3395 if (get_user_u32(addrlen, target_addrlen_addr))
3396 return -TARGET_EFAULT;
3398 if ((int)addrlen < 0) {
3399 return -TARGET_EINVAL;
3402 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3403 return -TARGET_EFAULT;
3406 addr = alloca(addrlen);
3408 ret_addrlen = addrlen;
3409 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3410 if (!is_error(ret)) {
3411 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3412 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3413 ret = -TARGET_EFAULT;
3416 return ret;
3419 /* do_getpeername() Must return target values and target errnos. */
3420 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3421 abi_ulong target_addrlen_addr)
3423 socklen_t addrlen, ret_addrlen;
3424 void *addr;
3425 abi_long ret;
3427 if (get_user_u32(addrlen, target_addrlen_addr))
3428 return -TARGET_EFAULT;
3430 if ((int)addrlen < 0) {
3431 return -TARGET_EINVAL;
3434 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3435 return -TARGET_EFAULT;
3438 addr = alloca(addrlen);
3440 ret_addrlen = addrlen;
3441 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3442 if (!is_error(ret)) {
3443 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3444 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3445 ret = -TARGET_EFAULT;
3448 return ret;
3451 /* do_getsockname() Must return target values and target errnos. */
3452 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3453 abi_ulong target_addrlen_addr)
3455 socklen_t addrlen, ret_addrlen;
3456 void *addr;
3457 abi_long ret;
3459 if (get_user_u32(addrlen, target_addrlen_addr))
3460 return -TARGET_EFAULT;
3462 if ((int)addrlen < 0) {
3463 return -TARGET_EINVAL;
3466 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3467 return -TARGET_EFAULT;
3470 addr = alloca(addrlen);
3472 ret_addrlen = addrlen;
3473 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3474 if (!is_error(ret)) {
3475 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3476 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3477 ret = -TARGET_EFAULT;
3480 return ret;
3483 /* do_socketpair() Must return target values and target errnos. */
3484 static abi_long do_socketpair(int domain, int type, int protocol,
3485 abi_ulong target_tab_addr)
3487 int tab[2];
3488 abi_long ret;
3490 target_to_host_sock_type(&type);
3492 ret = get_errno(socketpair(domain, type, protocol, tab));
3493 if (!is_error(ret)) {
3494 if (put_user_s32(tab[0], target_tab_addr)
3495 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3496 ret = -TARGET_EFAULT;
3498 return ret;
3501 /* do_sendto() Must return target values and target errnos. */
3502 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3503 abi_ulong target_addr, socklen_t addrlen)
3505 void *addr;
3506 void *host_msg;
3507 void *copy_msg = NULL;
3508 abi_long ret;
3510 if ((int)addrlen < 0) {
3511 return -TARGET_EINVAL;
3514 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3515 if (!host_msg)
3516 return -TARGET_EFAULT;
3517 if (fd_trans_target_to_host_data(fd)) {
3518 copy_msg = host_msg;
3519 host_msg = g_malloc(len);
3520 memcpy(host_msg, copy_msg, len);
3521 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3522 if (ret < 0) {
3523 goto fail;
3526 if (target_addr) {
3527 addr = alloca(addrlen+1);
3528 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3529 if (ret) {
3530 goto fail;
3532 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3533 } else {
3534 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3536 fail:
3537 if (copy_msg) {
3538 g_free(host_msg);
3539 host_msg = copy_msg;
3541 unlock_user(host_msg, msg, 0);
3542 return ret;
3545 /* do_recvfrom() Must return target values and target errnos. */
3546 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3547 abi_ulong target_addr,
3548 abi_ulong target_addrlen)
3550 socklen_t addrlen, ret_addrlen;
3551 void *addr;
3552 void *host_msg;
3553 abi_long ret;
3555 if (!msg) {
3556 host_msg = NULL;
3557 } else {
3558 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3559 if (!host_msg) {
3560 return -TARGET_EFAULT;
3563 if (target_addr) {
3564 if (get_user_u32(addrlen, target_addrlen)) {
3565 ret = -TARGET_EFAULT;
3566 goto fail;
3568 if ((int)addrlen < 0) {
3569 ret = -TARGET_EINVAL;
3570 goto fail;
3572 addr = alloca(addrlen);
3573 ret_addrlen = addrlen;
3574 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3575 addr, &ret_addrlen));
3576 } else {
3577 addr = NULL; /* To keep compiler quiet. */
3578 addrlen = 0; /* To keep compiler quiet. */
3579 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3581 if (!is_error(ret)) {
3582 if (fd_trans_host_to_target_data(fd)) {
3583 abi_long trans;
3584 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3585 if (is_error(trans)) {
3586 ret = trans;
3587 goto fail;
3590 if (target_addr) {
3591 host_to_target_sockaddr(target_addr, addr,
3592 MIN(addrlen, ret_addrlen));
3593 if (put_user_u32(ret_addrlen, target_addrlen)) {
3594 ret = -TARGET_EFAULT;
3595 goto fail;
3598 unlock_user(host_msg, msg, len);
3599 } else {
3600 fail:
3601 unlock_user(host_msg, msg, 0);
3603 return ret;
3606 #ifdef TARGET_NR_socketcall
3607 /* do_socketcall() must return target values and target errnos. */
3608 static abi_long do_socketcall(int num, abi_ulong vptr)
3610 static const unsigned nargs[] = { /* number of arguments per operation */
3611 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3612 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3613 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3614 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3615 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3616 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3617 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3618 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3619 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3620 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3621 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3622 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3623 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3624 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3625 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3626 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3627 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3628 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3629 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3630 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3632 abi_long a[6]; /* max 6 args */
3633 unsigned i;
3635 /* check the range of the first argument num */
3636 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3637 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3638 return -TARGET_EINVAL;
3640 /* ensure we have space for args */
3641 if (nargs[num] > ARRAY_SIZE(a)) {
3642 return -TARGET_EINVAL;
3644 /* collect the arguments in a[] according to nargs[] */
3645 for (i = 0; i < nargs[num]; ++i) {
3646 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3647 return -TARGET_EFAULT;
3650 /* now when we have the args, invoke the appropriate underlying function */
3651 switch (num) {
3652 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3653 return do_socket(a[0], a[1], a[2]);
3654 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3655 return do_bind(a[0], a[1], a[2]);
3656 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3657 return do_connect(a[0], a[1], a[2]);
3658 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3659 return get_errno(listen(a[0], a[1]));
3660 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3661 return do_accept4(a[0], a[1], a[2], 0);
3662 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3663 return do_getsockname(a[0], a[1], a[2]);
3664 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3665 return do_getpeername(a[0], a[1], a[2]);
3666 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3667 return do_socketpair(a[0], a[1], a[2], a[3]);
3668 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3669 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3670 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3671 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3672 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3673 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3674 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3675 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3676 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3677 return get_errno(shutdown(a[0], a[1]));
3678 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3679 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3680 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3681 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3682 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3683 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3684 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3685 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3686 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3687 return do_accept4(a[0], a[1], a[2], a[3]);
3688 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3689 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3690 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3691 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3692 default:
3693 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3694 return -TARGET_EINVAL;
3697 #endif
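/*
 * Illustrative example (assumed guest-side usage): on targets that funnel
 * socket calls through socketcall(2), a guest connect() arrives as
 *
 *     num  = TARGET_SYS_CONNECT;
 *     vptr = guest address of { fd, addr, addrlen };
 *
 * so the loop above fetches nargs[TARGET_SYS_CONNECT] = 3 words from vptr
 * before dispatching to do_connect().
 */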
3699 #define N_SHM_REGIONS 32
3701 static struct shm_region {
3702 abi_ulong start;
3703 abi_ulong size;
3704 bool in_use;
3705 } shm_regions[N_SHM_REGIONS];
3707 #ifndef TARGET_SEMID64_DS
3708 /* asm-generic version of this struct */
3709 struct target_semid64_ds
3711 struct target_ipc_perm sem_perm;
3712 abi_ulong sem_otime;
3713 #if TARGET_ABI_BITS == 32
3714 abi_ulong __unused1;
3715 #endif
3716 abi_ulong sem_ctime;
3717 #if TARGET_ABI_BITS == 32
3718 abi_ulong __unused2;
3719 #endif
3720 abi_ulong sem_nsems;
3721 abi_ulong __unused3;
3722 abi_ulong __unused4;
3723 };
3724 #endif
3726 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3727 abi_ulong target_addr)
3729 struct target_ipc_perm *target_ip;
3730 struct target_semid64_ds *target_sd;
3732 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3733 return -TARGET_EFAULT;
3734 target_ip = &(target_sd->sem_perm);
3735 host_ip->__key = tswap32(target_ip->__key);
3736 host_ip->uid = tswap32(target_ip->uid);
3737 host_ip->gid = tswap32(target_ip->gid);
3738 host_ip->cuid = tswap32(target_ip->cuid);
3739 host_ip->cgid = tswap32(target_ip->cgid);
3740 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3741 host_ip->mode = tswap32(target_ip->mode);
3742 #else
3743 host_ip->mode = tswap16(target_ip->mode);
3744 #endif
3745 #if defined(TARGET_PPC)
3746 host_ip->__seq = tswap32(target_ip->__seq);
3747 #else
3748 host_ip->__seq = tswap16(target_ip->__seq);
3749 #endif
3750 unlock_user_struct(target_sd, target_addr, 0);
3751 return 0;
3754 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3755 struct ipc_perm *host_ip)
3757 struct target_ipc_perm *target_ip;
3758 struct target_semid64_ds *target_sd;
3760 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3761 return -TARGET_EFAULT;
3762 target_ip = &(target_sd->sem_perm);
3763 target_ip->__key = tswap32(host_ip->__key);
3764 target_ip->uid = tswap32(host_ip->uid);
3765 target_ip->gid = tswap32(host_ip->gid);
3766 target_ip->cuid = tswap32(host_ip->cuid);
3767 target_ip->cgid = tswap32(host_ip->cgid);
3768 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3769 target_ip->mode = tswap32(host_ip->mode);
3770 #else
3771 target_ip->mode = tswap16(host_ip->mode);
3772 #endif
3773 #if defined(TARGET_PPC)
3774 target_ip->__seq = tswap32(host_ip->__seq);
3775 #else
3776 target_ip->__seq = tswap16(host_ip->__seq);
3777 #endif
3778 unlock_user_struct(target_sd, target_addr, 1);
3779 return 0;
3782 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3783 abi_ulong target_addr)
3785 struct target_semid64_ds *target_sd;
3787 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3788 return -TARGET_EFAULT;
3789 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3790 return -TARGET_EFAULT;
3791 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3792 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3793 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3794 unlock_user_struct(target_sd, target_addr, 0);
3795 return 0;
3798 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3799 struct semid_ds *host_sd)
3801 struct target_semid64_ds *target_sd;
3803 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3804 return -TARGET_EFAULT;
3805 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3806 return -TARGET_EFAULT;
3807 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3808 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3809 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3810 unlock_user_struct(target_sd, target_addr, 1);
3811 return 0;
3814 struct target_seminfo {
3815 int semmap;
3816 int semmni;
3817 int semmns;
3818 int semmnu;
3819 int semmsl;
3820 int semopm;
3821 int semume;
3822 int semusz;
3823 int semvmx;
3824 int semaem;
3825 };
3827 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3828 struct seminfo *host_seminfo)
3830 struct target_seminfo *target_seminfo;
3831 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3832 return -TARGET_EFAULT;
3833 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3834 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3835 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3836 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3837 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3838 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3839 __put_user(host_seminfo->semume, &target_seminfo->semume);
3840 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3841 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3842 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3843 unlock_user_struct(target_seminfo, target_addr, 1);
3844 return 0;
3847 union semun {
3848 int val;
3849 struct semid_ds *buf;
3850 unsigned short *array;
3851 struct seminfo *__buf;
3852 };
3854 union target_semun {
3855 int val;
3856 abi_ulong buf;
3857 abi_ulong array;
3858 abi_ulong __buf;
3859 };
3861 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3862 abi_ulong target_addr)
3864 int nsems;
3865 unsigned short *array;
3866 union semun semun;
3867 struct semid_ds semid_ds;
3868 int i, ret;
3870 semun.buf = &semid_ds;
3872 ret = semctl(semid, 0, IPC_STAT, semun);
3873 if (ret == -1)
3874 return get_errno(ret);
3876 nsems = semid_ds.sem_nsems;
3878 *host_array = g_try_new(unsigned short, nsems);
3879 if (!*host_array) {
3880 return -TARGET_ENOMEM;
3882 array = lock_user(VERIFY_READ, target_addr,
3883 nsems*sizeof(unsigned short), 1);
3884 if (!array) {
3885 g_free(*host_array);
3886 return -TARGET_EFAULT;
3889 for(i=0; i<nsems; i++) {
3890 __get_user((*host_array)[i], &array[i]);
3892 unlock_user(array, target_addr, 0);
3894 return 0;
3897 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3898 unsigned short **host_array)
3900 int nsems;
3901 unsigned short *array;
3902 union semun semun;
3903 struct semid_ds semid_ds;
3904 int i, ret;
3906 semun.buf = &semid_ds;
3908 ret = semctl(semid, 0, IPC_STAT, semun);
3909 if (ret == -1)
3910 return get_errno(ret);
3912 nsems = semid_ds.sem_nsems;
3914 array = lock_user(VERIFY_WRITE, target_addr,
3915 nsems*sizeof(unsigned short), 0);
3916 if (!array)
3917 return -TARGET_EFAULT;
3919 for(i=0; i<nsems; i++) {
3920 __put_user((*host_array)[i], &array[i]);
3922 g_free(*host_array);
3923 unlock_user(array, target_addr, 1);
3925 return 0;
3928 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3929 abi_ulong target_arg)
3931 union target_semun target_su = { .buf = target_arg };
3932 union semun arg;
3933 struct semid_ds dsarg;
3934 unsigned short *array = NULL;
3935 struct seminfo seminfo;
3936 abi_long ret = -TARGET_EINVAL;
3937 abi_long err;
3938 cmd &= 0xff;
3940 switch( cmd ) {
3941 case GETVAL:
3942 case SETVAL:
3943 /* In 64 bit cross-endian situations, we will erroneously pick up
3944 * the wrong half of the union for the "val" element. To rectify
3945 * this, the entire 8-byte structure is byteswapped, followed by
3946 * a swap of the 4 byte val field. In other cases, the data is
3947 * already in proper host byte order. */
3948 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3949 target_su.buf = tswapal(target_su.buf);
3950 arg.val = tswap32(target_su.val);
3951 } else {
3952 arg.val = target_su.val;
3954 ret = get_errno(semctl(semid, semnum, cmd, arg));
3955 break;
3956 case GETALL:
3957 case SETALL:
3958 err = target_to_host_semarray(semid, &array, target_su.array);
3959 if (err)
3960 return err;
3961 arg.array = array;
3962 ret = get_errno(semctl(semid, semnum, cmd, arg));
3963 err = host_to_target_semarray(semid, target_su.array, &array);
3964 if (err)
3965 return err;
3966 break;
3967 case IPC_STAT:
3968 case IPC_SET:
3969 case SEM_STAT:
3970 err = target_to_host_semid_ds(&dsarg, target_su.buf);
3971 if (err)
3972 return err;
3973 arg.buf = &dsarg;
3974 ret = get_errno(semctl(semid, semnum, cmd, arg));
3975 err = host_to_target_semid_ds(target_su.buf, &dsarg);
3976 if (err)
3977 return err;
3978 break;
3979 case IPC_INFO:
3980 case SEM_INFO:
3981 arg.__buf = &seminfo;
3982 ret = get_errno(semctl(semid, semnum, cmd, arg));
3983 err = host_to_target_seminfo(target_su.__buf, &seminfo);
3984 if (err)
3985 return err;
3986 break;
3987 case IPC_RMID:
3988 case GETPID:
3989 case GETNCNT:
3990 case GETZCNT:
3991 ret = get_errno(semctl(semid, semnum, cmd, NULL));
3992 break;
3995 return ret;
3998 struct target_sembuf {
3999 unsigned short sem_num;
4000 short sem_op;
4001 short sem_flg;
4004 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4005 abi_ulong target_addr,
4006 unsigned nsops)
4008 struct target_sembuf *target_sembuf;
4009 int i;
4011 target_sembuf = lock_user(VERIFY_READ, target_addr,
4012 nsops*sizeof(struct target_sembuf), 1);
4013 if (!target_sembuf)
4014 return -TARGET_EFAULT;
4016 for (i = 0; i < nsops; i++) {
4017 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4018 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4019 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4022 unlock_user(target_sembuf, target_addr, 0);
4024 return 0;
4027 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4028 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4031 * This macro is required to handle the s390 variants, which pass the
4032 * arguments in a different order than the default.
4034 #ifdef __s390x__
4035 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4036 (__nsops), (__timeout), (__sops)
4037 #else
4038 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4039 (__nsops), 0, (__sops), (__timeout)
4040 #endif
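/*
 * Illustrative expansion (sketch): with nsops = 2, the generic variant
 * completes the safe_ipc() argument list as (2, 0, sops, timeout),
 * while the s390x variant yields (2, timeout, sops) to match the
 * five-argument s390 sys_ipc() entry point.
 */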
4042 static inline abi_long do_semtimedop(int semid,
4043 abi_long ptr,
4044 unsigned nsops,
4045 abi_long timeout, bool time64)
4047 struct sembuf *sops;
4048 struct timespec ts, *pts = NULL;
4049 abi_long ret;
4051 if (timeout) {
4052 pts = &ts;
4053 if (time64) {
4054 if (target_to_host_timespec64(pts, timeout)) {
4055 return -TARGET_EFAULT;
4057 } else {
4058 if (target_to_host_timespec(pts, timeout)) {
4059 return -TARGET_EFAULT;
4064 if (nsops > TARGET_SEMOPM) {
4065 return -TARGET_E2BIG;
4068 sops = g_new(struct sembuf, nsops);
4070 if (target_to_host_sembuf(sops, ptr, nsops)) {
4071 g_free(sops);
4072 return -TARGET_EFAULT;
4075 ret = -TARGET_ENOSYS;
4076 #ifdef __NR_semtimedop
4077 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4078 #endif
4079 #ifdef __NR_ipc
4080 if (ret == -TARGET_ENOSYS) {
4081 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4082 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4084 #endif
4085 g_free(sops);
4086 return ret;
4088 #endif
4090 struct target_msqid_ds
4092 struct target_ipc_perm msg_perm;
4093 abi_ulong msg_stime;
4094 #if TARGET_ABI_BITS == 32
4095 abi_ulong __unused1;
4096 #endif
4097 abi_ulong msg_rtime;
4098 #if TARGET_ABI_BITS == 32
4099 abi_ulong __unused2;
4100 #endif
4101 abi_ulong msg_ctime;
4102 #if TARGET_ABI_BITS == 32
4103 abi_ulong __unused3;
4104 #endif
4105 abi_ulong __msg_cbytes;
4106 abi_ulong msg_qnum;
4107 abi_ulong msg_qbytes;
4108 abi_ulong msg_lspid;
4109 abi_ulong msg_lrpid;
4110 abi_ulong __unused4;
4111 abi_ulong __unused5;
4114 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4115 abi_ulong target_addr)
4117 struct target_msqid_ds *target_md;
4119 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4120 return -TARGET_EFAULT;
4121 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4122 return -TARGET_EFAULT;
4123 host_md->msg_stime = tswapal(target_md->msg_stime);
4124 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4125 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4126 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4127 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4128 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4129 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4130 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4131 unlock_user_struct(target_md, target_addr, 0);
4132 return 0;
4135 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4136 struct msqid_ds *host_md)
4138 struct target_msqid_ds *target_md;
4140 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4141 return -TARGET_EFAULT;
4142 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4143 return -TARGET_EFAULT;
4144 target_md->msg_stime = tswapal(host_md->msg_stime);
4145 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4146 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4147 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4148 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4149 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4150 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4151 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4152 unlock_user_struct(target_md, target_addr, 1);
4153 return 0;
4156 struct target_msginfo {
4157 int msgpool;
4158 int msgmap;
4159 int msgmax;
4160 int msgmnb;
4161 int msgmni;
4162 int msgssz;
4163 int msgtql;
4164 unsigned short int msgseg;
4167 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4168 struct msginfo *host_msginfo)
4170 struct target_msginfo *target_msginfo;
4171 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4172 return -TARGET_EFAULT;
4173 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4174 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4175 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4176 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4177 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4178 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4179 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4180 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4181 unlock_user_struct(target_msginfo, target_addr, 1);
4182 return 0;
4185 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4187 struct msqid_ds dsarg;
4188 struct msginfo msginfo;
4189 abi_long ret = -TARGET_EINVAL;
4191 cmd &= 0xff;
4193 switch (cmd) {
4194 case IPC_STAT:
4195 case IPC_SET:
4196 case MSG_STAT:
4197 if (target_to_host_msqid_ds(&dsarg,ptr))
4198 return -TARGET_EFAULT;
4199 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4200 if (host_to_target_msqid_ds(ptr,&dsarg))
4201 return -TARGET_EFAULT;
4202 break;
4203 case IPC_RMID:
4204 ret = get_errno(msgctl(msgid, cmd, NULL));
4205 break;
4206 case IPC_INFO:
4207 case MSG_INFO:
4208 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4209 if (host_to_target_msginfo(ptr, &msginfo))
4210 return -TARGET_EFAULT;
4211 break;
4214 return ret;
4217 struct target_msgbuf {
4218 abi_long mtype;
4219 char mtext[1];
4222 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4223 ssize_t msgsz, int msgflg)
4225 struct target_msgbuf *target_mb;
4226 struct msgbuf *host_mb;
4227 abi_long ret = 0;
4229 if (msgsz < 0) {
4230 return -TARGET_EINVAL;
4233 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4234 return -TARGET_EFAULT;
4235 host_mb = g_try_malloc(msgsz + sizeof(long));
4236 if (!host_mb) {
4237 unlock_user_struct(target_mb, msgp, 0);
4238 return -TARGET_ENOMEM;
4240 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4241 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4242 ret = -TARGET_ENOSYS;
4243 #ifdef __NR_msgsnd
4244 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4245 #endif
4246 #ifdef __NR_ipc
4247 if (ret == -TARGET_ENOSYS) {
4248 #ifdef __s390x__
4249 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4250 host_mb));
4251 #else
4252 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4253 host_mb, 0));
4254 #endif
4256 #endif
4257 g_free(host_mb);
4258 unlock_user_struct(target_mb, msgp, 0);
4260 return ret;
4263 #ifdef __NR_ipc
4264 #if defined(__sparc__)
4265 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
4266 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4267 #elif defined(__s390x__)
4268 /* The s390 sys_ipc variant has only five parameters. */
4269 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4270 ((long int[]){(long int)__msgp, __msgtyp})
4271 #else
4272 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4273 ((long int[]){(long int)__msgp, __msgtyp}), 0
4274 #endif
4275 #endif
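/*
 * Sketch of the resulting fallback call (illustrative only): on most
 * hosts the kludge packs msgp and msgtyp into a two-element long array,
 * so the __NR_ipc path below effectively becomes
 *
 *   safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *            (long int[]){ (long int)host_mb, msgtyp }, 0);
 *
 * whereas SPARC passes msgp and msgtyp as two ordinary arguments.
 */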
4277 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4278 ssize_t msgsz, abi_long msgtyp,
4279 int msgflg)
4281 struct target_msgbuf *target_mb;
4282 char *target_mtext;
4283 struct msgbuf *host_mb;
4284 abi_long ret = 0;
4286 if (msgsz < 0) {
4287 return -TARGET_EINVAL;
4290 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4291 return -TARGET_EFAULT;
4293 host_mb = g_try_malloc(msgsz + sizeof(long));
4294 if (!host_mb) {
4295 ret = -TARGET_ENOMEM;
4296 goto end;
4298 ret = -TARGET_ENOSYS;
4299 #ifdef __NR_msgrcv
4300 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4301 #endif
4302 #ifdef __NR_ipc
4303 if (ret == -TARGET_ENOSYS) {
4304 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4305 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4307 #endif
4309 if (ret > 0) {
4310 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4311 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4312 if (!target_mtext) {
4313 ret = -TARGET_EFAULT;
4314 goto end;
4316 memcpy(target_mb->mtext, host_mb->mtext, ret);
4317 unlock_user(target_mtext, target_mtext_addr, ret);
4320 target_mb->mtype = tswapal(host_mb->mtype);
4322 end:
4323 if (target_mb)
4324 unlock_user_struct(target_mb, msgp, 1);
4325 g_free(host_mb);
4326 return ret;
4329 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4330 abi_ulong target_addr)
4332 struct target_shmid_ds *target_sd;
4334 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4335 return -TARGET_EFAULT;
4336 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4337 return -TARGET_EFAULT;
4338 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4339 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4340 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4341 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4342 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4343 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4344 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4345 unlock_user_struct(target_sd, target_addr, 0);
4346 return 0;
4349 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4350 struct shmid_ds *host_sd)
4352 struct target_shmid_ds *target_sd;
4354 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4355 return -TARGET_EFAULT;
4356 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4357 return -TARGET_EFAULT;
4358 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4359 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4360 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4361 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4362 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4363 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4364 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4365 unlock_user_struct(target_sd, target_addr, 1);
4366 return 0;
4369 struct target_shminfo {
4370 abi_ulong shmmax;
4371 abi_ulong shmmin;
4372 abi_ulong shmmni;
4373 abi_ulong shmseg;
4374 abi_ulong shmall;
4377 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4378 struct shminfo *host_shminfo)
4380 struct target_shminfo *target_shminfo;
4381 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4382 return -TARGET_EFAULT;
4383 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4384 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4385 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4386 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4387 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4388 unlock_user_struct(target_shminfo, target_addr, 1);
4389 return 0;
4392 struct target_shm_info {
4393 int used_ids;
4394 abi_ulong shm_tot;
4395 abi_ulong shm_rss;
4396 abi_ulong shm_swp;
4397 abi_ulong swap_attempts;
4398 abi_ulong swap_successes;
4401 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4402 struct shm_info *host_shm_info)
4404 struct target_shm_info *target_shm_info;
4405 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4406 return -TARGET_EFAULT;
4407 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4408 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4409 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4410 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4411 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4412 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4413 unlock_user_struct(target_shm_info, target_addr, 1);
4414 return 0;
4417 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4419 struct shmid_ds dsarg;
4420 struct shminfo shminfo;
4421 struct shm_info shm_info;
4422 abi_long ret = -TARGET_EINVAL;
4424 cmd &= 0xff;
4426 switch (cmd) {
4427 case IPC_STAT:
4428 case IPC_SET:
4429 case SHM_STAT:
4430 if (target_to_host_shmid_ds(&dsarg, buf))
4431 return -TARGET_EFAULT;
4432 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4433 if (host_to_target_shmid_ds(buf, &dsarg))
4434 return -TARGET_EFAULT;
4435 break;
4436 case IPC_INFO:
4437 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4438 if (host_to_target_shminfo(buf, &shminfo))
4439 return -TARGET_EFAULT;
4440 break;
4441 case SHM_INFO:
4442 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4443 if (host_to_target_shm_info(buf, &shm_info))
4444 return -TARGET_EFAULT;
4445 break;
4446 case IPC_RMID:
4447 case SHM_LOCK:
4448 case SHM_UNLOCK:
4449 ret = get_errno(shmctl(shmid, cmd, NULL));
4450 break;
4453 return ret;
4456 #ifndef TARGET_FORCE_SHMLBA
4457 /* For most architectures, SHMLBA is the same as the page size;
4458 * some architectures have larger values, in which case they should
4459 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4460 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4461 * and defining its own value for SHMLBA.
4463 * The kernel also permits SHMLBA to be set by the architecture to a
4464 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4465 * this means that addresses are rounded to the large size if
4466 * SHM_RND is set but addresses not aligned to that size are not rejected
4467 * as long as they are at least page-aligned. Since the only architecture
4468 * which uses this is ia64, this code doesn't provide for that oddity.
4470 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4472 return TARGET_PAGE_SIZE;
4474 #endif
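/*
 * A target that does need a larger SHMLBA would instead provide its own
 * definitions, along these lines (hypothetical sketch following the
 * convention described above, not taken from any real target):
 *
 *   #define TARGET_FORCE_SHMLBA 1
 *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *   {
 *       return 4 * TARGET_PAGE_SIZE;
 *   }
 */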
4476 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4477 int shmid, abi_ulong shmaddr, int shmflg)
4479 CPUState *cpu = env_cpu(cpu_env);
4480 abi_long raddr;
4481 void *host_raddr;
4482 struct shmid_ds shm_info;
4483 int i, ret;
4484 abi_ulong shmlba;
4486 /* shmat pointers are always untagged */
4488 /* find out the length of the shared memory segment */
4489 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4490 if (is_error(ret)) {
4491 /* can't get length, bail out */
4492 return ret;
4495 shmlba = target_shmlba(cpu_env);
4497 if (shmaddr & (shmlba - 1)) {
4498 if (shmflg & SHM_RND) {
4499 shmaddr &= ~(shmlba - 1);
4500 } else {
4501 return -TARGET_EINVAL;
4504 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4505 return -TARGET_EINVAL;
4508 mmap_lock();
4511 * We're mapping shared memory, so ensure we generate code for parallel
4512 * execution and flush old translations. This will work up to the level
4513 * supported by the host -- anything that requires EXCP_ATOMIC will not
4514 * be atomic with respect to an external process.
4516 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4517 cpu->tcg_cflags |= CF_PARALLEL;
4518 tb_flush(cpu);
4521 if (shmaddr)
4522 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4523 else {
4524 abi_ulong mmap_start;
4526 /* In order to use the host shmat, we need to honor host SHMLBA. */
4527 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4529 if (mmap_start == -1) {
4530 errno = ENOMEM;
4531 host_raddr = (void *)-1;
4532 } else
4533 host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4534 shmflg | SHM_REMAP);
4537 if (host_raddr == (void *)-1) {
4538 mmap_unlock();
4539 return get_errno((long)host_raddr);
4541 raddr = h2g((unsigned long)host_raddr);
4543 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4544 PAGE_VALID | PAGE_RESET | PAGE_READ |
4545 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4547 for (i = 0; i < N_SHM_REGIONS; i++) {
4548 if (!shm_regions[i].in_use) {
4549 shm_regions[i].in_use = true;
4550 shm_regions[i].start = raddr;
4551 shm_regions[i].size = shm_info.shm_segsz;
4552 break;
4556 mmap_unlock();
4557 return raddr;
4561 static inline abi_long do_shmdt(abi_ulong shmaddr)
4563 int i;
4564 abi_long rv;
4566 /* shmdt pointers are always untagged */
4568 mmap_lock();
4570 for (i = 0; i < N_SHM_REGIONS; ++i) {
4571 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4572 shm_regions[i].in_use = false;
4573 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4574 break;
4577 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4579 mmap_unlock();
4581 return rv;
4584 #ifdef TARGET_NR_ipc
4585 /* ??? This only works with linear mappings. */
4586 /* do_ipc() must return target values and target errnos. */
4587 static abi_long do_ipc(CPUArchState *cpu_env,
4588 unsigned int call, abi_long first,
4589 abi_long second, abi_long third,
4590 abi_long ptr, abi_long fifth)
4592 int version;
4593 abi_long ret = 0;
4595 version = call >> 16;
4596 call &= 0xffff;
4598 switch (call) {
4599 case IPCOP_semop:
4600 ret = do_semtimedop(first, ptr, second, 0, false);
4601 break;
4602 case IPCOP_semtimedop:
4604 * The s390 sys_ipc variant has only five parameters instead of six
4605 * (as in the default variant); the only difference is the handling of
4606 * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4607 * to a struct timespec while the generic variant uses the fifth parameter.
4609 #if defined(TARGET_S390X)
4610 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4611 #else
4612 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4613 #endif
4614 break;
4616 case IPCOP_semget:
4617 ret = get_errno(semget(first, second, third));
4618 break;
4620 case IPCOP_semctl: {
4621 /* The semun argument to semctl is passed by value, so dereference the
4622 * ptr argument. */
4623 abi_ulong atptr;
4624 get_user_ual(atptr, ptr);
4625 ret = do_semctl(first, second, third, atptr);
4626 break;
4629 case IPCOP_msgget:
4630 ret = get_errno(msgget(first, second));
4631 break;
4633 case IPCOP_msgsnd:
4634 ret = do_msgsnd(first, ptr, second, third);
4635 break;
4637 case IPCOP_msgctl:
4638 ret = do_msgctl(first, second, ptr);
4639 break;
4641 case IPCOP_msgrcv:
4642 switch (version) {
4643 case 0:
4645 struct target_ipc_kludge {
4646 abi_long msgp;
4647 abi_long msgtyp;
4648 } *tmp;
4650 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4651 ret = -TARGET_EFAULT;
4652 break;
4655 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4657 unlock_user_struct(tmp, ptr, 0);
4658 break;
4660 default:
4661 ret = do_msgrcv(first, ptr, second, fifth, third);
4663 break;
4665 case IPCOP_shmat:
4666 switch (version) {
4667 default:
4669 abi_ulong raddr;
4670 raddr = do_shmat(cpu_env, first, ptr, second);
4671 if (is_error(raddr))
4672 return get_errno(raddr);
4673 if (put_user_ual(raddr, third))
4674 return -TARGET_EFAULT;
4675 break;
4677 case 1:
4678 ret = -TARGET_EINVAL;
4679 break;
4681 break;
4682 case IPCOP_shmdt:
4683 ret = do_shmdt(ptr);
4684 break;
4686 case IPCOP_shmget:
4687 /* IPC_* flag values are the same on all Linux platforms */
4688 ret = get_errno(shmget(first, second, third));
4689 break;
4691 /* IPC_* and SHM_* command values are the same on all Linux platforms */
4692 case IPCOP_shmctl:
4693 ret = do_shmctl(first, second, ptr);
4694 break;
4695 default:
4696 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4697 call, version);
4698 ret = -TARGET_ENOSYS;
4699 break;
4701 return ret;
4703 #endif
4705 /* kernel structure types definitions */
4707 #define STRUCT(name, ...) STRUCT_ ## name,
4708 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4709 enum {
4710 #include "syscall_types.h"
4711 STRUCT_MAX
4713 #undef STRUCT
4714 #undef STRUCT_SPECIAL
4716 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4717 #define STRUCT_SPECIAL(name)
4718 #include "syscall_types.h"
4719 #undef STRUCT
4720 #undef STRUCT_SPECIAL
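/*
 * Illustrative expansion (assuming syscall_types.h contains an entry
 * such as STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)):
 * the first inclusion contributes an enum constant STRUCT_winsize, and
 * the second defines
 *
 *   static const argtype struct_winsize_def[] =
 *       { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 */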
4722 #define MAX_STRUCT_SIZE 4096
4724 #ifdef CONFIG_FIEMAP
4725 /* So fiemap access checks don't overflow on 32-bit systems.
4726 * This is very slightly smaller than the limit imposed by
4727 * the underlying kernel.
4729 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4730 / sizeof(struct fiemap_extent))
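/*
 * Rough numbers (illustrative): assuming the usual 56-byte
 * struct fiemap_extent, this caps fm_extent_count at about 76 million
 * extents, so the outbufsz computation below cannot overflow a uint32_t.
 */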
4732 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4733 int fd, int cmd, abi_long arg)
4735 /* The parameter for this ioctl is a struct fiemap followed
4736 * by an array of struct fiemap_extent whose size is set
4737 * in fiemap->fm_extent_count. The array is filled in by the
4738 * ioctl.
4740 int target_size_in, target_size_out;
4741 struct fiemap *fm;
4742 const argtype *arg_type = ie->arg_type;
4743 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4744 void *argptr, *p;
4745 abi_long ret;
4746 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4747 uint32_t outbufsz;
4748 int free_fm = 0;
4750 assert(arg_type[0] == TYPE_PTR);
4751 assert(ie->access == IOC_RW);
4752 arg_type++;
4753 target_size_in = thunk_type_size(arg_type, 0);
4754 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4755 if (!argptr) {
4756 return -TARGET_EFAULT;
4758 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4759 unlock_user(argptr, arg, 0);
4760 fm = (struct fiemap *)buf_temp;
4761 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4762 return -TARGET_EINVAL;
4765 outbufsz = sizeof (*fm) +
4766 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4768 if (outbufsz > MAX_STRUCT_SIZE) {
4769 /* We can't fit all the extents into the fixed size buffer.
4770 * Allocate one that is large enough and use it instead.
4772 fm = g_try_malloc(outbufsz);
4773 if (!fm) {
4774 return -TARGET_ENOMEM;
4776 memcpy(fm, buf_temp, sizeof(struct fiemap));
4777 free_fm = 1;
4779 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4780 if (!is_error(ret)) {
4781 target_size_out = target_size_in;
4782 /* An extent_count of 0 means we were only counting the extents
4783 * so there are no structs to copy
4785 if (fm->fm_extent_count != 0) {
4786 target_size_out += fm->fm_mapped_extents * extent_size;
4788 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4789 if (!argptr) {
4790 ret = -TARGET_EFAULT;
4791 } else {
4792 /* Convert the struct fiemap */
4793 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4794 if (fm->fm_extent_count != 0) {
4795 p = argptr + target_size_in;
4796 /* ...and then all the struct fiemap_extents */
4797 for (i = 0; i < fm->fm_mapped_extents; i++) {
4798 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4799 THUNK_TARGET);
4800 p += extent_size;
4803 unlock_user(argptr, arg, target_size_out);
4806 if (free_fm) {
4807 g_free(fm);
4809 return ret;
4811 #endif
4813 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4814 int fd, int cmd, abi_long arg)
4816 const argtype *arg_type = ie->arg_type;
4817 int target_size;
4818 void *argptr;
4819 int ret;
4820 struct ifconf *host_ifconf;
4821 uint32_t outbufsz;
4822 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4823 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4824 int target_ifreq_size;
4825 int nb_ifreq;
4826 int free_buf = 0;
4827 int i;
4828 int target_ifc_len;
4829 abi_long target_ifc_buf;
4830 int host_ifc_len;
4831 char *host_ifc_buf;
4833 assert(arg_type[0] == TYPE_PTR);
4834 assert(ie->access == IOC_RW);
4836 arg_type++;
4837 target_size = thunk_type_size(arg_type, 0);
4839 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4840 if (!argptr)
4841 return -TARGET_EFAULT;
4842 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4843 unlock_user(argptr, arg, 0);
4845 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4846 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4847 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4849 if (target_ifc_buf != 0) {
4850 target_ifc_len = host_ifconf->ifc_len;
4851 nb_ifreq = target_ifc_len / target_ifreq_size;
4852 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4854 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4855 if (outbufsz > MAX_STRUCT_SIZE) {
4857 * We can't fit all the ifreq entries into the fixed-size buffer.
4858 * Allocate one that is large enough and use it instead.
4860 host_ifconf = g_try_malloc(outbufsz);
4861 if (!host_ifconf) {
4862 return -TARGET_ENOMEM;
4864 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4865 free_buf = 1;
4867 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4869 host_ifconf->ifc_len = host_ifc_len;
4870 } else {
4871 host_ifc_buf = NULL;
4873 host_ifconf->ifc_buf = host_ifc_buf;
4875 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4876 if (!is_error(ret)) {
4877 /* convert host ifc_len to target ifc_len */
4879 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4880 target_ifc_len = nb_ifreq * target_ifreq_size;
4881 host_ifconf->ifc_len = target_ifc_len;
4883 /* restore target ifc_buf */
4885 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4887 /* copy struct ifconf to target user */
4889 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4890 if (!argptr)
4891 return -TARGET_EFAULT;
4892 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4893 unlock_user(argptr, arg, target_size);
4895 if (target_ifc_buf != 0) {
4896 /* copy ifreq[] to target user */
4897 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4898 for (i = 0; i < nb_ifreq ; i++) {
4899 thunk_convert(argptr + i * target_ifreq_size,
4900 host_ifc_buf + i * sizeof(struct ifreq),
4901 ifreq_arg_type, THUNK_TARGET);
4903 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4907 if (free_buf) {
4908 g_free(host_ifconf);
4911 return ret;
4914 #if defined(CONFIG_USBFS)
4915 #if HOST_LONG_BITS > 64
4916 #error USBDEVFS thunks do not support >64 bit hosts yet.
4917 #endif
4918 struct live_urb {
4919 uint64_t target_urb_adr;
4920 uint64_t target_buf_adr;
4921 char *target_buf_ptr;
4922 struct usbdevfs_urb host_urb;
4925 static GHashTable *usbdevfs_urb_hashtable(void)
4927 static GHashTable *urb_hashtable;
4929 if (!urb_hashtable) {
4930 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4932 return urb_hashtable;
4935 static void urb_hashtable_insert(struct live_urb *urb)
4937 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4938 g_hash_table_insert(urb_hashtable, urb, urb);
4941 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4943 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4944 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4947 static void urb_hashtable_remove(struct live_urb *urb)
4949 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4950 g_hash_table_remove(urb_hashtable, urb);
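/*
 * Lifecycle sketch: SUBMITURB allocates a live_urb keyed by the guest
 * URB address and hands &lurb->host_urb to the kernel; REAPURB receives
 * that host pointer back, recovers the live_urb and frees it; DISCARDURB
 * looks the live_urb up by guest address instead.
 */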
4953 static abi_long
4954 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4955 int fd, int cmd, abi_long arg)
4957 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4958 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4959 struct live_urb *lurb;
4960 void *argptr;
4961 uint64_t hurb;
4962 int target_size;
4963 uintptr_t target_urb_adr;
4964 abi_long ret;
4966 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4968 memset(buf_temp, 0, sizeof(uint64_t));
4969 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4970 if (is_error(ret)) {
4971 return ret;
4974 memcpy(&hurb, buf_temp, sizeof(uint64_t));
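/* Recover the containing live_urb from the host URB pointer returned by
 * the kernel: the usual container_of() pattern, relying on the submit
 * path having handed the kernel &lurb->host_urb. */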
4975 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4976 if (!lurb->target_urb_adr) {
4977 return -TARGET_EFAULT;
4979 urb_hashtable_remove(lurb);
4980 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4981 lurb->host_urb.buffer_length);
4982 lurb->target_buf_ptr = NULL;
4984 /* restore the guest buffer pointer */
4985 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4987 /* update the guest urb struct */
4988 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4989 if (!argptr) {
4990 g_free(lurb);
4991 return -TARGET_EFAULT;
4993 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4994 unlock_user(argptr, lurb->target_urb_adr, target_size);
4996 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4997 /* write back the urb handle */
4998 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4999 if (!argptr) {
5000 g_free(lurb);
5001 return -TARGET_EFAULT;
5004 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5005 target_urb_adr = lurb->target_urb_adr;
5006 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5007 unlock_user(argptr, arg, target_size);
5009 g_free(lurb);
5010 return ret;
5013 static abi_long
5014 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5015 uint8_t *buf_temp __attribute__((unused)),
5016 int fd, int cmd, abi_long arg)
5018 struct live_urb *lurb;
5020 /* map target address back to host URB with metadata. */
5021 lurb = urb_hashtable_lookup(arg);
5022 if (!lurb) {
5023 return -TARGET_EFAULT;
5025 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5028 static abi_long
5029 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5030 int fd, int cmd, abi_long arg)
5032 const argtype *arg_type = ie->arg_type;
5033 int target_size;
5034 abi_long ret;
5035 void *argptr;
5036 int rw_dir;
5037 struct live_urb *lurb;
5040 * Each submitted URB needs to map to a unique ID for the
5041 * kernel, and that unique ID needs to be a pointer to
5042 * host memory. Hence, we need a separate allocation for each URB.
5043 * Isochronous transfers have a variable-length struct.
5045 arg_type++;
5046 target_size = thunk_type_size(arg_type, THUNK_TARGET);
5048 /* construct host copy of urb and metadata */
5049 lurb = g_try_new0(struct live_urb, 1);
5050 if (!lurb) {
5051 return -TARGET_ENOMEM;
5054 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5055 if (!argptr) {
5056 g_free(lurb);
5057 return -TARGET_EFAULT;
5059 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5060 unlock_user(argptr, arg, 0);
5062 lurb->target_urb_adr = arg;
5063 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5065 /* Buffer space used depends on the endpoint type, so lock the entire buffer. */
5066 /* Control-type URBs should check the buffer contents for the true direction. */
5067 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5068 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5069 lurb->host_urb.buffer_length, 1);
5070 if (lurb->target_buf_ptr == NULL) {
5071 g_free(lurb);
5072 return -TARGET_EFAULT;
5075 /* update buffer pointer in host copy */
5076 lurb->host_urb.buffer = lurb->target_buf_ptr;
5078 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5079 if (is_error(ret)) {
5080 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5081 g_free(lurb);
5082 } else {
5083 urb_hashtable_insert(lurb);
5086 return ret;
5088 #endif /* CONFIG_USBFS */
5090 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5091 int cmd, abi_long arg)
5093 void *argptr;
5094 struct dm_ioctl *host_dm;
5095 abi_long guest_data;
5096 uint32_t guest_data_size;
5097 int target_size;
5098 const argtype *arg_type = ie->arg_type;
5099 abi_long ret;
5100 void *big_buf = NULL;
5101 char *host_data;
5103 arg_type++;
5104 target_size = thunk_type_size(arg_type, 0);
5105 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5106 if (!argptr) {
5107 ret = -TARGET_EFAULT;
5108 goto out;
5110 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5111 unlock_user(argptr, arg, 0);
5113 /* buf_temp is too small, so fetch things into a bigger buffer */
5114 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5115 memcpy(big_buf, buf_temp, target_size);
5116 buf_temp = big_buf;
5117 host_dm = big_buf;
5119 guest_data = arg + host_dm->data_start;
5120 if ((guest_data - arg) < 0) {
5121 ret = -TARGET_EINVAL;
5122 goto out;
5124 guest_data_size = host_dm->data_size - host_dm->data_start;
5125 host_data = (char*)host_dm + host_dm->data_start;
5127 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5128 if (!argptr) {
5129 ret = -TARGET_EFAULT;
5130 goto out;
5133 switch (ie->host_cmd) {
5134 case DM_REMOVE_ALL:
5135 case DM_LIST_DEVICES:
5136 case DM_DEV_CREATE:
5137 case DM_DEV_REMOVE:
5138 case DM_DEV_SUSPEND:
5139 case DM_DEV_STATUS:
5140 case DM_DEV_WAIT:
5141 case DM_TABLE_STATUS:
5142 case DM_TABLE_CLEAR:
5143 case DM_TABLE_DEPS:
5144 case DM_LIST_VERSIONS:
5145 /* no input data */
5146 break;
5147 case DM_DEV_RENAME:
5148 case DM_DEV_SET_GEOMETRY:
5149 /* data contains only strings */
5150 memcpy(host_data, argptr, guest_data_size);
5151 break;
5152 case DM_TARGET_MSG:
5153 memcpy(host_data, argptr, guest_data_size);
5154 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5155 break;
5156 case DM_TABLE_LOAD:
5158 void *gspec = argptr;
5159 void *cur_data = host_data;
5160 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5161 int spec_size = thunk_type_size(arg_type, 0);
5162 int i;
5164 for (i = 0; i < host_dm->target_count; i++) {
5165 struct dm_target_spec *spec = cur_data;
5166 uint32_t next;
5167 int slen;
5169 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5170 slen = strlen((char*)gspec + spec_size) + 1;
5171 next = spec->next;
5172 spec->next = sizeof(*spec) + slen;
5173 strcpy((char*)&spec[1], gspec + spec_size);
5174 gspec += next;
5175 cur_data += spec->next;
5177 break;
5179 default:
5180 ret = -TARGET_EINVAL;
5181 unlock_user(argptr, guest_data, 0);
5182 goto out;
5184 unlock_user(argptr, guest_data, 0);
5186 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5187 if (!is_error(ret)) {
5188 guest_data = arg + host_dm->data_start;
5189 guest_data_size = host_dm->data_size - host_dm->data_start;
5190 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5191 switch (ie->host_cmd) {
5192 case DM_REMOVE_ALL:
5193 case DM_DEV_CREATE:
5194 case DM_DEV_REMOVE:
5195 case DM_DEV_RENAME:
5196 case DM_DEV_SUSPEND:
5197 case DM_DEV_STATUS:
5198 case DM_TABLE_LOAD:
5199 case DM_TABLE_CLEAR:
5200 case DM_TARGET_MSG:
5201 case DM_DEV_SET_GEOMETRY:
5202 /* no return data */
5203 break;
5204 case DM_LIST_DEVICES:
5206 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5207 uint32_t remaining_data = guest_data_size;
5208 void *cur_data = argptr;
5209 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5210 int nl_size = 12; /* can't use thunk_type_size() due to alignment */
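/* Layout note (illustrative): struct dm_name_list is a 64-bit dev
 * followed by a 32-bit next offset (8 + 4 = 12 bytes) with the name
 * string appended; padding for the flexible name member would make a
 * generic thunk size computation return more than 12. */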
5212 while (1) {
5213 uint32_t next = nl->next;
5214 if (next) {
5215 nl->next = nl_size + (strlen(nl->name) + 1);
5217 if (remaining_data < nl->next) {
5218 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5219 break;
5221 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5222 strcpy(cur_data + nl_size, nl->name);
5223 cur_data += nl->next;
5224 remaining_data -= nl->next;
5225 if (!next) {
5226 break;
5228 nl = (void*)nl + next;
5230 break;
5232 case DM_DEV_WAIT:
5233 case DM_TABLE_STATUS:
5235 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5236 void *cur_data = argptr;
5237 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5238 int spec_size = thunk_type_size(arg_type, 0);
5239 int i;
5241 for (i = 0; i < host_dm->target_count; i++) {
5242 uint32_t next = spec->next;
5243 int slen = strlen((char*)&spec[1]) + 1;
5244 spec->next = (cur_data - argptr) + spec_size + slen;
5245 if (guest_data_size < spec->next) {
5246 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5247 break;
5249 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5250 strcpy(cur_data + spec_size, (char*)&spec[1]);
5251 cur_data = argptr + spec->next;
5252 spec = (void*)host_dm + host_dm->data_start + next;
5254 break;
5256 case DM_TABLE_DEPS:
5258 void *hdata = (void*)host_dm + host_dm->data_start;
5259 int count = *(uint32_t*)hdata;
5260 uint64_t *hdev = hdata + 8;
5261 uint64_t *gdev = argptr + 8;
5262 int i;
5264 *(uint32_t*)argptr = tswap32(count);
5265 for (i = 0; i < count; i++) {
5266 *gdev = tswap64(*hdev);
5267 gdev++;
5268 hdev++;
5270 break;
5272 case DM_LIST_VERSIONS:
5274 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5275 uint32_t remaining_data = guest_data_size;
5276 void *cur_data = argptr;
5277 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5278 int vers_size = thunk_type_size(arg_type, 0);
5280 while (1) {
5281 uint32_t next = vers->next;
5282 if (next) {
5283 vers->next = vers_size + (strlen(vers->name) + 1);
5285 if (remaining_data < vers->next) {
5286 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5287 break;
5289 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5290 strcpy(cur_data + vers_size, vers->name);
5291 cur_data += vers->next;
5292 remaining_data -= vers->next;
5293 if (!next) {
5294 break;
5296 vers = (void*)vers + next;
5298 break;
5300 default:
5301 unlock_user(argptr, guest_data, 0);
5302 ret = -TARGET_EINVAL;
5303 goto out;
5305 unlock_user(argptr, guest_data, guest_data_size);
5307 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5308 if (!argptr) {
5309 ret = -TARGET_EFAULT;
5310 goto out;
5312 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5313 unlock_user(argptr, arg, target_size);
5315 out:
5316 g_free(big_buf);
5317 return ret;
5320 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5321 int cmd, abi_long arg)
5323 void *argptr;
5324 int target_size;
5325 const argtype *arg_type = ie->arg_type;
5326 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5327 abi_long ret;
5329 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5330 struct blkpg_partition host_part;
5332 /* Read and convert blkpg */
5333 arg_type++;
5334 target_size = thunk_type_size(arg_type, 0);
5335 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5336 if (!argptr) {
5337 ret = -TARGET_EFAULT;
5338 goto out;
5340 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5341 unlock_user(argptr, arg, 0);
5343 switch (host_blkpg->op) {
5344 case BLKPG_ADD_PARTITION:
5345 case BLKPG_DEL_PARTITION:
5346 /* payload is struct blkpg_partition */
5347 break;
5348 default:
5349 /* Unknown opcode */
5350 ret = -TARGET_EINVAL;
5351 goto out;
5354 /* Read and convert blkpg->data */
5355 arg = (abi_long)(uintptr_t)host_blkpg->data;
5356 target_size = thunk_type_size(part_arg_type, 0);
5357 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5358 if (!argptr) {
5359 ret = -TARGET_EFAULT;
5360 goto out;
5362 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5363 unlock_user(argptr, arg, 0);
5365 /* Swizzle the data pointer to our local copy and call! */
5366 host_blkpg->data = &host_part;
5367 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5369 out:
5370 return ret;
5373 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5374 int fd, int cmd, abi_long arg)
5376 const argtype *arg_type = ie->arg_type;
5377 const StructEntry *se;
5378 const argtype *field_types;
5379 const int *dst_offsets, *src_offsets;
5380 int target_size;
5381 void *argptr;
5382 abi_ulong *target_rt_dev_ptr = NULL;
5383 unsigned long *host_rt_dev_ptr = NULL;
5384 abi_long ret;
5385 int i;
5387 assert(ie->access == IOC_W);
5388 assert(*arg_type == TYPE_PTR);
5389 arg_type++;
5390 assert(*arg_type == TYPE_STRUCT);
5391 target_size = thunk_type_size(arg_type, 0);
5392 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5393 if (!argptr) {
5394 return -TARGET_EFAULT;
5396 arg_type++;
5397 assert(*arg_type == (int)STRUCT_rtentry);
5398 se = struct_entries + *arg_type++;
5399 assert(se->convert[0] == NULL);
5400 /* Convert the struct field by field here so we can catch the rt_dev string. */
5401 field_types = se->field_types;
5402 dst_offsets = se->field_offsets[THUNK_HOST];
5403 src_offsets = se->field_offsets[THUNK_TARGET];
5404 for (i = 0; i < se->nb_fields; i++) {
5405 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5406 assert(*field_types == TYPE_PTRVOID);
5407 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5408 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5409 if (*target_rt_dev_ptr != 0) {
5410 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5411 tswapal(*target_rt_dev_ptr));
5412 if (!*host_rt_dev_ptr) {
5413 unlock_user(argptr, arg, 0);
5414 return -TARGET_EFAULT;
5416 } else {
5417 *host_rt_dev_ptr = 0;
5419 field_types++;
5420 continue;
5422 field_types = thunk_convert(buf_temp + dst_offsets[i],
5423 argptr + src_offsets[i],
5424 field_types, THUNK_HOST);
5426 unlock_user(argptr, arg, 0);
5428 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5430 assert(host_rt_dev_ptr != NULL);
5431 assert(target_rt_dev_ptr != NULL);
5432 if (*host_rt_dev_ptr != 0) {
5433 unlock_user((void *)*host_rt_dev_ptr,
5434 *target_rt_dev_ptr, 0);
5436 return ret;
5439 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5440 int fd, int cmd, abi_long arg)
5442 int sig = target_to_host_signal(arg);
5443 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5446 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5447 int fd, int cmd, abi_long arg)
5449 struct timeval tv;
5450 abi_long ret;
5452 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5453 if (is_error(ret)) {
5454 return ret;
5457 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5458 if (copy_to_user_timeval(arg, &tv)) {
5459 return -TARGET_EFAULT;
5461 } else {
5462 if (copy_to_user_timeval64(arg, &tv)) {
5463 return -TARGET_EFAULT;
5467 return ret;
5470 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5471 int fd, int cmd, abi_long arg)
5473 struct timespec ts;
5474 abi_long ret;
5476 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5477 if (is_error(ret)) {
5478 return ret;
5481 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5482 if (host_to_target_timespec(arg, &ts)) {
5483 return -TARGET_EFAULT;
5485 } else {
5486 if (host_to_target_timespec64(arg, &ts)) {
5487 return -TARGET_EFAULT;
5491 return ret;
5494 #ifdef TIOCGPTPEER
5495 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5496 int fd, int cmd, abi_long arg)
5498 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5499 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5501 #endif
5503 #ifdef HAVE_DRM_H
5505 static void unlock_drm_version(struct drm_version *host_ver,
5506 struct target_drm_version *target_ver,
5507 bool copy)
5509 unlock_user(host_ver->name, target_ver->name,
5510 copy ? host_ver->name_len : 0);
5511 unlock_user(host_ver->date, target_ver->date,
5512 copy ? host_ver->date_len : 0);
5513 unlock_user(host_ver->desc, target_ver->desc,
5514 copy ? host_ver->desc_len : 0);
5517 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5518 struct target_drm_version *target_ver)
5520 memset(host_ver, 0, sizeof(*host_ver));
5522 __get_user(host_ver->name_len, &target_ver->name_len);
5523 if (host_ver->name_len) {
5524 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5525 target_ver->name_len, 0);
5526 if (!host_ver->name) {
5527 return -EFAULT;
5531 __get_user(host_ver->date_len, &target_ver->date_len);
5532 if (host_ver->date_len) {
5533 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5534 target_ver->date_len, 0);
5535 if (!host_ver->date) {
5536 goto err;
5540 __get_user(host_ver->desc_len, &target_ver->desc_len);
5541 if (host_ver->desc_len) {
5542 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5543 target_ver->desc_len, 0);
5544 if (!host_ver->desc) {
5545 goto err;
5549 return 0;
5550 err:
5551 unlock_drm_version(host_ver, target_ver, false);
5552 return -EFAULT;
5555 static inline void host_to_target_drmversion(
5556 struct target_drm_version *target_ver,
5557 struct drm_version *host_ver)
5559 __put_user(host_ver->version_major, &target_ver->version_major);
5560 __put_user(host_ver->version_minor, &target_ver->version_minor);
5561 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5562 __put_user(host_ver->name_len, &target_ver->name_len);
5563 __put_user(host_ver->date_len, &target_ver->date_len);
5564 __put_user(host_ver->desc_len, &target_ver->desc_len);
5565 unlock_drm_version(host_ver, target_ver, true);
5568 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5569 int fd, int cmd, abi_long arg)
5571 struct drm_version *ver;
5572 struct target_drm_version *target_ver;
5573 abi_long ret;
5575 switch (ie->host_cmd) {
5576 case DRM_IOCTL_VERSION:
5577 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5578 return -TARGET_EFAULT;
5580 ver = (struct drm_version *)buf_temp;
5581 ret = target_to_host_drmversion(ver, target_ver);
5582 if (!is_error(ret)) {
5583 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5584 if (is_error(ret)) {
5585 unlock_drm_version(ver, target_ver, false);
5586 } else {
5587 host_to_target_drmversion(target_ver, ver);
5590 unlock_user_struct(target_ver, arg, 0);
5591 return ret;
5593 return -TARGET_ENOSYS;
5596 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5597 struct drm_i915_getparam *gparam,
5598 int fd, abi_long arg)
5600 abi_long ret;
5601 int value;
5602 struct target_drm_i915_getparam *target_gparam;
5604 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5605 return -TARGET_EFAULT;
5608 __get_user(gparam->param, &target_gparam->param);
5609 gparam->value = &value;
5610 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5611 put_user_s32(value, target_gparam->value);
5613 unlock_user_struct(target_gparam, arg, 0);
5614 return ret;
5617 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5618 int fd, int cmd, abi_long arg)
5620 switch (ie->host_cmd) {
5621 case DRM_IOCTL_I915_GETPARAM:
5622 return do_ioctl_drm_i915_getparam(ie,
5623 (struct drm_i915_getparam *)buf_temp,
5624 fd, arg);
5625 default:
5626 return -TARGET_ENOSYS;
5630 #endif
5632 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5633 int fd, int cmd, abi_long arg)
5635 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5636 struct tun_filter *target_filter;
5637 char *target_addr;
5639 assert(ie->access == IOC_W);
5641 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5642 if (!target_filter) {
5643 return -TARGET_EFAULT;
5645 filter->flags = tswap16(target_filter->flags);
5646 filter->count = tswap16(target_filter->count);
5647 unlock_user(target_filter, arg, 0);
5649 if (filter->count) {
5650 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5651 MAX_STRUCT_SIZE) {
5652 return -TARGET_EFAULT;
5655 target_addr = lock_user(VERIFY_READ,
5656 arg + offsetof(struct tun_filter, addr),
5657 filter->count * ETH_ALEN, 1);
5658 if (!target_addr) {
5659 return -TARGET_EFAULT;
5661 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5662 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5665 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5668 IOCTLEntry ioctl_entries[] = {
5669 #define IOCTL(cmd, access, ...) \
5670 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5671 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5672 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5673 #define IOCTL_IGNORE(cmd) \
5674 { TARGET_ ## cmd, 0, #cmd },
5675 #include "ioctls.h"
5676 { 0, 0, },
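/*
 * Example expansion (sketch; assuming ioctls.h contains an entry like
 * IOCTL(BLKROSET, IOC_W, MK_PTR(TYPE_INT))):
 *
 *   { TARGET_BLKROSET, BLKROSET, "BLKROSET", IOC_W, 0,
 *     { MK_PTR(TYPE_INT) } },
 *
 * IOCTL_IGNORE() entries leave host_cmd zero, which do_ioctl() below
 * turns into -TARGET_ENOSYS.
 */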
5679 /* ??? Implement proper locking for ioctls. */
5680 /* do_ioctl() must return target values and target errnos. */
5681 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5683 const IOCTLEntry *ie;
5684 const argtype *arg_type;
5685 abi_long ret;
5686 uint8_t buf_temp[MAX_STRUCT_SIZE];
5687 int target_size;
5688 void *argptr;
5690 ie = ioctl_entries;
5691 for (;;) {
5692 if (ie->target_cmd == 0) {
5693 qemu_log_mask(
5694 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5695 return -TARGET_ENOSYS;
5697 if (ie->target_cmd == cmd)
5698 break;
5699 ie++;
5701 arg_type = ie->arg_type;
5702 if (ie->do_ioctl) {
5703 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5704 } else if (!ie->host_cmd) {
5705 /* Some architectures define BSD ioctls in their headers
5706 that are not implemented in Linux. */
5707 return -TARGET_ENOSYS;
5710 switch (arg_type[0]) {
5711 case TYPE_NULL:
5712 /* no argument */
5713 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5714 break;
5715 case TYPE_PTRVOID:
5716 case TYPE_INT:
5717 case TYPE_LONG:
5718 case TYPE_ULONG:
5719 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5720 break;
5721 case TYPE_PTR:
5722 arg_type++;
5723 target_size = thunk_type_size(arg_type, 0);
5724 switch (ie->access) {
5725 case IOC_R:
5726 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5727 if (!is_error(ret)) {
5728 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5729 if (!argptr)
5730 return -TARGET_EFAULT;
5731 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5732 unlock_user(argptr, arg, target_size);
5734 break;
5735 case IOC_W:
5736 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5737 if (!argptr)
5738 return -TARGET_EFAULT;
5739 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5740 unlock_user(argptr, arg, 0);
5741 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5742 break;
5743 default:
5744 case IOC_RW:
5745 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5746 if (!argptr)
5747 return -TARGET_EFAULT;
5748 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5749 unlock_user(argptr, arg, 0);
5750 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5751 if (!is_error(ret)) {
5752 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5753 if (!argptr)
5754 return -TARGET_EFAULT;
5755 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5756 unlock_user(argptr, arg, target_size);
5758 break;
5760 break;
5761 default:
5762 qemu_log_mask(LOG_UNIMP,
5763 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5764 (long)cmd, arg_type[0]);
5765 ret = -TARGET_ENOSYS;
5766 break;
5768 return ret;
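/*
 * Each row in the translation tables below is
 *   { target_mask, target_bits, host_mask, host_bits }:
 * when the masked target flag value equals target_bits, host_bits is
 * set in the converted value (and symmetrically for the host-to-target
 * direction). Sketch of the intended use, as in the termios converters
 * further down:
 *
 *   host->c_iflag = target_to_host_bitmask(tswap32(target->c_iflag),
 *                                          iflag_tbl);
 */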
5771 static const bitmask_transtbl iflag_tbl[] = {
5772 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5773 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5774 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5775 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5776 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5777 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5778 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5779 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5780 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5781 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5782 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5783 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5784 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5785 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5786 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5787 { 0, 0, 0, 0 }
5790 static const bitmask_transtbl oflag_tbl[] = {
5791 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5792 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5793 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5794 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5795 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5796 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5797 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5798 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5799 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5800 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5801 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5802 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5803 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5804 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5805 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5806 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5807 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5808 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5809 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5810 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5811 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5812 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5813 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5814 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5815 { 0, 0, 0, 0 }
5818 static const bitmask_transtbl cflag_tbl[] = {
5819 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5820 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5821 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5822 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5823 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5824 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5825 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5826 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5827 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5828 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5829 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5830 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5831 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5832 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5833 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5834 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5835 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5836 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5837 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5838 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5839 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5840 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5841 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5842 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5843 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5844 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5845 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5846 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5847 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5848 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5849 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5850 { 0, 0, 0, 0 }
5853 static const bitmask_transtbl lflag_tbl[] = {
5854 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5855 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5856 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5857 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5858 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5859 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5860 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5861 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5862 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5863 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5864 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5865 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5866 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5867 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5868 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5869 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5870 { 0, 0, 0, 0 }
5873 static void target_to_host_termios (void *dst, const void *src)
5875 struct host_termios *host = dst;
5876 const struct target_termios *target = src;
5878 host->c_iflag =
5879 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5880 host->c_oflag =
5881 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5882 host->c_cflag =
5883 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5884 host->c_lflag =
5885 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5886 host->c_line = target->c_line;
5888 memset(host->c_cc, 0, sizeof(host->c_cc));
5889 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5890 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5891 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5892 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5893 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5894 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5895 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5896 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5897 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5898 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5899 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5900 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5901 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5902 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5903 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5904 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5905 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5908 static void host_to_target_termios (void *dst, const void *src)
5910 struct target_termios *target = dst;
5911 const struct host_termios *host = src;
5913 target->c_iflag =
5914 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5915 target->c_oflag =
5916 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5917 target->c_cflag =
5918 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5919 target->c_lflag =
5920 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5921 target->c_line = host->c_line;
5923 memset(target->c_cc, 0, sizeof(target->c_cc));
5924 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5925 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5926 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5927 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5928 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5929 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5930 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5931 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5932 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5933 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5934 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5935 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5936 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5937 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5938 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5939 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5940 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5943 static const StructEntry struct_termios_def = {
5944 .convert = { host_to_target_termios, target_to_host_termios },
5945 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5946 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5947 .print = print_termios,
5950 static const bitmask_transtbl mmap_flags_tbl[] = {
5951 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5952 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5953 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5954 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5955 MAP_ANONYMOUS, MAP_ANONYMOUS },
5956 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5957 MAP_GROWSDOWN, MAP_GROWSDOWN },
5958 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5959 MAP_DENYWRITE, MAP_DENYWRITE },
5960 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5961 MAP_EXECUTABLE, MAP_EXECUTABLE },
5962 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5963 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5964 MAP_NORESERVE, MAP_NORESERVE },
5965 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5966 /* MAP_STACK has been ignored by the kernel for quite some time.
5967 Recognize it for the target insofar as we do not want to pass
5968 it through to the host. */
5969 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5970 { 0, 0, 0, 0 }
5974 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5975 * TARGET_I386 is defined if TARGET_X86_64 is defined
5977 #if defined(TARGET_I386)
5979 /* NOTE: there is really only one LDT shared by all the threads */
5980 static uint8_t *ldt_table;
5982 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5984 int size;
5985 void *p;
5987 if (!ldt_table)
5988 return 0;
5989 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5990 if (size > bytecount)
5991 size = bytecount;
5992 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5993 if (!p)
5994 return -TARGET_EFAULT;
5995 /* ??? Should this be byteswapped? */
5996 memcpy(p, ldt_table, size);
5997 unlock_user(p, ptr, size);
5998 return size;
6001 /* XXX: add locking support */
6002 static abi_long write_ldt(CPUX86State *env,
6003 abi_ulong ptr, unsigned long bytecount, int oldmode)
6005 struct target_modify_ldt_ldt_s ldt_info;
6006 struct target_modify_ldt_ldt_s *target_ldt_info;
6007 int seg_32bit, contents, read_exec_only, limit_in_pages;
6008 int seg_not_present, useable, lm;
6009 uint32_t *lp, entry_1, entry_2;
6011 if (bytecount != sizeof(ldt_info))
6012 return -TARGET_EINVAL;
6013 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6014 return -TARGET_EFAULT;
6015 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6016 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6017 ldt_info.limit = tswap32(target_ldt_info->limit);
6018 ldt_info.flags = tswap32(target_ldt_info->flags);
6019 unlock_user_struct(target_ldt_info, ptr, 0);
6021 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6022 return -TARGET_EINVAL;
6023 seg_32bit = ldt_info.flags & 1;
6024 contents = (ldt_info.flags >> 1) & 3;
6025 read_exec_only = (ldt_info.flags >> 3) & 1;
6026 limit_in_pages = (ldt_info.flags >> 4) & 1;
6027 seg_not_present = (ldt_info.flags >> 5) & 1;
6028 useable = (ldt_info.flags >> 6) & 1;
6029 #ifdef TARGET_ABI32
6030 lm = 0;
6031 #else
6032 lm = (ldt_info.flags >> 7) & 1;
6033 #endif
6034 if (contents == 3) {
6035 if (oldmode)
6036 return -TARGET_EINVAL;
6037 if (seg_not_present == 0)
6038 return -TARGET_EINVAL;
6040 /* allocate the LDT */
6041 if (!ldt_table) {
6042 env->ldt.base = target_mmap(0,
6043 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6044 PROT_READ|PROT_WRITE,
6045 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6046 if (env->ldt.base == -1)
6047 return -TARGET_ENOMEM;
6048 memset(g2h_untagged(env->ldt.base), 0,
6049 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6050 env->ldt.limit = 0xffff;
6051 ldt_table = g2h_untagged(env->ldt.base);
6054 /* NOTE: same code as Linux kernel */
6055 /* Allow LDTs to be cleared by the user. */
6056 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6057 if (oldmode ||
6058 (contents == 0 &&
6059 read_exec_only == 1 &&
6060 seg_32bit == 0 &&
6061 limit_in_pages == 0 &&
6062 seg_not_present == 1 &&
6063 useable == 0 )) {
6064 entry_1 = 0;
6065 entry_2 = 0;
6066 goto install;
6070 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6071 (ldt_info.limit & 0x0ffff);
6072 entry_2 = (ldt_info.base_addr & 0xff000000) |
6073 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6074 (ldt_info.limit & 0xf0000) |
6075 ((read_exec_only ^ 1) << 9) |
6076 (contents << 10) |
6077 ((seg_not_present ^ 1) << 15) |
6078 (seg_32bit << 22) |
6079 (limit_in_pages << 23) |
6080 (lm << 21) |
6081 0x7000;
6082 if (!oldmode)
6083 entry_2 |= (useable << 20);
6085 /* Install the new entry ... */
6086 install:
6087 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6088 lp[0] = tswap32(entry_1);
6089 lp[1] = tswap32(entry_2);
6090 return 0;
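/*
 * Worked example of the descriptor encoding above (illustrative values):
 * base_addr = 0x12345678, limit = 0xfffff, with seg_32bit = 1,
 * limit_in_pages = 1, contents = 0, read_exec_only = 0,
 * seg_not_present = 0, useable = 0 and lm = 0 gives
 *
 *     entry_1 = 0x5678ffff   (base bits 15..0 << 16 | limit bits 15..0)
 *     entry_2 = 0x12cff234   (base bits 31..24 and 23..16, limit bits
 *                             19..16, the type/present/granularity
 *                             bits, and the constant 0x7000)
 *
 * i.e. the same split layout that hardware GDT/LDT descriptors use.
 */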
6093 /* specific and weird i386 syscalls */
6094 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6095 unsigned long bytecount)
6097 abi_long ret;
6099 switch (func) {
6100 case 0:
6101 ret = read_ldt(ptr, bytecount);
6102 break;
6103 case 1:
6104 ret = write_ldt(env, ptr, bytecount, 1);
6105 break;
6106 case 0x11:
6107 ret = write_ldt(env, ptr, bytecount, 0);
6108 break;
6109 default:
6110 ret = -TARGET_ENOSYS;
6111 break;
6113 return ret;
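/*
 * Note on the func values above (they mirror the kernel's modify_ldt):
 * 0 reads the LDT, 1 writes an entry in legacy mode, and 0x11 writes in
 * the modern mode that honours the "useable" bit.  func == 2
 * (read_default_ldt) is not handled here and falls through to
 * -TARGET_ENOSYS.
 */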
6116 #if defined(TARGET_ABI32)
6117 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6119 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6120 struct target_modify_ldt_ldt_s ldt_info;
6121 struct target_modify_ldt_ldt_s *target_ldt_info;
6122 int seg_32bit, contents, read_exec_only, limit_in_pages;
6123 int seg_not_present, useable, lm;
6124 uint32_t *lp, entry_1, entry_2;
6125 int i;
6127 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6128 if (!target_ldt_info)
6129 return -TARGET_EFAULT;
6130 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6131 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6132 ldt_info.limit = tswap32(target_ldt_info->limit);
6133 ldt_info.flags = tswap32(target_ldt_info->flags);
6134 if (ldt_info.entry_number == -1) {
6135 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6136 if (gdt_table[i] == 0) {
6137 ldt_info.entry_number = i;
6138 target_ldt_info->entry_number = tswap32(i);
6139 break;
6143 unlock_user_struct(target_ldt_info, ptr, 1);
6145 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6146 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6147 return -TARGET_EINVAL;
6148 seg_32bit = ldt_info.flags & 1;
6149 contents = (ldt_info.flags >> 1) & 3;
6150 read_exec_only = (ldt_info.flags >> 3) & 1;
6151 limit_in_pages = (ldt_info.flags >> 4) & 1;
6152 seg_not_present = (ldt_info.flags >> 5) & 1;
6153 useable = (ldt_info.flags >> 6) & 1;
6154 #ifdef TARGET_ABI32
6155 lm = 0;
6156 #else
6157 lm = (ldt_info.flags >> 7) & 1;
6158 #endif
6160 if (contents == 3) {
6161 if (seg_not_present == 0)
6162 return -TARGET_EINVAL;
6165 /* NOTE: same code as Linux kernel */
6166 /* Allow LDTs to be cleared by the user. */
6167 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6168 if ((contents == 0 &&
6169 read_exec_only == 1 &&
6170 seg_32bit == 0 &&
6171 limit_in_pages == 0 &&
6172 seg_not_present == 1 &&
6173 useable == 0 )) {
6174 entry_1 = 0;
6175 entry_2 = 0;
6176 goto install;
6180 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6181 (ldt_info.limit & 0x0ffff);
6182 entry_2 = (ldt_info.base_addr & 0xff000000) |
6183 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6184 (ldt_info.limit & 0xf0000) |
6185 ((read_exec_only ^ 1) << 9) |
6186 (contents << 10) |
6187 ((seg_not_present ^ 1) << 15) |
6188 (seg_32bit << 22) |
6189 (limit_in_pages << 23) |
6190 (useable << 20) |
6191 (lm << 21) |
6192 0x7000;
6194 /* Install the new entry ... */
6195 install:
6196 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6197 lp[0] = tswap32(entry_1);
6198 lp[1] = tswap32(entry_2);
6199 return 0;
6202 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6204 struct target_modify_ldt_ldt_s *target_ldt_info;
6205 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6206 uint32_t base_addr, limit, flags;
6207 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6208 int seg_not_present, useable, lm;
6209 uint32_t *lp, entry_1, entry_2;
6211 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6212 if (!target_ldt_info)
6213 return -TARGET_EFAULT;
6214 idx = tswap32(target_ldt_info->entry_number);
6215 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6216 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6217 unlock_user_struct(target_ldt_info, ptr, 1);
6218 return -TARGET_EINVAL;
6220 lp = (uint32_t *)(gdt_table + idx);
6221 entry_1 = tswap32(lp[0]);
6222 entry_2 = tswap32(lp[1]);
6224 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6225 contents = (entry_2 >> 10) & 3;
6226 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6227 seg_32bit = (entry_2 >> 22) & 1;
6228 limit_in_pages = (entry_2 >> 23) & 1;
6229 useable = (entry_2 >> 20) & 1;
6230 #ifdef TARGET_ABI32
6231 lm = 0;
6232 #else
6233 lm = (entry_2 >> 21) & 1;
6234 #endif
6235 flags = (seg_32bit << 0) | (contents << 1) |
6236 (read_exec_only << 3) | (limit_in_pages << 4) |
6237 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6238 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6239 base_addr = (entry_1 >> 16) |
6240 (entry_2 & 0xff000000) |
6241 ((entry_2 & 0xff) << 16);
6242 target_ldt_info->base_addr = tswapal(base_addr);
6243 target_ldt_info->limit = tswap32(limit);
6244 target_ldt_info->flags = tswap32(flags);
6245 unlock_user_struct(target_ldt_info, ptr, 1);
6246 return 0;
6249 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6251 return -TARGET_ENOSYS;
6253 #else
6254 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6256 abi_long ret = 0;
6257 abi_ulong val;
6258 int idx;
6260 switch(code) {
6261 case TARGET_ARCH_SET_GS:
6262 case TARGET_ARCH_SET_FS:
6263 if (code == TARGET_ARCH_SET_GS)
6264 idx = R_GS;
6265 else
6266 idx = R_FS;
6267 cpu_x86_load_seg(env, idx, 0);
6268 env->segs[idx].base = addr;
6269 break;
6270 case TARGET_ARCH_GET_GS:
6271 case TARGET_ARCH_GET_FS:
6272 if (code == TARGET_ARCH_GET_GS)
6273 idx = R_GS;
6274 else
6275 idx = R_FS;
6276 val = env->segs[idx].base;
6277 if (put_user(val, addr, abi_ulong))
6278 ret = -TARGET_EFAULT;
6279 break;
6280 default:
6281 ret = -TARGET_EINVAL;
6282 break;
6284 return ret;
6286 #endif /* defined(TARGET_ABI32) */
6287 #endif /* defined(TARGET_I386) */
6290 * These constants are generic. Supply any that are missing from the host.
6292 #ifndef PR_SET_NAME
6293 # define PR_SET_NAME 15
6294 # define PR_GET_NAME 16
6295 #endif
6296 #ifndef PR_SET_FP_MODE
6297 # define PR_SET_FP_MODE 45
6298 # define PR_GET_FP_MODE 46
6299 # define PR_FP_MODE_FR (1 << 0)
6300 # define PR_FP_MODE_FRE (1 << 1)
6301 #endif
6302 #ifndef PR_SVE_SET_VL
6303 # define PR_SVE_SET_VL 50
6304 # define PR_SVE_GET_VL 51
6305 # define PR_SVE_VL_LEN_MASK 0xffff
6306 # define PR_SVE_VL_INHERIT (1 << 17)
6307 #endif
6308 #ifndef PR_PAC_RESET_KEYS
6309 # define PR_PAC_RESET_KEYS 54
6310 # define PR_PAC_APIAKEY (1 << 0)
6311 # define PR_PAC_APIBKEY (1 << 1)
6312 # define PR_PAC_APDAKEY (1 << 2)
6313 # define PR_PAC_APDBKEY (1 << 3)
6314 # define PR_PAC_APGAKEY (1 << 4)
6315 #endif
6316 #ifndef PR_SET_TAGGED_ADDR_CTRL
6317 # define PR_SET_TAGGED_ADDR_CTRL 55
6318 # define PR_GET_TAGGED_ADDR_CTRL 56
6319 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6320 #endif
6321 #ifndef PR_MTE_TCF_SHIFT
6322 # define PR_MTE_TCF_SHIFT 1
6323 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6324 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6325 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6326 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6327 # define PR_MTE_TAG_SHIFT 3
6328 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6329 #endif
6330 #ifndef PR_SET_IO_FLUSHER
6331 # define PR_SET_IO_FLUSHER 57
6332 # define PR_GET_IO_FLUSHER 58
6333 #endif
6334 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6335 # define PR_SET_SYSCALL_USER_DISPATCH 59
6336 #endif
6337 #ifndef PR_SME_SET_VL
6338 # define PR_SME_SET_VL 63
6339 # define PR_SME_GET_VL 64
6340 # define PR_SME_VL_LEN_MASK 0xffff
6341 # define PR_SME_VL_INHERIT (1 << 17)
6342 #endif
6344 #include "target_prctl.h"
6346 static abi_long do_prctl_inval0(CPUArchState *env)
6348 return -TARGET_EINVAL;
6351 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6353 return -TARGET_EINVAL;
6356 #ifndef do_prctl_get_fp_mode
6357 #define do_prctl_get_fp_mode do_prctl_inval0
6358 #endif
6359 #ifndef do_prctl_set_fp_mode
6360 #define do_prctl_set_fp_mode do_prctl_inval1
6361 #endif
6362 #ifndef do_prctl_sve_get_vl
6363 #define do_prctl_sve_get_vl do_prctl_inval0
6364 #endif
6365 #ifndef do_prctl_sve_set_vl
6366 #define do_prctl_sve_set_vl do_prctl_inval1
6367 #endif
6368 #ifndef do_prctl_reset_keys
6369 #define do_prctl_reset_keys do_prctl_inval1
6370 #endif
6371 #ifndef do_prctl_set_tagged_addr_ctrl
6372 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6373 #endif
6374 #ifndef do_prctl_get_tagged_addr_ctrl
6375 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6376 #endif
6377 #ifndef do_prctl_get_unalign
6378 #define do_prctl_get_unalign do_prctl_inval1
6379 #endif
6380 #ifndef do_prctl_set_unalign
6381 #define do_prctl_set_unalign do_prctl_inval1
6382 #endif
6383 #ifndef do_prctl_sme_get_vl
6384 #define do_prctl_sme_get_vl do_prctl_inval0
6385 #endif
6386 #ifndef do_prctl_sme_set_vl
6387 #define do_prctl_sme_set_vl do_prctl_inval1
6388 #endif
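/*
 * Sketch of how an architecture opts in to one of these hooks
 * (hypothetical MIPS-flavoured example; the real implementations live
 * in each target's target_prctl.h):
 *
 *     static abi_long do_prctl_get_fp_mode(CPUArchState *env)
 *     {
 *         return (env->CP0_Status & (1 << CP0St_FR)) ? PR_FP_MODE_FR : 0;
 *     }
 *     #define do_prctl_get_fp_mode do_prctl_get_fp_mode
 *
 * The self-referential #define is what stops the do_prctl_inval*
 * fallback above from being installed for that hook.
 */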
6390 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6391 abi_long arg3, abi_long arg4, abi_long arg5)
6393 abi_long ret;
6395 switch (option) {
6396 case PR_GET_PDEATHSIG:
6398 int deathsig;
6399 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6400 arg3, arg4, arg5));
6401 if (!is_error(ret) &&
6402 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6403 return -TARGET_EFAULT;
6405 return ret;
6407 case PR_SET_PDEATHSIG:
6408 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6409 arg3, arg4, arg5));
6410 case PR_GET_NAME:
6412 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6413 if (!name) {
6414 return -TARGET_EFAULT;
6416 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6417 arg3, arg4, arg5));
6418 unlock_user(name, arg2, 16);
6419 return ret;
6421 case PR_SET_NAME:
6423 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6424 if (!name) {
6425 return -TARGET_EFAULT;
6427 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6428 arg3, arg4, arg5));
6429 unlock_user(name, arg2, 0);
6430 return ret;
6432 case PR_GET_FP_MODE:
6433 return do_prctl_get_fp_mode(env);
6434 case PR_SET_FP_MODE:
6435 return do_prctl_set_fp_mode(env, arg2);
6436 case PR_SVE_GET_VL:
6437 return do_prctl_sve_get_vl(env);
6438 case PR_SVE_SET_VL:
6439 return do_prctl_sve_set_vl(env, arg2);
6440 case PR_SME_GET_VL:
6441 return do_prctl_sme_get_vl(env);
6442 case PR_SME_SET_VL:
6443 return do_prctl_sme_set_vl(env, arg2);
6444 case PR_PAC_RESET_KEYS:
6445 if (arg3 || arg4 || arg5) {
6446 return -TARGET_EINVAL;
6448 return do_prctl_reset_keys(env, arg2);
6449 case PR_SET_TAGGED_ADDR_CTRL:
6450 if (arg3 || arg4 || arg5) {
6451 return -TARGET_EINVAL;
6453 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6454 case PR_GET_TAGGED_ADDR_CTRL:
6455 if (arg2 || arg3 || arg4 || arg5) {
6456 return -TARGET_EINVAL;
6458 return do_prctl_get_tagged_addr_ctrl(env);
6460 case PR_GET_UNALIGN:
6461 return do_prctl_get_unalign(env, arg2);
6462 case PR_SET_UNALIGN:
6463 return do_prctl_set_unalign(env, arg2);
6465 case PR_CAP_AMBIENT:
6466 case PR_CAPBSET_READ:
6467 case PR_CAPBSET_DROP:
6468 case PR_GET_DUMPABLE:
6469 case PR_SET_DUMPABLE:
6470 case PR_GET_KEEPCAPS:
6471 case PR_SET_KEEPCAPS:
6472 case PR_GET_SECUREBITS:
6473 case PR_SET_SECUREBITS:
6474 case PR_GET_TIMING:
6475 case PR_SET_TIMING:
6476 case PR_GET_TIMERSLACK:
6477 case PR_SET_TIMERSLACK:
6478 case PR_MCE_KILL:
6479 case PR_MCE_KILL_GET:
6480 case PR_GET_NO_NEW_PRIVS:
6481 case PR_SET_NO_NEW_PRIVS:
6482 case PR_GET_IO_FLUSHER:
6483 case PR_SET_IO_FLUSHER:
6484 /* Some prctl options have no pointer arguments, so we can pass them straight on. */
6485 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6487 case PR_GET_CHILD_SUBREAPER:
6488 case PR_SET_CHILD_SUBREAPER:
6489 case PR_GET_SPECULATION_CTRL:
6490 case PR_SET_SPECULATION_CTRL:
6491 case PR_GET_TID_ADDRESS:
6492 /* TODO */
6493 return -TARGET_EINVAL;
6495 case PR_GET_FPEXC:
6496 case PR_SET_FPEXC:
6497 /* Was used for SPE on PowerPC. */
6498 return -TARGET_EINVAL;
6500 case PR_GET_ENDIAN:
6501 case PR_SET_ENDIAN:
6502 case PR_GET_FPEMU:
6503 case PR_SET_FPEMU:
6504 case PR_SET_MM:
6505 case PR_GET_SECCOMP:
6506 case PR_SET_SECCOMP:
6507 case PR_SET_SYSCALL_USER_DISPATCH:
6508 case PR_GET_THP_DISABLE:
6509 case PR_SET_THP_DISABLE:
6510 case PR_GET_TSC:
6511 case PR_SET_TSC:
6512 /* Disabled to prevent the target from disabling functionality we need. */
6513 return -TARGET_EINVAL;
6515 default:
6516 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6517 option);
6518 return -TARGET_EINVAL;
6522 #define NEW_STACK_SIZE 0x40000
6525 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6526 typedef struct {
6527 CPUArchState *env;
6528 pthread_mutex_t mutex;
6529 pthread_cond_t cond;
6530 pthread_t thread;
6531 uint32_t tid;
6532 abi_ulong child_tidptr;
6533 abi_ulong parent_tidptr;
6534 sigset_t sigmask;
6535 } new_thread_info;
6537 static void *clone_func(void *arg)
6539 new_thread_info *info = arg;
6540 CPUArchState *env;
6541 CPUState *cpu;
6542 TaskState *ts;
6544 rcu_register_thread();
6545 tcg_register_thread();
6546 env = info->env;
6547 cpu = env_cpu(env);
6548 thread_cpu = cpu;
6549 ts = (TaskState *)cpu->opaque;
6550 info->tid = sys_gettid();
6551 task_settid(ts);
6552 if (info->child_tidptr)
6553 put_user_u32(info->tid, info->child_tidptr);
6554 if (info->parent_tidptr)
6555 put_user_u32(info->tid, info->parent_tidptr);
6556 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6557 /* Enable signals. */
6558 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6559 /* Signal to the parent that we're ready. */
6560 pthread_mutex_lock(&info->mutex);
6561 pthread_cond_broadcast(&info->cond);
6562 pthread_mutex_unlock(&info->mutex);
6563 /* Wait until the parent has finished initializing the tls state. */
6564 pthread_mutex_lock(&clone_lock);
6565 pthread_mutex_unlock(&clone_lock);
6566 cpu_loop(env);
6567 /* never exits */
6568 return NULL;
6571 /* do_fork() must return host values and target errnos (unlike most
6572 do_*() functions). */
6573 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6574 abi_ulong parent_tidptr, target_ulong newtls,
6575 abi_ulong child_tidptr)
6577 CPUState *cpu = env_cpu(env);
6578 int ret;
6579 TaskState *ts;
6580 CPUState *new_cpu;
6581 CPUArchState *new_env;
6582 sigset_t sigmask;
6584 flags &= ~CLONE_IGNORED_FLAGS;
6586 /* Emulate vfork() with fork() */
6587 if (flags & CLONE_VFORK)
6588 flags &= ~(CLONE_VFORK | CLONE_VM);
6590 if (flags & CLONE_VM) {
6591 TaskState *parent_ts = (TaskState *)cpu->opaque;
6592 new_thread_info info;
6593 pthread_attr_t attr;
6595 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6596 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6597 return -TARGET_EINVAL;
6600 ts = g_new0(TaskState, 1);
6601 init_task_state(ts);
6603 /* Grab a mutex so that thread setup appears atomic. */
6604 pthread_mutex_lock(&clone_lock);
6607 * If this is our first additional thread, we need to ensure we
6608 * generate code for parallel execution and flush old translations.
6609 * Do this now so that the copy gets CF_PARALLEL too.
6611 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6612 cpu->tcg_cflags |= CF_PARALLEL;
6613 tb_flush(cpu);
6616 /* we create a new CPU instance. */
6617 new_env = cpu_copy(env);
6618 /* Init regs that differ from the parent. */
6619 cpu_clone_regs_child(new_env, newsp, flags);
6620 cpu_clone_regs_parent(env, flags);
6621 new_cpu = env_cpu(new_env);
6622 new_cpu->opaque = ts;
6623 ts->bprm = parent_ts->bprm;
6624 ts->info = parent_ts->info;
6625 ts->signal_mask = parent_ts->signal_mask;
6627 if (flags & CLONE_CHILD_CLEARTID) {
6628 ts->child_tidptr = child_tidptr;
6631 if (flags & CLONE_SETTLS) {
6632 cpu_set_tls (new_env, newtls);
6635 memset(&info, 0, sizeof(info));
6636 pthread_mutex_init(&info.mutex, NULL);
6637 pthread_mutex_lock(&info.mutex);
6638 pthread_cond_init(&info.cond, NULL);
6639 info.env = new_env;
6640 if (flags & CLONE_CHILD_SETTID) {
6641 info.child_tidptr = child_tidptr;
6643 if (flags & CLONE_PARENT_SETTID) {
6644 info.parent_tidptr = parent_tidptr;
6647 ret = pthread_attr_init(&attr);
6648 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6649 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6650 /* It is not safe to deliver signals until the child has finished
6651 initializing, so temporarily block all signals. */
6652 sigfillset(&sigmask);
6653 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6654 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6656 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6657 /* TODO: Free new CPU state if thread creation failed. */
6659 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6660 pthread_attr_destroy(&attr);
6661 if (ret == 0) {
6662 /* Wait for the child to initialize. */
6663 pthread_cond_wait(&info.cond, &info.mutex);
6664 ret = info.tid;
6665 } else {
6666 ret = -1;
6668 pthread_mutex_unlock(&info.mutex);
6669 pthread_cond_destroy(&info.cond);
6670 pthread_mutex_destroy(&info.mutex);
6671 pthread_mutex_unlock(&clone_lock);
6672 } else {
6673 /* Without CLONE_VM, we consider it a fork. */
6674 if (flags & CLONE_INVALID_FORK_FLAGS) {
6675 return -TARGET_EINVAL;
6678 /* We can't support custom termination signals */
6679 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6680 return -TARGET_EINVAL;
6683 if (block_signals()) {
6684 return -QEMU_ERESTARTSYS;
6687 fork_start();
6688 ret = fork();
6689 if (ret == 0) {
6690 /* Child Process. */
6691 cpu_clone_regs_child(env, newsp, flags);
6692 fork_end(1);
6693 /* There is a race condition here. The parent process could
6694 theoretically read the TID in the child process before the child
6695 tid is set. This would require using either ptrace
6696 (not implemented) or having *_tidptr point at a shared memory
6697 mapping. We can't repeat the spinlock hack used above because
6698 the child process gets its own copy of the lock. */
6699 if (flags & CLONE_CHILD_SETTID)
6700 put_user_u32(sys_gettid(), child_tidptr);
6701 if (flags & CLONE_PARENT_SETTID)
6702 put_user_u32(sys_gettid(), parent_tidptr);
6703 ts = (TaskState *)cpu->opaque;
6704 if (flags & CLONE_SETTLS)
6705 cpu_set_tls (env, newtls);
6706 if (flags & CLONE_CHILD_CLEARTID)
6707 ts->child_tidptr = child_tidptr;
6708 } else {
6709 cpu_clone_regs_parent(env, flags);
6710 fork_end(0);
6713 return ret;
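/*
 * Illustrative flag patterns reaching do_fork() (typical glibc usage,
 * not an exhaustive list): pthread_create() passes roughly
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * and takes the pthread branch above, while fork() (flags == SIGCHLD)
 * and vfork() (CLONE_VFORK | CLONE_VM | SIGCHLD) both end up in the
 * host fork() branch.
 */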
6716 /* Warning: does not handle Linux-specific flags... */
6717 static int target_to_host_fcntl_cmd(int cmd)
6719 int ret;
6721 switch(cmd) {
6722 case TARGET_F_DUPFD:
6723 case TARGET_F_GETFD:
6724 case TARGET_F_SETFD:
6725 case TARGET_F_GETFL:
6726 case TARGET_F_SETFL:
6727 case TARGET_F_OFD_GETLK:
6728 case TARGET_F_OFD_SETLK:
6729 case TARGET_F_OFD_SETLKW:
6730 ret = cmd;
6731 break;
6732 case TARGET_F_GETLK:
6733 ret = F_GETLK64;
6734 break;
6735 case TARGET_F_SETLK:
6736 ret = F_SETLK64;
6737 break;
6738 case TARGET_F_SETLKW:
6739 ret = F_SETLKW64;
6740 break;
6741 case TARGET_F_GETOWN:
6742 ret = F_GETOWN;
6743 break;
6744 case TARGET_F_SETOWN:
6745 ret = F_SETOWN;
6746 break;
6747 case TARGET_F_GETSIG:
6748 ret = F_GETSIG;
6749 break;
6750 case TARGET_F_SETSIG:
6751 ret = F_SETSIG;
6752 break;
6753 #if TARGET_ABI_BITS == 32
6754 case TARGET_F_GETLK64:
6755 ret = F_GETLK64;
6756 break;
6757 case TARGET_F_SETLK64:
6758 ret = F_SETLK64;
6759 break;
6760 case TARGET_F_SETLKW64:
6761 ret = F_SETLKW64;
6762 break;
6763 #endif
6764 case TARGET_F_SETLEASE:
6765 ret = F_SETLEASE;
6766 break;
6767 case TARGET_F_GETLEASE:
6768 ret = F_GETLEASE;
6769 break;
6770 #ifdef F_DUPFD_CLOEXEC
6771 case TARGET_F_DUPFD_CLOEXEC:
6772 ret = F_DUPFD_CLOEXEC;
6773 break;
6774 #endif
6775 case TARGET_F_NOTIFY:
6776 ret = F_NOTIFY;
6777 break;
6778 #ifdef F_GETOWN_EX
6779 case TARGET_F_GETOWN_EX:
6780 ret = F_GETOWN_EX;
6781 break;
6782 #endif
6783 #ifdef F_SETOWN_EX
6784 case TARGET_F_SETOWN_EX:
6785 ret = F_SETOWN_EX;
6786 break;
6787 #endif
6788 #ifdef F_SETPIPE_SZ
6789 case TARGET_F_SETPIPE_SZ:
6790 ret = F_SETPIPE_SZ;
6791 break;
6792 case TARGET_F_GETPIPE_SZ:
6793 ret = F_GETPIPE_SZ;
6794 break;
6795 #endif
6796 #ifdef F_ADD_SEALS
6797 case TARGET_F_ADD_SEALS:
6798 ret = F_ADD_SEALS;
6799 break;
6800 case TARGET_F_GET_SEALS:
6801 ret = F_GET_SEALS;
6802 break;
6803 #endif
6804 default:
6805 ret = -TARGET_EINVAL;
6806 break;
6809 #if defined(__powerpc64__)
6810 /* On PPC64, the glibc headers have the F_*LK* commands defined as 12,
6811 * 13 and 14, which are not supported by the kernel. The glibc fcntl
6812 * call actually adjusts them to 5, 6 and 7 before making the syscall().
6813 * Since we make the syscall directly, adjust to what the kernel supports.
6815 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6816 ret -= F_GETLK64 - 5;
6818 #endif
6820 return ret;
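/*
 * Example: a 32-bit guest's TARGET_F_GETLK is mapped to the host's
 * F_GETLK64 so that 64-bit file offsets survive the translation; on
 * ppc64 hosts the adjustment above then converts glibc's F_GETLK64
 * (12) to the kernel's value (5) before the raw syscall is made.
 */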
6823 #define FLOCK_TRANSTBL \
6824 switch (type) { \
6825 TRANSTBL_CONVERT(F_RDLCK); \
6826 TRANSTBL_CONVERT(F_WRLCK); \
6827 TRANSTBL_CONVERT(F_UNLCK); \
6830 static int target_to_host_flock(int type)
6832 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6833 FLOCK_TRANSTBL
6834 #undef TRANSTBL_CONVERT
6835 return -TARGET_EINVAL;
6838 static int host_to_target_flock(int type)
6840 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6841 FLOCK_TRANSTBL
6842 #undef TRANSTBL_CONVERT
6843 /* If we don't know how to convert the value coming from the host,
6844 * we copy it to the target field as-is.
6846 return type;
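/*
 * For reference, with the TRANSTBL_CONVERT definition used in
 * target_to_host_flock(), the FLOCK_TRANSTBL macro expands to:
 *
 *     switch (type) {
 *         case TARGET_F_RDLCK: return F_RDLCK;
 *         case TARGET_F_WRLCK: return F_WRLCK;
 *         case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 *
 * and host_to_target_flock() gets the mirror-image expansion.
 */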
6849 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6850 abi_ulong target_flock_addr)
6852 struct target_flock *target_fl;
6853 int l_type;
6855 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6856 return -TARGET_EFAULT;
6859 __get_user(l_type, &target_fl->l_type);
6860 l_type = target_to_host_flock(l_type);
6861 if (l_type < 0) {
6862 return l_type;
6864 fl->l_type = l_type;
6865 __get_user(fl->l_whence, &target_fl->l_whence);
6866 __get_user(fl->l_start, &target_fl->l_start);
6867 __get_user(fl->l_len, &target_fl->l_len);
6868 __get_user(fl->l_pid, &target_fl->l_pid);
6869 unlock_user_struct(target_fl, target_flock_addr, 0);
6870 return 0;
6873 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6874 const struct flock64 *fl)
6876 struct target_flock *target_fl;
6877 short l_type;
6879 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6880 return -TARGET_EFAULT;
6883 l_type = host_to_target_flock(fl->l_type);
6884 __put_user(l_type, &target_fl->l_type);
6885 __put_user(fl->l_whence, &target_fl->l_whence);
6886 __put_user(fl->l_start, &target_fl->l_start);
6887 __put_user(fl->l_len, &target_fl->l_len);
6888 __put_user(fl->l_pid, &target_fl->l_pid);
6889 unlock_user_struct(target_fl, target_flock_addr, 1);
6890 return 0;
6893 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6894 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6896 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6897 struct target_oabi_flock64 {
6898 abi_short l_type;
6899 abi_short l_whence;
6900 abi_llong l_start;
6901 abi_llong l_len;
6902 abi_int l_pid;
6903 } QEMU_PACKED;
6905 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6906 abi_ulong target_flock_addr)
6908 struct target_oabi_flock64 *target_fl;
6909 int l_type;
6911 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6912 return -TARGET_EFAULT;
6915 __get_user(l_type, &target_fl->l_type);
6916 l_type = target_to_host_flock(l_type);
6917 if (l_type < 0) {
6918 return l_type;
6920 fl->l_type = l_type;
6921 __get_user(fl->l_whence, &target_fl->l_whence);
6922 __get_user(fl->l_start, &target_fl->l_start);
6923 __get_user(fl->l_len, &target_fl->l_len);
6924 __get_user(fl->l_pid, &target_fl->l_pid);
6925 unlock_user_struct(target_fl, target_flock_addr, 0);
6926 return 0;
6929 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6930 const struct flock64 *fl)
6932 struct target_oabi_flock64 *target_fl;
6933 short l_type;
6935 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6936 return -TARGET_EFAULT;
6939 l_type = host_to_target_flock(fl->l_type);
6940 __put_user(l_type, &target_fl->l_type);
6941 __put_user(fl->l_whence, &target_fl->l_whence);
6942 __put_user(fl->l_start, &target_fl->l_start);
6943 __put_user(fl->l_len, &target_fl->l_len);
6944 __put_user(fl->l_pid, &target_fl->l_pid);
6945 unlock_user_struct(target_fl, target_flock_addr, 1);
6946 return 0;
6948 #endif
6950 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6951 abi_ulong target_flock_addr)
6953 struct target_flock64 *target_fl;
6954 int l_type;
6956 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6957 return -TARGET_EFAULT;
6960 __get_user(l_type, &target_fl->l_type);
6961 l_type = target_to_host_flock(l_type);
6962 if (l_type < 0) {
6963 return l_type;
6965 fl->l_type = l_type;
6966 __get_user(fl->l_whence, &target_fl->l_whence);
6967 __get_user(fl->l_start, &target_fl->l_start);
6968 __get_user(fl->l_len, &target_fl->l_len);
6969 __get_user(fl->l_pid, &target_fl->l_pid);
6970 unlock_user_struct(target_fl, target_flock_addr, 0);
6971 return 0;
6974 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6975 const struct flock64 *fl)
6977 struct target_flock64 *target_fl;
6978 short l_type;
6980 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6981 return -TARGET_EFAULT;
6984 l_type = host_to_target_flock(fl->l_type);
6985 __put_user(l_type, &target_fl->l_type);
6986 __put_user(fl->l_whence, &target_fl->l_whence);
6987 __put_user(fl->l_start, &target_fl->l_start);
6988 __put_user(fl->l_len, &target_fl->l_len);
6989 __put_user(fl->l_pid, &target_fl->l_pid);
6990 unlock_user_struct(target_fl, target_flock_addr, 1);
6991 return 0;
6994 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6996 struct flock64 fl64;
6997 #ifdef F_GETOWN_EX
6998 struct f_owner_ex fox;
6999 struct target_f_owner_ex *target_fox;
7000 #endif
7001 abi_long ret;
7002 int host_cmd = target_to_host_fcntl_cmd(cmd);
7004 if (host_cmd == -TARGET_EINVAL)
7005 return host_cmd;
7007 switch(cmd) {
7008 case TARGET_F_GETLK:
7009 ret = copy_from_user_flock(&fl64, arg);
7010 if (ret) {
7011 return ret;
7013 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7014 if (ret == 0) {
7015 ret = copy_to_user_flock(arg, &fl64);
7017 break;
7019 case TARGET_F_SETLK:
7020 case TARGET_F_SETLKW:
7021 ret = copy_from_user_flock(&fl64, arg);
7022 if (ret) {
7023 return ret;
7025 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7026 break;
7028 case TARGET_F_GETLK64:
7029 case TARGET_F_OFD_GETLK:
7030 ret = copy_from_user_flock64(&fl64, arg);
7031 if (ret) {
7032 return ret;
7034 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7035 if (ret == 0) {
7036 ret = copy_to_user_flock64(arg, &fl64);
7038 break;
7039 case TARGET_F_SETLK64:
7040 case TARGET_F_SETLKW64:
7041 case TARGET_F_OFD_SETLK:
7042 case TARGET_F_OFD_SETLKW:
7043 ret = copy_from_user_flock64(&fl64, arg);
7044 if (ret) {
7045 return ret;
7047 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7048 break;
7050 case TARGET_F_GETFL:
7051 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7052 if (ret >= 0) {
7053 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7055 break;
7057 case TARGET_F_SETFL:
7058 ret = get_errno(safe_fcntl(fd, host_cmd,
7059 target_to_host_bitmask(arg,
7060 fcntl_flags_tbl)));
7061 break;
7063 #ifdef F_GETOWN_EX
7064 case TARGET_F_GETOWN_EX:
7065 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7066 if (ret >= 0) {
7067 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7068 return -TARGET_EFAULT;
7069 target_fox->type = tswap32(fox.type);
7070 target_fox->pid = tswap32(fox.pid);
7071 unlock_user_struct(target_fox, arg, 1);
7073 break;
7074 #endif
7076 #ifdef F_SETOWN_EX
7077 case TARGET_F_SETOWN_EX:
7078 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7079 return -TARGET_EFAULT;
7080 fox.type = tswap32(target_fox->type);
7081 fox.pid = tswap32(target_fox->pid);
7082 unlock_user_struct(target_fox, arg, 0);
7083 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7084 break;
7085 #endif
7087 case TARGET_F_SETSIG:
7088 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7089 break;
7091 case TARGET_F_GETSIG:
7092 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7093 break;
7095 case TARGET_F_SETOWN:
7096 case TARGET_F_GETOWN:
7097 case TARGET_F_SETLEASE:
7098 case TARGET_F_GETLEASE:
7099 case TARGET_F_SETPIPE_SZ:
7100 case TARGET_F_GETPIPE_SZ:
7101 case TARGET_F_ADD_SEALS:
7102 case TARGET_F_GET_SEALS:
7103 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7104 break;
7106 default:
7107 ret = get_errno(safe_fcntl(fd, cmd, arg));
7108 break;
7110 return ret;
7113 #ifdef USE_UID16
7115 static inline int high2lowuid(int uid)
7117 if (uid > 65535)
7118 return 65534;
7119 else
7120 return uid;
7123 static inline int high2lowgid(int gid)
7125 if (gid > 65535)
7126 return 65534;
7127 else
7128 return gid;
7131 static inline int low2highuid(int uid)
7133 if ((int16_t)uid == -1)
7134 return -1;
7135 else
7136 return uid;
7139 static inline int low2highgid(int gid)
7141 if ((int16_t)gid == -1)
7142 return -1;
7143 else
7144 return gid;
7146 static inline int tswapid(int id)
7148 return tswap16(id);
7151 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7153 #else /* !USE_UID16 */
7154 static inline int high2lowuid(int uid)
7156 return uid;
7158 static inline int high2lowgid(int gid)
7160 return gid;
7162 static inline int low2highuid(int uid)
7164 return uid;
7166 static inline int low2highgid(int gid)
7168 return gid;
7170 static inline int tswapid(int id)
7172 return tswap32(id);
7175 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7177 #endif /* USE_UID16 */
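/*
 * Examples of the 16-bit edge cases handled above: high2lowuid(100000)
 * yields 65534 (the kernel's traditional "overflowuid"), and
 * low2highuid(0xffff) sign-extends the 16-bit -1 so that "no change"
 * requests keep their meaning on the host.
 */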
7179 /* We must do direct syscalls for setting UID/GID, because we want to
7180 * implement the Linux system call semantics of "change only for this thread",
7181 * not the libc/POSIX semantics of "change for all threads in process".
7182 * (See http://ewontfix.com/17/ for more details.)
7183 * We use the 32-bit version of the syscalls if present; if it is not
7184 * then either the host architecture supports 32-bit UIDs natively with
7185 * the standard syscall, or the 16-bit UID is the best we can do.
7187 #ifdef __NR_setuid32
7188 #define __NR_sys_setuid __NR_setuid32
7189 #else
7190 #define __NR_sys_setuid __NR_setuid
7191 #endif
7192 #ifdef __NR_setgid32
7193 #define __NR_sys_setgid __NR_setgid32
7194 #else
7195 #define __NR_sys_setgid __NR_setgid
7196 #endif
7197 #ifdef __NR_setresuid32
7198 #define __NR_sys_setresuid __NR_setresuid32
7199 #else
7200 #define __NR_sys_setresuid __NR_setresuid
7201 #endif
7202 #ifdef __NR_setresgid32
7203 #define __NR_sys_setresgid __NR_setresgid32
7204 #else
7205 #define __NR_sys_setresgid __NR_setresgid
7206 #endif
7208 _syscall1(int, sys_setuid, uid_t, uid)
7209 _syscall1(int, sys_setgid, gid_t, gid)
7210 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7211 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
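/*
 * Illustrative difference (host libc behaviour as described in the
 * comment above) between the raw wrappers and the libc functions they
 * bypass:
 *
 *     setuid(1000);      // glibc: all threads switch to uid 1000
 *     sys_setuid(1000);  // raw syscall: only this thread changes
 *
 * The guest kernel ABI promises the second behaviour, so the wrappers
 * are used even when a libc setuid() is available.
 */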
7213 void syscall_init(void)
7215 IOCTLEntry *ie;
7216 const argtype *arg_type;
7217 int size;
7219 thunk_init(STRUCT_MAX);
7221 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7222 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7223 #include "syscall_types.h"
7224 #undef STRUCT
7225 #undef STRUCT_SPECIAL
7227 /* We patch the ioctl size if necessary. We rely on the fact that
7228 no ioctl has all bits set to '1' in the size field. */
7229 ie = ioctl_entries;
7230 while (ie->target_cmd != 0) {
7231 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7232 TARGET_IOC_SIZEMASK) {
7233 arg_type = ie->arg_type;
7234 if (arg_type[0] != TYPE_PTR) {
7235 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7236 ie->target_cmd);
7237 exit(1);
7239 arg_type++;
7240 size = thunk_type_size(arg_type, 0);
7241 ie->target_cmd = (ie->target_cmd &
7242 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7243 (size << TARGET_IOC_SIZESHIFT);
7246 /* Automatic consistency check when host and target arch match. */
7247 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7248 (defined(__x86_64__) && defined(TARGET_X86_64))
7249 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7250 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7251 ie->name, ie->target_cmd, ie->host_cmd);
7253 #endif
7254 ie++;
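/*
 * Example of the patching above: an entry whose size field was left as
 * TARGET_IOC_SIZEMASK (all ones) gets that field rewritten as
 *
 *     ie->target_cmd = (ie->target_cmd &
 *                       ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT))
 *                    | (thunk_type_size(arg_type, 0) << TARGET_IOC_SIZESHIFT);
 *
 * so the command number finally encodes the target-side payload size.
 */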
7258 #ifdef TARGET_NR_truncate64
7259 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7260 abi_long arg2,
7261 abi_long arg3,
7262 abi_long arg4)
7264 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7265 arg2 = arg3;
7266 arg3 = arg4;
7268 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7270 #endif
7272 #ifdef TARGET_NR_ftruncate64
7273 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7274 abi_long arg2,
7275 abi_long arg3,
7276 abi_long arg4)
7278 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7279 arg2 = arg3;
7280 arg3 = arg4;
7282 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7284 #endif
7286 #if defined(TARGET_NR_timer_settime) || \
7287 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7288 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7289 abi_ulong target_addr)
7291 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7292 offsetof(struct target_itimerspec,
7293 it_interval)) ||
7294 target_to_host_timespec(&host_its->it_value, target_addr +
7295 offsetof(struct target_itimerspec,
7296 it_value))) {
7297 return -TARGET_EFAULT;
7300 return 0;
7302 #endif
7304 #if defined(TARGET_NR_timer_settime64) || \
7305 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7306 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7307 abi_ulong target_addr)
7309 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7310 offsetof(struct target__kernel_itimerspec,
7311 it_interval)) ||
7312 target_to_host_timespec64(&host_its->it_value, target_addr +
7313 offsetof(struct target__kernel_itimerspec,
7314 it_value))) {
7315 return -TARGET_EFAULT;
7318 return 0;
7320 #endif
7322 #if ((defined(TARGET_NR_timerfd_gettime) || \
7323 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7324 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7325 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7326 struct itimerspec *host_its)
7328 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7329 it_interval),
7330 &host_its->it_interval) ||
7331 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7332 it_value),
7333 &host_its->it_value)) {
7334 return -TARGET_EFAULT;
7336 return 0;
7338 #endif
7340 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7341 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7342 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7343 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7344 struct itimerspec *host_its)
7346 if (host_to_target_timespec64(target_addr +
7347 offsetof(struct target__kernel_itimerspec,
7348 it_interval),
7349 &host_its->it_interval) ||
7350 host_to_target_timespec64(target_addr +
7351 offsetof(struct target__kernel_itimerspec,
7352 it_value),
7353 &host_its->it_value)) {
7354 return -TARGET_EFAULT;
7356 return 0;
7358 #endif
7360 #if defined(TARGET_NR_adjtimex) || \
7361 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7362 static inline abi_long target_to_host_timex(struct timex *host_tx,
7363 abi_long target_addr)
7365 struct target_timex *target_tx;
7367 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7368 return -TARGET_EFAULT;
7371 __get_user(host_tx->modes, &target_tx->modes);
7372 __get_user(host_tx->offset, &target_tx->offset);
7373 __get_user(host_tx->freq, &target_tx->freq);
7374 __get_user(host_tx->maxerror, &target_tx->maxerror);
7375 __get_user(host_tx->esterror, &target_tx->esterror);
7376 __get_user(host_tx->status, &target_tx->status);
7377 __get_user(host_tx->constant, &target_tx->constant);
7378 __get_user(host_tx->precision, &target_tx->precision);
7379 __get_user(host_tx->tolerance, &target_tx->tolerance);
7380 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7381 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7382 __get_user(host_tx->tick, &target_tx->tick);
7383 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7384 __get_user(host_tx->jitter, &target_tx->jitter);
7385 __get_user(host_tx->shift, &target_tx->shift);
7386 __get_user(host_tx->stabil, &target_tx->stabil);
7387 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7388 __get_user(host_tx->calcnt, &target_tx->calcnt);
7389 __get_user(host_tx->errcnt, &target_tx->errcnt);
7390 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7391 __get_user(host_tx->tai, &target_tx->tai);
7393 unlock_user_struct(target_tx, target_addr, 0);
7394 return 0;
7397 static inline abi_long host_to_target_timex(abi_long target_addr,
7398 struct timex *host_tx)
7400 struct target_timex *target_tx;
7402 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7403 return -TARGET_EFAULT;
7406 __put_user(host_tx->modes, &target_tx->modes);
7407 __put_user(host_tx->offset, &target_tx->offset);
7408 __put_user(host_tx->freq, &target_tx->freq);
7409 __put_user(host_tx->maxerror, &target_tx->maxerror);
7410 __put_user(host_tx->esterror, &target_tx->esterror);
7411 __put_user(host_tx->status, &target_tx->status);
7412 __put_user(host_tx->constant, &target_tx->constant);
7413 __put_user(host_tx->precision, &target_tx->precision);
7414 __put_user(host_tx->tolerance, &target_tx->tolerance);
7415 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7416 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7417 __put_user(host_tx->tick, &target_tx->tick);
7418 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7419 __put_user(host_tx->jitter, &target_tx->jitter);
7420 __put_user(host_tx->shift, &target_tx->shift);
7421 __put_user(host_tx->stabil, &target_tx->stabil);
7422 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7423 __put_user(host_tx->calcnt, &target_tx->calcnt);
7424 __put_user(host_tx->errcnt, &target_tx->errcnt);
7425 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7426 __put_user(host_tx->tai, &target_tx->tai);
7428 unlock_user_struct(target_tx, target_addr, 1);
7429 return 0;
7431 #endif
7434 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7435 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7436 abi_long target_addr)
7438 struct target__kernel_timex *target_tx;
7440 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7441 offsetof(struct target__kernel_timex,
7442 time))) {
7443 return -TARGET_EFAULT;
7446 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7447 return -TARGET_EFAULT;
7450 __get_user(host_tx->modes, &target_tx->modes);
7451 __get_user(host_tx->offset, &target_tx->offset);
7452 __get_user(host_tx->freq, &target_tx->freq);
7453 __get_user(host_tx->maxerror, &target_tx->maxerror);
7454 __get_user(host_tx->esterror, &target_tx->esterror);
7455 __get_user(host_tx->status, &target_tx->status);
7456 __get_user(host_tx->constant, &target_tx->constant);
7457 __get_user(host_tx->precision, &target_tx->precision);
7458 __get_user(host_tx->tolerance, &target_tx->tolerance);
7459 __get_user(host_tx->tick, &target_tx->tick);
7460 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7461 __get_user(host_tx->jitter, &target_tx->jitter);
7462 __get_user(host_tx->shift, &target_tx->shift);
7463 __get_user(host_tx->stabil, &target_tx->stabil);
7464 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7465 __get_user(host_tx->calcnt, &target_tx->calcnt);
7466 __get_user(host_tx->errcnt, &target_tx->errcnt);
7467 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7468 __get_user(host_tx->tai, &target_tx->tai);
7470 unlock_user_struct(target_tx, target_addr, 0);
7471 return 0;
7474 static inline abi_long host_to_target_timex64(abi_long target_addr,
7475 struct timex *host_tx)
7477 struct target__kernel_timex *target_tx;
7479 if (copy_to_user_timeval64(target_addr +
7480 offsetof(struct target__kernel_timex, time),
7481 &host_tx->time)) {
7482 return -TARGET_EFAULT;
7485 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7486 return -TARGET_EFAULT;
7489 __put_user(host_tx->modes, &target_tx->modes);
7490 __put_user(host_tx->offset, &target_tx->offset);
7491 __put_user(host_tx->freq, &target_tx->freq);
7492 __put_user(host_tx->maxerror, &target_tx->maxerror);
7493 __put_user(host_tx->esterror, &target_tx->esterror);
7494 __put_user(host_tx->status, &target_tx->status);
7495 __put_user(host_tx->constant, &target_tx->constant);
7496 __put_user(host_tx->precision, &target_tx->precision);
7497 __put_user(host_tx->tolerance, &target_tx->tolerance);
7498 __put_user(host_tx->tick, &target_tx->tick);
7499 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7500 __put_user(host_tx->jitter, &target_tx->jitter);
7501 __put_user(host_tx->shift, &target_tx->shift);
7502 __put_user(host_tx->stabil, &target_tx->stabil);
7503 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7504 __put_user(host_tx->calcnt, &target_tx->calcnt);
7505 __put_user(host_tx->errcnt, &target_tx->errcnt);
7506 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7507 __put_user(host_tx->tai, &target_tx->tai);
7509 unlock_user_struct(target_tx, target_addr, 1);
7510 return 0;
7512 #endif
7514 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7515 #define sigev_notify_thread_id _sigev_un._tid
7516 #endif
7518 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7519 abi_ulong target_addr)
7521 struct target_sigevent *target_sevp;
7523 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7524 return -TARGET_EFAULT;
7527 /* This union is awkward on 64-bit systems because it has a 32-bit
7528 * integer and a pointer in it; we follow the conversion approach
7529 * used for handling sigval types in signal.c, so the guest should get
7530 * the correct value back even if we did a 64-bit byteswap and it's
7531 * using the 32-bit integer.
7533 host_sevp->sigev_value.sival_ptr =
7534 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7535 host_sevp->sigev_signo =
7536 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7537 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7538 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7540 unlock_user_struct(target_sevp, target_addr, 1);
7541 return 0;
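/*
 * Illustrative use: a guest timer_create() with SIGEV_THREAD_ID lands
 * here, and the sigev_notify_thread_id field (or the _sigev_un._tid
 * fallback defined above) carries the notified thread's kernel tid
 * through to the host sigevent.
 */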
7544 #if defined(TARGET_NR_mlockall)
7545 static inline int target_to_host_mlockall_arg(int arg)
7547 int result = 0;
7549 if (arg & TARGET_MCL_CURRENT) {
7550 result |= MCL_CURRENT;
7552 if (arg & TARGET_MCL_FUTURE) {
7553 result |= MCL_FUTURE;
7555 #ifdef MCL_ONFAULT
7556 if (arg & TARGET_MCL_ONFAULT) {
7557 result |= MCL_ONFAULT;
7559 #endif
7561 return result;
7563 #endif
7565 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7566 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7567 defined(TARGET_NR_newfstatat))
7568 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7569 abi_ulong target_addr,
7570 struct stat *host_st)
7572 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7573 if (cpu_env->eabi) {
7574 struct target_eabi_stat64 *target_st;
7576 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7577 return -TARGET_EFAULT;
7578 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7579 __put_user(host_st->st_dev, &target_st->st_dev);
7580 __put_user(host_st->st_ino, &target_st->st_ino);
7581 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7582 __put_user(host_st->st_ino, &target_st->__st_ino);
7583 #endif
7584 __put_user(host_st->st_mode, &target_st->st_mode);
7585 __put_user(host_st->st_nlink, &target_st->st_nlink);
7586 __put_user(host_st->st_uid, &target_st->st_uid);
7587 __put_user(host_st->st_gid, &target_st->st_gid);
7588 __put_user(host_st->st_rdev, &target_st->st_rdev);
7589 __put_user(host_st->st_size, &target_st->st_size);
7590 __put_user(host_st->st_blksize, &target_st->st_blksize);
7591 __put_user(host_st->st_blocks, &target_st->st_blocks);
7592 __put_user(host_st->st_atime, &target_st->target_st_atime);
7593 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7594 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7595 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7596 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7597 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7598 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7599 #endif
7600 unlock_user_struct(target_st, target_addr, 1);
7601 } else
7602 #endif
7604 #if defined(TARGET_HAS_STRUCT_STAT64)
7605 struct target_stat64 *target_st;
7606 #else
7607 struct target_stat *target_st;
7608 #endif
7610 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7611 return -TARGET_EFAULT;
7612 memset(target_st, 0, sizeof(*target_st));
7613 __put_user(host_st->st_dev, &target_st->st_dev);
7614 __put_user(host_st->st_ino, &target_st->st_ino);
7615 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7616 __put_user(host_st->st_ino, &target_st->__st_ino);
7617 #endif
7618 __put_user(host_st->st_mode, &target_st->st_mode);
7619 __put_user(host_st->st_nlink, &target_st->st_nlink);
7620 __put_user(host_st->st_uid, &target_st->st_uid);
7621 __put_user(host_st->st_gid, &target_st->st_gid);
7622 __put_user(host_st->st_rdev, &target_st->st_rdev);
7623 /* XXX: better use of kernel struct */
7624 __put_user(host_st->st_size, &target_st->st_size);
7625 __put_user(host_st->st_blksize, &target_st->st_blksize);
7626 __put_user(host_st->st_blocks, &target_st->st_blocks);
7627 __put_user(host_st->st_atime, &target_st->target_st_atime);
7628 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7629 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7630 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7631 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7632 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7633 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7634 #endif
7635 unlock_user_struct(target_st, target_addr, 1);
7638 return 0;
7640 #endif
7642 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7643 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7644 abi_ulong target_addr)
7646 struct target_statx *target_stx;
7648 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7649 return -TARGET_EFAULT;
7651 memset(target_stx, 0, sizeof(*target_stx));
7653 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7654 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7655 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7656 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7657 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7658 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7659 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7660 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7661 __put_user(host_stx->stx_size, &target_stx->stx_size);
7662 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7663 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7664 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7665 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7666 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7667 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7668 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7669 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7670 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7671 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7672 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7673 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7674 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7675 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7677 unlock_user_struct(target_stx, target_addr, 1);
7679 return 0;
7681 #endif
7683 static int do_sys_futex(int *uaddr, int op, int val,
7684 const struct timespec *timeout, int *uaddr2,
7685 int val3)
7687 #if HOST_LONG_BITS == 64
7688 #if defined(__NR_futex)
7689 /* always a 64-bit time_t; there is no _time64 variant */
7690 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7692 #endif
7693 #else /* HOST_LONG_BITS == 64 */
7694 #if defined(__NR_futex_time64)
7695 if (sizeof(timeout->tv_sec) == 8) {
7696 /* _time64 function on 32-bit arch */
7697 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7699 #endif
7700 #if defined(__NR_futex)
7701 /* old function on 32-bit arch */
7702 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7703 #endif
7704 #endif /* HOST_LONG_BITS == 64 */
7705 g_assert_not_reached();
7708 static int do_safe_futex(int *uaddr, int op, int val,
7709 const struct timespec *timeout, int *uaddr2,
7710 int val3)
7712 #if HOST_LONG_BITS == 64
7713 #if defined(__NR_futex)
7714 /* time_t is always 64-bit here, so there is no _time64 variant */
7715 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7716 #endif
7717 #else /* HOST_LONG_BITS == 64 */
7718 #if defined(__NR_futex_time64)
7719 if (sizeof(timeout->tv_sec) == 8) {
7720 /* _time64 function on 32bit arch */
7721 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7722 val3));
7724 #endif
7725 #if defined(__NR_futex)
7726 /* old function on 32bit arch */
7727 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7728 #endif
7729 #endif /* HOST_LONG_BITS == 64 */
7730 return -TARGET_ENOSYS;
7733 /* ??? Using host futex calls even when target atomic operations
7734 are not really atomic probably breaks things. However, implementing
7735 futexes locally would make futexes shared between multiple processes
7736 tricky; and they are probably useless anyway, because guest atomic
7737 operations will not work either. */
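/*
 * Illustrative sketch (not part of the original source): for a guest
 * FUTEX_WAIT, the helpers below end up forwarding the translated
 * address and a byte-swapped compare value to the host kernel:
 *
 *     uint32_t *haddr = g2h(cpu, guest_uaddr);    // guest -> host pointer
 *     do_safe_futex(haddr, FUTEX_WAIT, tswap32(guest_val), pts, NULL, 0);
 *
 * tswap32() matters on cross-endian setups: the host kernel compares
 * against the futex word in guest memory, which holds the guest's
 * byte order.
 */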
7738 #if defined(TARGET_NR_futex)
7739 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7740 target_ulong timeout, target_ulong uaddr2, int val3)
7742 struct timespec ts, *pts;
7743 int base_op;
7745 /* ??? We assume FUTEX_* constants are the same on both host
7746 and target. */
7747 #ifdef FUTEX_CMD_MASK
7748 base_op = op & FUTEX_CMD_MASK;
7749 #else
7750 base_op = op;
7751 #endif
7752 switch (base_op) {
7753 case FUTEX_WAIT:
7754 case FUTEX_WAIT_BITSET:
7755 if (timeout) {
7756 pts = &ts;
7757 target_to_host_timespec(pts, timeout);
7758 } else {
7759 pts = NULL;
7761 return do_safe_futex(g2h(cpu, uaddr),
7762 op, tswap32(val), pts, NULL, val3);
7763 case FUTEX_WAKE:
7764 return do_safe_futex(g2h(cpu, uaddr),
7765 op, val, NULL, NULL, 0);
7766 case FUTEX_FD:
7767 return do_safe_futex(g2h(cpu, uaddr),
7768 op, val, NULL, NULL, 0);
7769 case FUTEX_REQUEUE:
7770 case FUTEX_CMP_REQUEUE:
7771 case FUTEX_WAKE_OP:
7772 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7773 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7774 But the prototype takes a `struct timespec *'; insert casts
7775 to satisfy the compiler. We do not need to tswap TIMEOUT
7776 since it's not compared to guest memory. */
7777 pts = (struct timespec *)(uintptr_t) timeout;
7778 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7779 (base_op == FUTEX_CMP_REQUEUE
7780 ? tswap32(val3) : val3));
7781 default:
7782 return -TARGET_ENOSYS;
7785 #endif
7787 #if defined(TARGET_NR_futex_time64)
7788 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7789 int val, target_ulong timeout,
7790 target_ulong uaddr2, int val3)
7792 struct timespec ts, *pts;
7793 int base_op;
7795 /* ??? We assume FUTEX_* constants are the same on both host
7796 and target. */
7797 #ifdef FUTEX_CMD_MASK
7798 base_op = op & FUTEX_CMD_MASK;
7799 #else
7800 base_op = op;
7801 #endif
7802 switch (base_op) {
7803 case FUTEX_WAIT:
7804 case FUTEX_WAIT_BITSET:
7805 if (timeout) {
7806 pts = &ts;
7807 if (target_to_host_timespec64(pts, timeout)) {
7808 return -TARGET_EFAULT;
7810 } else {
7811 pts = NULL;
7813 return do_safe_futex(g2h(cpu, uaddr), op,
7814 tswap32(val), pts, NULL, val3);
7815 case FUTEX_WAKE:
7816 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7817 case FUTEX_FD:
7818 return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7819 case FUTEX_REQUEUE:
7820 case FUTEX_CMP_REQUEUE:
7821 case FUTEX_WAKE_OP:
7822 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7823 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7824 But the prototype takes a `struct timespec *'; insert casts
7825 to satisfy the compiler. We do not need to tswap TIMEOUT
7826 since it's not compared to guest memory. */
7827 pts = (struct timespec *)(uintptr_t) timeout;
7828 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7829 (base_op == FUTEX_CMP_REQUEUE
7830 ? tswap32(val3) : val3));
7831 default:
7832 return -TARGET_ENOSYS;
7835 #endif
7837 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7838 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7839 abi_long handle, abi_long mount_id,
7840 abi_long flags)
7842 struct file_handle *target_fh;
7843 struct file_handle *fh;
7844 int mid = 0;
7845 abi_long ret;
7846 char *name;
7847 unsigned int size, total_size;
7849 if (get_user_s32(size, handle)) {
7850 return -TARGET_EFAULT;
7853 name = lock_user_string(pathname);
7854 if (!name) {
7855 return -TARGET_EFAULT;
7858 total_size = sizeof(struct file_handle) + size;
7859 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7860 if (!target_fh) {
7861 unlock_user(name, pathname, 0);
7862 return -TARGET_EFAULT;
7865 fh = g_malloc0(total_size);
7866 fh->handle_bytes = size;
7868 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7869 unlock_user(name, pathname, 0);
7871 /* man name_to_handle_at(2):
7872 * Other than the use of the handle_bytes field, the caller should treat
7873 * the file_handle structure as an opaque data type.
7874 */
7876 memcpy(target_fh, fh, total_size);
7877 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7878 target_fh->handle_type = tswap32(fh->handle_type);
7879 g_free(fh);
7880 unlock_user(target_fh, handle, total_size);
7882 if (put_user_s32(mid, mount_id)) {
7883 return -TARGET_EFAULT;
7886 return ret;
7889 #endif
7891 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7892 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7893 abi_long flags)
7895 struct file_handle *target_fh;
7896 struct file_handle *fh;
7897 unsigned int size, total_size;
7898 abi_long ret;
7900 if (get_user_s32(size, handle)) {
7901 return -TARGET_EFAULT;
7904 total_size = sizeof(struct file_handle) + size;
7905 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7906 if (!target_fh) {
7907 return -TARGET_EFAULT;
7910 fh = g_memdup(target_fh, total_size);
7911 fh->handle_bytes = size;
7912 fh->handle_type = tswap32(target_fh->handle_type);
7914 ret = get_errno(open_by_handle_at(mount_fd, fh,
7915 target_to_host_bitmask(flags, fcntl_flags_tbl)));
7917 g_free(fh);
7919 unlock_user(target_fh, handle, total_size);
7921 return ret;
7923 #endif
7925 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7927 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7929 int host_flags;
7930 target_sigset_t *target_mask;
7931 sigset_t host_mask;
7932 abi_long ret;
7934 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7935 return -TARGET_EINVAL;
7937 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7938 return -TARGET_EFAULT;
7941 target_to_host_sigset(&host_mask, target_mask);
7943 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7945 ret = get_errno(signalfd(fd, &host_mask, host_flags));
7946 if (ret >= 0) {
7947 fd_trans_register(ret, &target_signalfd_trans);
7950 unlock_user_struct(target_mask, mask, 0);
7952 return ret;
7953 }
7954 #endif
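/*
 * Illustrative note (not part of the original source): registering
 * target_signalfd_trans above means a later read(2) on this fd is
 * post-processed by the fd_trans hook in TARGET_NR_read, converting
 * each host struct signalfd_siginfo in the buffer to the target's
 * layout (e.g. byte-swapping ssi_signo for a cross-endian guest).
 */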
7956 /* Map host to target signal numbers for the wait family of syscalls.
7957 Assume all other status bits are the same. */
7958 int host_to_target_waitstatus(int status)
7960 if (WIFSIGNALED(status)) {
7961 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7963 if (WIFSTOPPED(status)) {
7964 return (host_to_target_signal(WSTOPSIG(status)) << 8)
7965 | (status & 0xff);
7967 return status;
7968 }
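/*
 * Worked example (illustrative only): for a child killed by host
 * SIGUSR1 (10 on x86_64), WTERMSIG(status) sits in bits 0-6, so the
 * low bits are replaced with the target's number for that signal
 * (e.g. 30 on Alpha) while all other status bits are kept:
 *
 *     status = host_to_target_signal(10) | (status & ~0x7f);
 *
 * A stopped child instead carries the signal in bits 8-15, hence the
 * separate WIFSTOPPED() branch above.
 */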
7970 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7972 CPUState *cpu = env_cpu(cpu_env);
7973 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7974 int i;
7976 for (i = 0; i < bprm->argc; i++) {
7977 size_t len = strlen(bprm->argv[i]) + 1;
7979 if (write(fd, bprm->argv[i], len) != len) {
7980 return -1;
7984 return 0;
7987 static int open_self_maps(CPUArchState *cpu_env, int fd)
7989 CPUState *cpu = env_cpu(cpu_env);
7990 TaskState *ts = cpu->opaque;
7991 GSList *map_info = read_self_maps();
7992 GSList *s;
7993 int count;
7995 for (s = map_info; s; s = g_slist_next(s)) {
7996 MapInfo *e = (MapInfo *) s->data;
7998 if (h2g_valid(e->start)) {
7999 unsigned long min = e->start;
8000 unsigned long max = e->end;
8001 int flags = page_get_flags(h2g(min));
8002 const char *path;
8004 max = h2g_valid(max - 1) ?
8005 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8007 if (page_check_range(h2g(min), max - min, flags) == -1) {
8008 continue;
8011 if (h2g(min) == ts->info->stack_limit) {
8012 path = "[stack]";
8013 } else {
8014 path = e->path;
8017 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8018 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8019 h2g(min), h2g(max - 1) + 1,
8020 (flags & PAGE_READ) ? 'r' : '-',
8021 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8022 (flags & PAGE_EXEC) ? 'x' : '-',
8023 e->is_priv ? 'p' : 's',
8024 (uint64_t) e->offset, e->dev, e->inode);
8025 if (path) {
8026 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8027 } else {
8028 dprintf(fd, "\n");
8033 free_self_maps(map_info);
8035 #ifdef TARGET_VSYSCALL_PAGE
8036 /*
8037 * We only support execution from the vsyscall page.
8038 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8039 */
8040 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8041 " --xp 00000000 00:00 0",
8042 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8043 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8044 #endif
8046 return 0;
8047 }
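/*
 * Example of one line this synthesizes (illustrative; addresses,
 * device and inode values depend on the actual mappings):
 *
 *     00400000-00452000 r-xp 00000000 08:02 173521    /usr/bin/foo
 *
 * i.e. the usual /proc/pid/maps format, except that host ranges are
 * translated back into guest addresses with h2g() and the guest stack
 * is reported as "[stack]".
 */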
8049 static int open_self_stat(CPUArchState *cpu_env, int fd)
8051 CPUState *cpu = env_cpu(cpu_env);
8052 TaskState *ts = cpu->opaque;
8053 g_autoptr(GString) buf = g_string_new(NULL);
8054 int i;
8056 for (i = 0; i < 44; i++) {
8057 if (i == 0) {
8058 /* pid */
8059 g_string_printf(buf, FMT_pid " ", getpid());
8060 } else if (i == 1) {
8061 /* app name */
8062 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8063 bin = bin ? bin + 1 : ts->bprm->argv[0];
8064 g_string_printf(buf, "(%.15s) ", bin);
8065 } else if (i == 3) {
8066 /* ppid */
8067 g_string_printf(buf, FMT_pid " ", getppid());
8068 } else if (i == 21) {
8069 /* starttime */
8070 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8071 } else if (i == 27) {
8072 /* stack bottom */
8073 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8074 } else {
8075 /* for the rest, there is MasterCard */
8076 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8079 if (write(fd, buf->str, buf->len) != buf->len) {
8080 return -1;
8084 return 0;
8085 }
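/*
 * Illustrative output (not from the original source): only fields 1
 * (pid), 2 (comm), 4 (ppid), 22 (starttime) and 28 (startstack) carry
 * real values; the remaining fields are all reported as 0, e.g.:
 *
 *     4242 (foo) 0 4241 0 0 0 ... 0
 */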
8087 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8089 CPUState *cpu = env_cpu(cpu_env);
8090 TaskState *ts = cpu->opaque;
8091 abi_ulong auxv = ts->info->saved_auxv;
8092 abi_ulong len = ts->info->auxv_len;
8093 char *ptr;
8095 /*
8096 * The auxiliary vector is stored on the target process stack;
8097 * read in the whole auxv vector and copy it out to the file.
8098 */
8099 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8100 if (ptr != NULL) {
8101 while (len > 0) {
8102 ssize_t r;
8103 r = write(fd, ptr, len);
8104 if (r <= 0) {
8105 break;
8107 len -= r;
8108 ptr += r;
8110 lseek(fd, 0, SEEK_SET);
8111 unlock_user(ptr, auxv, len);
8114 return 0;
8117 static int is_proc_myself(const char *filename, const char *entry)
8119 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8120 filename += strlen("/proc/");
8121 if (!strncmp(filename, "self/", strlen("self/"))) {
8122 filename += strlen("self/");
8123 } else if (*filename >= '1' && *filename <= '9') {
8124 char myself[80];
8125 snprintf(myself, sizeof(myself), "%d/", getpid());
8126 if (!strncmp(filename, myself, strlen(myself))) {
8127 filename += strlen(myself);
8128 } else {
8129 return 0;
8131 } else {
8132 return 0;
8134 if (!strcmp(filename, entry)) {
8135 return 1;
8138 return 0;
8139 }
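/*
 * Usage sketch (illustrative): assuming getpid() == 4242,
 *
 *     is_proc_myself("/proc/self/maps", "maps")  -> 1
 *     is_proc_myself("/proc/4242/maps", "maps")  -> 1
 *     is_proc_myself("/proc/4243/maps", "maps")  -> 0   (other process)
 *     is_proc_myself("/proc/meminfo", "maps")    -> 0
 */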
8141 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8142 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8143 static int is_proc(const char *filename, const char *entry)
8145 return strcmp(filename, entry) == 0;
8147 #endif
8149 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8150 static int open_net_route(CPUArchState *cpu_env, int fd)
8152 FILE *fp;
8153 char *line = NULL;
8154 size_t len = 0;
8155 ssize_t read;
8157 fp = fopen("/proc/net/route", "r");
8158 if (fp == NULL) {
8159 return -1;
8162 /* read header */
8164 read = getline(&line, &len, fp);
8165 dprintf(fd, "%s", line);
8167 /* read routes */
8169 while ((read = getline(&line, &len, fp)) != -1) {
8170 char iface[16];
8171 uint32_t dest, gw, mask;
8172 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8173 int fields;
8175 fields = sscanf(line,
8176 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8177 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8178 &mask, &mtu, &window, &irtt);
8179 if (fields != 11) {
8180 continue;
8182 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8183 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8184 metric, tswap32(mask), mtu, window, irtt);
8187 free(line);
8188 fclose(fp);
8190 return 0;
8191 }
8192 #endif
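/*
 * Illustrative example (not part of the original source): the guest
 * parses these hex fields in its own byte order, so for a big-endian
 * guest on a little-endian host a destination of 192.168.0.0 - read
 * from the host file as 0x0000A8C0 - is rewritten by tswap32() to
 * 0xC0A80000 before being handed to the guest.
 */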
8194 #if defined(TARGET_SPARC)
8195 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8197 dprintf(fd, "type\t\t: sun4u\n");
8198 return 0;
8200 #endif
8202 #if defined(TARGET_HPPA)
8203 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8205 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8206 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8207 dprintf(fd, "capabilities\t: os32\n");
8208 dprintf(fd, "model\t\t: 9000/778/B160L\n");
8209 dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8210 return 0;
8212 #endif
8214 #if defined(TARGET_M68K)
8215 static int open_hardware(CPUArchState *cpu_env, int fd)
8217 dprintf(fd, "Model:\t\tqemu-m68k\n");
8218 return 0;
8220 #endif
8222 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8224 struct fake_open {
8225 const char *filename;
8226 int (*fill)(CPUArchState *cpu_env, int fd);
8227 int (*cmp)(const char *s1, const char *s2);
8229 const struct fake_open *fake_open;
8230 static const struct fake_open fakes[] = {
8231 { "maps", open_self_maps, is_proc_myself },
8232 { "stat", open_self_stat, is_proc_myself },
8233 { "auxv", open_self_auxv, is_proc_myself },
8234 { "cmdline", open_self_cmdline, is_proc_myself },
8235 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8236 { "/proc/net/route", open_net_route, is_proc },
8237 #endif
8238 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8239 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8240 #endif
8241 #if defined(TARGET_M68K)
8242 { "/proc/hardware", open_hardware, is_proc },
8243 #endif
8244 { NULL, NULL, NULL }
8247 if (is_proc_myself(pathname, "exe")) {
8248 int execfd = qemu_getauxval(AT_EXECFD);
8249 return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8252 for (fake_open = fakes; fake_open->filename; fake_open++) {
8253 if (fake_open->cmp(pathname, fake_open->filename)) {
8254 break;
8258 if (fake_open->filename) {
8259 const char *tmpdir;
8260 char filename[PATH_MAX];
8261 int fd, r;
8263 fd = memfd_create("qemu-open", 0);
8264 if (fd < 0) {
8265 if (errno != ENOSYS) {
8266 return fd;
8268 /* create temporary file to map stat to */
8269 tmpdir = getenv("TMPDIR");
8270 if (!tmpdir)
8271 tmpdir = "/tmp";
8272 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8273 fd = mkstemp(filename);
8274 if (fd < 0) {
8275 return fd;
8277 unlink(filename);
8280 if ((r = fake_open->fill(cpu_env, fd))) {
8281 int e = errno;
8282 close(fd);
8283 errno = e;
8284 return r;
8286 lseek(fd, 0, SEEK_SET);
8288 return fd;
8291 return safe_openat(dirfd, path(pathname), flags, mode);
8294 #define TIMER_MAGIC 0x0caf0000
8295 #define TIMER_MAGIC_MASK 0xffff0000
8297 /* Convert QEMU provided timer ID back to internal 16bit index format */
8298 static target_timer_t get_timer_id(abi_long arg)
8300 target_timer_t timerid = arg;
8302 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8303 return -TARGET_EINVAL;
8306 timerid &= 0xffff;
8308 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8309 return -TARGET_EINVAL;
8312 return timerid;
8313 }
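/*
 * Worked example (illustrative): internal timer slot 3 is exposed to
 * the guest as 0x0caf0003.  get_timer_id() verifies the 0x0caf0000
 * signature in the top 16 bits and recovers index 3; any value
 * without the magic (e.g. a raw host timer_t leaked back by a buggy
 * guest) is rejected with -TARGET_EINVAL.
 */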
8315 static int target_to_host_cpu_mask(unsigned long *host_mask,
8316 size_t host_size,
8317 abi_ulong target_addr,
8318 size_t target_size)
8320 unsigned target_bits = sizeof(abi_ulong) * 8;
8321 unsigned host_bits = sizeof(*host_mask) * 8;
8322 abi_ulong *target_mask;
8323 unsigned i, j;
8325 assert(host_size >= target_size);
8327 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8328 if (!target_mask) {
8329 return -TARGET_EFAULT;
8331 memset(host_mask, 0, host_size);
8333 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8334 unsigned bit = i * target_bits;
8335 abi_ulong val;
8337 __get_user(val, &target_mask[i]);
8338 for (j = 0; j < target_bits; j++, bit++) {
8339 if (val & (1UL << j)) {
8340 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8345 unlock_user(target_mask, target_addr, 0);
8346 return 0;
8347 }
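/*
 * Illustrative example (not part of the original source): for a
 * 32-bit target on a 64-bit host, target words hold 32 mask bits and
 * host words hold 64, so bit 5 of target word 1 is absolute CPU 37
 * and lands in bit 37 of host word 0:
 *
 *     bit = 1 * 32 + 5;                            // 37
 *     host_mask[37 / 64] |= 1UL << (37 % 64);      // host_mask[0]
 */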
8349 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8350 size_t host_size,
8351 abi_ulong target_addr,
8352 size_t target_size)
8354 unsigned target_bits = sizeof(abi_ulong) * 8;
8355 unsigned host_bits = sizeof(*host_mask) * 8;
8356 abi_ulong *target_mask;
8357 unsigned i, j;
8359 assert(host_size >= target_size);
8361 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8362 if (!target_mask) {
8363 return -TARGET_EFAULT;
8366 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8367 unsigned bit = i * target_bits;
8368 abi_ulong val = 0;
8370 for (j = 0; j < target_bits; j++, bit++) {
8371 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8372 val |= 1UL << j;
8375 __put_user(val, &target_mask[i]);
8378 unlock_user(target_mask, target_addr, target_size);
8379 return 0;
8382 #ifdef TARGET_NR_getdents
8383 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8385 g_autofree void *hdirp = NULL;
8386 void *tdirp;
8387 int hlen, hoff, toff;
8388 int hreclen, treclen;
8389 off64_t prev_diroff = 0;
8391 hdirp = g_try_malloc(count);
8392 if (!hdirp) {
8393 return -TARGET_ENOMEM;
8396 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8397 hlen = sys_getdents(dirfd, hdirp, count);
8398 #else
8399 hlen = sys_getdents64(dirfd, hdirp, count);
8400 #endif
8402 hlen = get_errno(hlen);
8403 if (is_error(hlen)) {
8404 return hlen;
8407 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8408 if (!tdirp) {
8409 return -TARGET_EFAULT;
8412 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8413 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8414 struct linux_dirent *hde = hdirp + hoff;
8415 #else
8416 struct linux_dirent64 *hde = hdirp + hoff;
8417 #endif
8418 struct target_dirent *tde = tdirp + toff;
8419 int namelen;
8420 uint8_t type;
8422 namelen = strlen(hde->d_name);
8423 hreclen = hde->d_reclen;
8424 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8425 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8427 if (toff + treclen > count) {
8428 /*
8429 * If the host struct is smaller than the target struct, or
8430 * requires less alignment and thus packs into less space,
8431 * then the host can return more entries than we can pass
8432 * on to the guest.
8433 */
8434 if (toff == 0) {
8435 toff = -TARGET_EINVAL; /* result buffer is too small */
8436 break;
8437 }
8438 /*
8439 * Return what we have, resetting the file pointer to the
8440 * location of the first record not returned.
8441 */
8442 lseek64(dirfd, prev_diroff, SEEK_SET);
8443 break;
8446 prev_diroff = hde->d_off;
8447 tde->d_ino = tswapal(hde->d_ino);
8448 tde->d_off = tswapal(hde->d_off);
8449 tde->d_reclen = tswap16(treclen);
8450 memcpy(tde->d_name, hde->d_name, namelen + 1);
8452 /*
8453 * The getdents type is in what was formerly a padding byte at the
8454 * end of the structure.
8455 */
8456 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8457 type = *((uint8_t *)hde + hreclen - 1);
8458 #else
8459 type = hde->d_type;
8460 #endif
8461 *((uint8_t *)tde + treclen - 1) = type;
8464 unlock_user(tdirp, arg2, toff);
8465 return toff;
8466 }
8467 #endif /* TARGET_NR_getdents */
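/*
 * Worked example (illustrative): for d_name "abc" (namelen 3) the
 * target record needs offsetof(struct target_dirent, d_name) + 3 + 2
 * bytes - the +2 covering the NUL terminator and the trailing d_type
 * byte - rounded up to the struct's alignment.  Because that can
 * exceed the host record size, the loop above may have to stop early
 * and rewind the directory offset with lseek64().
 */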
8469 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8470 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8472 g_autofree void *hdirp = NULL;
8473 void *tdirp;
8474 int hlen, hoff, toff;
8475 int hreclen, treclen;
8476 off64_t prev_diroff = 0;
8478 hdirp = g_try_malloc(count);
8479 if (!hdirp) {
8480 return -TARGET_ENOMEM;
8483 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8484 if (is_error(hlen)) {
8485 return hlen;
8488 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8489 if (!tdirp) {
8490 return -TARGET_EFAULT;
8493 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8494 struct linux_dirent64 *hde = hdirp + hoff;
8495 struct target_dirent64 *tde = tdirp + toff;
8496 int namelen;
8498 namelen = strlen(hde->d_name) + 1;
8499 hreclen = hde->d_reclen;
8500 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8501 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8503 if (toff + treclen > count) {
8504 /*
8505 * If the host struct is smaller than the target struct, or
8506 * requires less alignment and thus packs into less space,
8507 * then the host can return more entries than we can pass
8508 * on to the guest.
8509 */
8510 if (toff == 0) {
8511 toff = -TARGET_EINVAL; /* result buffer is too small */
8512 break;
8513 }
8514 /*
8515 * Return what we have, resetting the file pointer to the
8516 * location of the first record not returned.
8517 */
8518 lseek64(dirfd, prev_diroff, SEEK_SET);
8519 break;
8522 prev_diroff = hde->d_off;
8523 tde->d_ino = tswap64(hde->d_ino);
8524 tde->d_off = tswap64(hde->d_off);
8525 tde->d_reclen = tswap16(treclen);
8526 tde->d_type = hde->d_type;
8527 memcpy(tde->d_name, hde->d_name, namelen);
8530 unlock_user(tdirp, arg2, toff);
8531 return toff;
8533 #endif /* TARGET_NR_getdents64 */
8535 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8536 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8537 #endif
8539 /* This is an internal helper for do_syscall so that it is easier
8540 * to have a single return point, which in turn allows actions such
8541 * as logging of syscall results to be performed in one place.
8542 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8543 */
8544 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8545 abi_long arg2, abi_long arg3, abi_long arg4,
8546 abi_long arg5, abi_long arg6, abi_long arg7,
8547 abi_long arg8)
8549 CPUState *cpu = env_cpu(cpu_env);
8550 abi_long ret;
8551 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8552 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8553 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8554 || defined(TARGET_NR_statx)
8555 struct stat st;
8556 #endif
8557 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8558 || defined(TARGET_NR_fstatfs)
8559 struct statfs stfs;
8560 #endif
8561 void *p;
8563 switch(num) {
8564 case TARGET_NR_exit:
8565 /* In old applications this may be used to implement _exit(2).
8566 However in threaded applications it is used for thread termination,
8567 and _exit_group is used for application termination.
8568 Do thread termination if we have more than one thread. */
8570 if (block_signals()) {
8571 return -QEMU_ERESTARTSYS;
8574 pthread_mutex_lock(&clone_lock);
8576 if (CPU_NEXT(first_cpu)) {
8577 TaskState *ts = cpu->opaque;
8579 object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8580 object_unref(OBJECT(cpu));
8581 /*
8582 * At this point the CPU should be unrealized and removed
8583 * from cpu lists. We can clean up the rest of the thread
8584 * data without the lock held.
8585 */
8587 pthread_mutex_unlock(&clone_lock);
8589 if (ts->child_tidptr) {
8590 put_user_u32(0, ts->child_tidptr);
8591 do_sys_futex(g2h(cpu, ts->child_tidptr),
8592 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8594 thread_cpu = NULL;
8595 g_free(ts);
8596 rcu_unregister_thread();
8597 pthread_exit(NULL);
8600 pthread_mutex_unlock(&clone_lock);
8601 preexit_cleanup(cpu_env, arg1);
8602 _exit(arg1);
8603 return 0; /* avoid warning */
8604 case TARGET_NR_read:
8605 if (arg2 == 0 && arg3 == 0) {
8606 return get_errno(safe_read(arg1, 0, 0));
8607 } else {
8608 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8609 return -TARGET_EFAULT;
8610 ret = get_errno(safe_read(arg1, p, arg3));
8611 if (ret >= 0 &&
8612 fd_trans_host_to_target_data(arg1)) {
8613 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8615 unlock_user(p, arg2, ret);
8617 return ret;
8618 case TARGET_NR_write:
8619 if (arg2 == 0 && arg3 == 0) {
8620 return get_errno(safe_write(arg1, 0, 0));
8622 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8623 return -TARGET_EFAULT;
8624 if (fd_trans_target_to_host_data(arg1)) {
8625 void *copy = g_malloc(arg3);
8626 memcpy(copy, p, arg3);
8627 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8628 if (ret >= 0) {
8629 ret = get_errno(safe_write(arg1, copy, ret));
8631 g_free(copy);
8632 } else {
8633 ret = get_errno(safe_write(arg1, p, arg3));
8635 unlock_user(p, arg2, 0);
8636 return ret;
8638 #ifdef TARGET_NR_open
8639 case TARGET_NR_open:
8640 if (!(p = lock_user_string(arg1)))
8641 return -TARGET_EFAULT;
8642 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8643 target_to_host_bitmask(arg2, fcntl_flags_tbl),
8644 arg3));
8645 fd_trans_unregister(ret);
8646 unlock_user(p, arg1, 0);
8647 return ret;
8648 #endif
8649 case TARGET_NR_openat:
8650 if (!(p = lock_user_string(arg2)))
8651 return -TARGET_EFAULT;
8652 ret = get_errno(do_openat(cpu_env, arg1, p,
8653 target_to_host_bitmask(arg3, fcntl_flags_tbl),
8654 arg4));
8655 fd_trans_unregister(ret);
8656 unlock_user(p, arg2, 0);
8657 return ret;
8658 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8659 case TARGET_NR_name_to_handle_at:
8660 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8661 return ret;
8662 #endif
8663 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8664 case TARGET_NR_open_by_handle_at:
8665 ret = do_open_by_handle_at(arg1, arg2, arg3);
8666 fd_trans_unregister(ret);
8667 return ret;
8668 #endif
8669 case TARGET_NR_close:
8670 fd_trans_unregister(arg1);
8671 return get_errno(close(arg1));
8673 case TARGET_NR_brk:
8674 return do_brk(arg1);
8675 #ifdef TARGET_NR_fork
8676 case TARGET_NR_fork:
8677 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8678 #endif
8679 #ifdef TARGET_NR_waitpid
8680 case TARGET_NR_waitpid:
8682 int status;
8683 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8684 if (!is_error(ret) && arg2 && ret
8685 && put_user_s32(host_to_target_waitstatus(status), arg2))
8686 return -TARGET_EFAULT;
8688 return ret;
8689 #endif
8690 #ifdef TARGET_NR_waitid
8691 case TARGET_NR_waitid:
8693 siginfo_t info;
8694 info.si_pid = 0;
8695 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8696 if (!is_error(ret) && arg3 && info.si_pid != 0) {
8697 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8698 return -TARGET_EFAULT;
8699 host_to_target_siginfo(p, &info);
8700 unlock_user(p, arg3, sizeof(target_siginfo_t));
8703 return ret;
8704 #endif
8705 #ifdef TARGET_NR_creat /* not on alpha */
8706 case TARGET_NR_creat:
8707 if (!(p = lock_user_string(arg1)))
8708 return -TARGET_EFAULT;
8709 ret = get_errno(creat(p, arg2));
8710 fd_trans_unregister(ret);
8711 unlock_user(p, arg1, 0);
8712 return ret;
8713 #endif
8714 #ifdef TARGET_NR_link
8715 case TARGET_NR_link:
8717 void * p2;
8718 p = lock_user_string(arg1);
8719 p2 = lock_user_string(arg2);
8720 if (!p || !p2)
8721 ret = -TARGET_EFAULT;
8722 else
8723 ret = get_errno(link(p, p2));
8724 unlock_user(p2, arg2, 0);
8725 unlock_user(p, arg1, 0);
8727 return ret;
8728 #endif
8729 #if defined(TARGET_NR_linkat)
8730 case TARGET_NR_linkat:
8732 void * p2 = NULL;
8733 if (!arg2 || !arg4)
8734 return -TARGET_EFAULT;
8735 p = lock_user_string(arg2);
8736 p2 = lock_user_string(arg4);
8737 if (!p || !p2)
8738 ret = -TARGET_EFAULT;
8739 else
8740 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8741 unlock_user(p, arg2, 0);
8742 unlock_user(p2, arg4, 0);
8744 return ret;
8745 #endif
8746 #ifdef TARGET_NR_unlink
8747 case TARGET_NR_unlink:
8748 if (!(p = lock_user_string(arg1)))
8749 return -TARGET_EFAULT;
8750 ret = get_errno(unlink(p));
8751 unlock_user(p, arg1, 0);
8752 return ret;
8753 #endif
8754 #if defined(TARGET_NR_unlinkat)
8755 case TARGET_NR_unlinkat:
8756 if (!(p = lock_user_string(arg2)))
8757 return -TARGET_EFAULT;
8758 ret = get_errno(unlinkat(arg1, p, arg3));
8759 unlock_user(p, arg2, 0);
8760 return ret;
8761 #endif
8762 case TARGET_NR_execve:
8764 char **argp, **envp;
8765 int argc, envc;
8766 abi_ulong gp;
8767 abi_ulong guest_argp;
8768 abi_ulong guest_envp;
8769 abi_ulong addr;
8770 char **q;
8772 argc = 0;
8773 guest_argp = arg2;
8774 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8775 if (get_user_ual(addr, gp))
8776 return -TARGET_EFAULT;
8777 if (!addr)
8778 break;
8779 argc++;
8781 envc = 0;
8782 guest_envp = arg3;
8783 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8784 if (get_user_ual(addr, gp))
8785 return -TARGET_EFAULT;
8786 if (!addr)
8787 break;
8788 envc++;
8791 argp = g_new0(char *, argc + 1);
8792 envp = g_new0(char *, envc + 1);
8794 for (gp = guest_argp, q = argp; gp;
8795 gp += sizeof(abi_ulong), q++) {
8796 if (get_user_ual(addr, gp))
8797 goto execve_efault;
8798 if (!addr)
8799 break;
8800 if (!(*q = lock_user_string(addr)))
8801 goto execve_efault;
8803 *q = NULL;
8805 for (gp = guest_envp, q = envp; gp;
8806 gp += sizeof(abi_ulong), q++) {
8807 if (get_user_ual(addr, gp))
8808 goto execve_efault;
8809 if (!addr)
8810 break;
8811 if (!(*q = lock_user_string(addr)))
8812 goto execve_efault;
8814 *q = NULL;
8816 if (!(p = lock_user_string(arg1)))
8817 goto execve_efault;
8818 /* Although execve() is not an interruptible syscall it is
8819 * a special case where we must use the safe_syscall wrapper:
8820 * if we allow a signal to happen before we make the host
8821 * syscall then we will 'lose' it, because at the point of
8822 * execve the process leaves QEMU's control. So we use the
8823 * safe syscall wrapper to ensure that we either take the
8824 * signal as a guest signal, or else it does not happen
8825 * before the execve completes and makes it the other
8826 * program's problem.
8827 */
8828 ret = get_errno(safe_execve(p, argp, envp));
8829 unlock_user(p, arg1, 0);
8831 goto execve_end;
8833 execve_efault:
8834 ret = -TARGET_EFAULT;
8836 execve_end:
8837 for (gp = guest_argp, q = argp; *q;
8838 gp += sizeof(abi_ulong), q++) {
8839 if (get_user_ual(addr, gp)
8840 || !addr)
8841 break;
8842 unlock_user(*q, addr, 0);
8844 for (gp = guest_envp, q = envp; *q;
8845 gp += sizeof(abi_ulong), q++) {
8846 if (get_user_ual(addr, gp)
8847 || !addr)
8848 break;
8849 unlock_user(*q, addr, 0);
8852 g_free(argp);
8853 g_free(envp);
8855 return ret;
8856 case TARGET_NR_chdir:
8857 if (!(p = lock_user_string(arg1)))
8858 return -TARGET_EFAULT;
8859 ret = get_errno(chdir(p));
8860 unlock_user(p, arg1, 0);
8861 return ret;
8862 #ifdef TARGET_NR_time
8863 case TARGET_NR_time:
8865 time_t host_time;
8866 ret = get_errno(time(&host_time));
8867 if (!is_error(ret)
8868 && arg1
8869 && put_user_sal(host_time, arg1))
8870 return -TARGET_EFAULT;
8872 return ret;
8873 #endif
8874 #ifdef TARGET_NR_mknod
8875 case TARGET_NR_mknod:
8876 if (!(p = lock_user_string(arg1)))
8877 return -TARGET_EFAULT;
8878 ret = get_errno(mknod(p, arg2, arg3));
8879 unlock_user(p, arg1, 0);
8880 return ret;
8881 #endif
8882 #if defined(TARGET_NR_mknodat)
8883 case TARGET_NR_mknodat:
8884 if (!(p = lock_user_string(arg2)))
8885 return -TARGET_EFAULT;
8886 ret = get_errno(mknodat(arg1, p, arg3, arg4));
8887 unlock_user(p, arg2, 0);
8888 return ret;
8889 #endif
8890 #ifdef TARGET_NR_chmod
8891 case TARGET_NR_chmod:
8892 if (!(p = lock_user_string(arg1)))
8893 return -TARGET_EFAULT;
8894 ret = get_errno(chmod(p, arg2));
8895 unlock_user(p, arg1, 0);
8896 return ret;
8897 #endif
8898 #ifdef TARGET_NR_lseek
8899 case TARGET_NR_lseek:
8900 return get_errno(lseek(arg1, arg2, arg3));
8901 #endif
8902 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8903 /* Alpha specific */
8904 case TARGET_NR_getxpid:
8905 cpu_env->ir[IR_A4] = getppid();
8906 return get_errno(getpid());
8907 #endif
8908 #ifdef TARGET_NR_getpid
8909 case TARGET_NR_getpid:
8910 return get_errno(getpid());
8911 #endif
8912 case TARGET_NR_mount:
8914 /* need to look at the data field */
8915 void *p2, *p3;
8917 if (arg1) {
8918 p = lock_user_string(arg1);
8919 if (!p) {
8920 return -TARGET_EFAULT;
8922 } else {
8923 p = NULL;
8926 p2 = lock_user_string(arg2);
8927 if (!p2) {
8928 if (arg1) {
8929 unlock_user(p, arg1, 0);
8931 return -TARGET_EFAULT;
8934 if (arg3) {
8935 p3 = lock_user_string(arg3);
8936 if (!p3) {
8937 if (arg1) {
8938 unlock_user(p, arg1, 0);
8940 unlock_user(p2, arg2, 0);
8941 return -TARGET_EFAULT;
8943 } else {
8944 p3 = NULL;
8947 /* FIXME - arg5 should be locked, but it isn't clear how to
8948 * do that since it's not guaranteed to be a NULL-terminated
8949 * string.
8950 */
8951 if (!arg5) {
8952 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8953 } else {
8954 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8956 ret = get_errno(ret);
8958 if (arg1) {
8959 unlock_user(p, arg1, 0);
8961 unlock_user(p2, arg2, 0);
8962 if (arg3) {
8963 unlock_user(p3, arg3, 0);
8966 return ret;
8967 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8968 #if defined(TARGET_NR_umount)
8969 case TARGET_NR_umount:
8970 #endif
8971 #if defined(TARGET_NR_oldumount)
8972 case TARGET_NR_oldumount:
8973 #endif
8974 if (!(p = lock_user_string(arg1)))
8975 return -TARGET_EFAULT;
8976 ret = get_errno(umount(p));
8977 unlock_user(p, arg1, 0);
8978 return ret;
8979 #endif
8980 #ifdef TARGET_NR_stime /* not on alpha */
8981 case TARGET_NR_stime:
8983 struct timespec ts;
8984 ts.tv_nsec = 0;
8985 if (get_user_sal(ts.tv_sec, arg1)) {
8986 return -TARGET_EFAULT;
8988 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8990 #endif
8991 #ifdef TARGET_NR_alarm /* not on alpha */
8992 case TARGET_NR_alarm:
8993 return alarm(arg1);
8994 #endif
8995 #ifdef TARGET_NR_pause /* not on alpha */
8996 case TARGET_NR_pause:
8997 if (!block_signals()) {
8998 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9000 return -TARGET_EINTR;
9001 #endif
9002 #ifdef TARGET_NR_utime
9003 case TARGET_NR_utime:
9005 struct utimbuf tbuf, *host_tbuf;
9006 struct target_utimbuf *target_tbuf;
9007 if (arg2) {
9008 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9009 return -TARGET_EFAULT;
9010 tbuf.actime = tswapal(target_tbuf->actime);
9011 tbuf.modtime = tswapal(target_tbuf->modtime);
9012 unlock_user_struct(target_tbuf, arg2, 0);
9013 host_tbuf = &tbuf;
9014 } else {
9015 host_tbuf = NULL;
9017 if (!(p = lock_user_string(arg1)))
9018 return -TARGET_EFAULT;
9019 ret = get_errno(utime(p, host_tbuf));
9020 unlock_user(p, arg1, 0);
9022 return ret;
9023 #endif
9024 #ifdef TARGET_NR_utimes
9025 case TARGET_NR_utimes:
9027 struct timeval *tvp, tv[2];
9028 if (arg2) {
9029 if (copy_from_user_timeval(&tv[0], arg2)
9030 || copy_from_user_timeval(&tv[1],
9031 arg2 + sizeof(struct target_timeval)))
9032 return -TARGET_EFAULT;
9033 tvp = tv;
9034 } else {
9035 tvp = NULL;
9037 if (!(p = lock_user_string(arg1)))
9038 return -TARGET_EFAULT;
9039 ret = get_errno(utimes(p, tvp));
9040 unlock_user(p, arg1, 0);
9042 return ret;
9043 #endif
9044 #if defined(TARGET_NR_futimesat)
9045 case TARGET_NR_futimesat:
9047 struct timeval *tvp, tv[2];
9048 if (arg3) {
9049 if (copy_from_user_timeval(&tv[0], arg3)
9050 || copy_from_user_timeval(&tv[1],
9051 arg3 + sizeof(struct target_timeval)))
9052 return -TARGET_EFAULT;
9053 tvp = tv;
9054 } else {
9055 tvp = NULL;
9057 if (!(p = lock_user_string(arg2))) {
9058 return -TARGET_EFAULT;
9060 ret = get_errno(futimesat(arg1, path(p), tvp));
9061 unlock_user(p, arg2, 0);
9063 return ret;
9064 #endif
9065 #ifdef TARGET_NR_access
9066 case TARGET_NR_access:
9067 if (!(p = lock_user_string(arg1))) {
9068 return -TARGET_EFAULT;
9070 ret = get_errno(access(path(p), arg2));
9071 unlock_user(p, arg1, 0);
9072 return ret;
9073 #endif
9074 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9075 case TARGET_NR_faccessat:
9076 if (!(p = lock_user_string(arg2))) {
9077 return -TARGET_EFAULT;
9079 ret = get_errno(faccessat(arg1, p, arg3, 0));
9080 unlock_user(p, arg2, 0);
9081 return ret;
9082 #endif
9083 #ifdef TARGET_NR_nice /* not on alpha */
9084 case TARGET_NR_nice:
9085 return get_errno(nice(arg1));
9086 #endif
9087 case TARGET_NR_sync:
9088 sync();
9089 return 0;
9090 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9091 case TARGET_NR_syncfs:
9092 return get_errno(syncfs(arg1));
9093 #endif
9094 case TARGET_NR_kill:
9095 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9096 #ifdef TARGET_NR_rename
9097 case TARGET_NR_rename:
9099 void *p2;
9100 p = lock_user_string(arg1);
9101 p2 = lock_user_string(arg2);
9102 if (!p || !p2)
9103 ret = -TARGET_EFAULT;
9104 else
9105 ret = get_errno(rename(p, p2));
9106 unlock_user(p2, arg2, 0);
9107 unlock_user(p, arg1, 0);
9109 return ret;
9110 #endif
9111 #if defined(TARGET_NR_renameat)
9112 case TARGET_NR_renameat:
9114 void *p2;
9115 p = lock_user_string(arg2);
9116 p2 = lock_user_string(arg4);
9117 if (!p || !p2)
9118 ret = -TARGET_EFAULT;
9119 else
9120 ret = get_errno(renameat(arg1, p, arg3, p2));
9121 unlock_user(p2, arg4, 0);
9122 unlock_user(p, arg2, 0);
9124 return ret;
9125 #endif
9126 #if defined(TARGET_NR_renameat2)
9127 case TARGET_NR_renameat2:
9129 void *p2;
9130 p = lock_user_string(arg2);
9131 p2 = lock_user_string(arg4);
9132 if (!p || !p2) {
9133 ret = -TARGET_EFAULT;
9134 } else {
9135 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9137 unlock_user(p2, arg4, 0);
9138 unlock_user(p, arg2, 0);
9140 return ret;
9141 #endif
9142 #ifdef TARGET_NR_mkdir
9143 case TARGET_NR_mkdir:
9144 if (!(p = lock_user_string(arg1)))
9145 return -TARGET_EFAULT;
9146 ret = get_errno(mkdir(p, arg2));
9147 unlock_user(p, arg1, 0);
9148 return ret;
9149 #endif
9150 #if defined(TARGET_NR_mkdirat)
9151 case TARGET_NR_mkdirat:
9152 if (!(p = lock_user_string(arg2)))
9153 return -TARGET_EFAULT;
9154 ret = get_errno(mkdirat(arg1, p, arg3));
9155 unlock_user(p, arg2, 0);
9156 return ret;
9157 #endif
9158 #ifdef TARGET_NR_rmdir
9159 case TARGET_NR_rmdir:
9160 if (!(p = lock_user_string(arg1)))
9161 return -TARGET_EFAULT;
9162 ret = get_errno(rmdir(p));
9163 unlock_user(p, arg1, 0);
9164 return ret;
9165 #endif
9166 case TARGET_NR_dup:
9167 ret = get_errno(dup(arg1));
9168 if (ret >= 0) {
9169 fd_trans_dup(arg1, ret);
9171 return ret;
9172 #ifdef TARGET_NR_pipe
9173 case TARGET_NR_pipe:
9174 return do_pipe(cpu_env, arg1, 0, 0);
9175 #endif
9176 #ifdef TARGET_NR_pipe2
9177 case TARGET_NR_pipe2:
9178 return do_pipe(cpu_env, arg1,
9179 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9180 #endif
9181 case TARGET_NR_times:
9183 struct target_tms *tmsp;
9184 struct tms tms;
9185 ret = get_errno(times(&tms));
9186 if (arg1) {
9187 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9188 if (!tmsp)
9189 return -TARGET_EFAULT;
9190 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9191 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9192 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9193 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9195 if (!is_error(ret))
9196 ret = host_to_target_clock_t(ret);
9198 return ret;
9199 case TARGET_NR_acct:
9200 if (arg1 == 0) {
9201 ret = get_errno(acct(NULL));
9202 } else {
9203 if (!(p = lock_user_string(arg1))) {
9204 return -TARGET_EFAULT;
9206 ret = get_errno(acct(path(p)));
9207 unlock_user(p, arg1, 0);
9209 return ret;
9210 #ifdef TARGET_NR_umount2
9211 case TARGET_NR_umount2:
9212 if (!(p = lock_user_string(arg1)))
9213 return -TARGET_EFAULT;
9214 ret = get_errno(umount2(p, arg2));
9215 unlock_user(p, arg1, 0);
9216 return ret;
9217 #endif
9218 case TARGET_NR_ioctl:
9219 return do_ioctl(arg1, arg2, arg3);
9220 #ifdef TARGET_NR_fcntl
9221 case TARGET_NR_fcntl:
9222 return do_fcntl(arg1, arg2, arg3);
9223 #endif
9224 case TARGET_NR_setpgid:
9225 return get_errno(setpgid(arg1, arg2));
9226 case TARGET_NR_umask:
9227 return get_errno(umask(arg1));
9228 case TARGET_NR_chroot:
9229 if (!(p = lock_user_string(arg1)))
9230 return -TARGET_EFAULT;
9231 ret = get_errno(chroot(p));
9232 unlock_user(p, arg1, 0);
9233 return ret;
9234 #ifdef TARGET_NR_dup2
9235 case TARGET_NR_dup2:
9236 ret = get_errno(dup2(arg1, arg2));
9237 if (ret >= 0) {
9238 fd_trans_dup(arg1, arg2);
9240 return ret;
9241 #endif
9242 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9243 case TARGET_NR_dup3:
9245 int host_flags;
9247 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9248 return -EINVAL;
9250 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9251 ret = get_errno(dup3(arg1, arg2, host_flags));
9252 if (ret >= 0) {
9253 fd_trans_dup(arg1, arg2);
9255 return ret;
9257 #endif
9258 #ifdef TARGET_NR_getppid /* not on alpha */
9259 case TARGET_NR_getppid:
9260 return get_errno(getppid());
9261 #endif
9262 #ifdef TARGET_NR_getpgrp
9263 case TARGET_NR_getpgrp:
9264 return get_errno(getpgrp());
9265 #endif
9266 case TARGET_NR_setsid:
9267 return get_errno(setsid());
9268 #ifdef TARGET_NR_sigaction
9269 case TARGET_NR_sigaction:
9271 #if defined(TARGET_MIPS)
9272 struct target_sigaction act, oact, *pact, *old_act;
9274 if (arg2) {
9275 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9276 return -TARGET_EFAULT;
9277 act._sa_handler = old_act->_sa_handler;
9278 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9279 act.sa_flags = old_act->sa_flags;
9280 unlock_user_struct(old_act, arg2, 0);
9281 pact = &act;
9282 } else {
9283 pact = NULL;
9286 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9288 if (!is_error(ret) && arg3) {
9289 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9290 return -TARGET_EFAULT;
9291 old_act->_sa_handler = oact._sa_handler;
9292 old_act->sa_flags = oact.sa_flags;
9293 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9294 old_act->sa_mask.sig[1] = 0;
9295 old_act->sa_mask.sig[2] = 0;
9296 old_act->sa_mask.sig[3] = 0;
9297 unlock_user_struct(old_act, arg3, 1);
9299 #else
9300 struct target_old_sigaction *old_act;
9301 struct target_sigaction act, oact, *pact;
9302 if (arg2) {
9303 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9304 return -TARGET_EFAULT;
9305 act._sa_handler = old_act->_sa_handler;
9306 target_siginitset(&act.sa_mask, old_act->sa_mask);
9307 act.sa_flags = old_act->sa_flags;
9308 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9309 act.sa_restorer = old_act->sa_restorer;
9310 #endif
9311 unlock_user_struct(old_act, arg2, 0);
9312 pact = &act;
9313 } else {
9314 pact = NULL;
9316 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9317 if (!is_error(ret) && arg3) {
9318 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9319 return -TARGET_EFAULT;
9320 old_act->_sa_handler = oact._sa_handler;
9321 old_act->sa_mask = oact.sa_mask.sig[0];
9322 old_act->sa_flags = oact.sa_flags;
9323 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9324 old_act->sa_restorer = oact.sa_restorer;
9325 #endif
9326 unlock_user_struct(old_act, arg3, 1);
9328 #endif
9330 return ret;
9331 #endif
9332 case TARGET_NR_rt_sigaction:
9333 {
9334 /*
9335 * For Alpha and SPARC this is a 5 argument syscall, with
9336 * a 'restorer' parameter which must be copied into the
9337 * sa_restorer field of the sigaction struct.
9338 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9339 * and arg5 is the sigsetsize.
9340 */
9341 #if defined(TARGET_ALPHA)
9342 target_ulong sigsetsize = arg4;
9343 target_ulong restorer = arg5;
9344 #elif defined(TARGET_SPARC)
9345 target_ulong restorer = arg4;
9346 target_ulong sigsetsize = arg5;
9347 #else
9348 target_ulong sigsetsize = arg4;
9349 target_ulong restorer = 0;
9350 #endif
9351 struct target_sigaction *act = NULL;
9352 struct target_sigaction *oact = NULL;
9354 if (sigsetsize != sizeof(target_sigset_t)) {
9355 return -TARGET_EINVAL;
9357 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9358 return -TARGET_EFAULT;
9360 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9361 ret = -TARGET_EFAULT;
9362 } else {
9363 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9364 if (oact) {
9365 unlock_user_struct(oact, arg3, 1);
9368 if (act) {
9369 unlock_user_struct(act, arg2, 0);
9372 return ret;
9373 #ifdef TARGET_NR_sgetmask /* not on alpha */
9374 case TARGET_NR_sgetmask:
9376 sigset_t cur_set;
9377 abi_ulong target_set;
9378 ret = do_sigprocmask(0, NULL, &cur_set);
9379 if (!ret) {
9380 host_to_target_old_sigset(&target_set, &cur_set);
9381 ret = target_set;
9384 return ret;
9385 #endif
9386 #ifdef TARGET_NR_ssetmask /* not on alpha */
9387 case TARGET_NR_ssetmask:
9389 sigset_t set, oset;
9390 abi_ulong target_set = arg1;
9391 target_to_host_old_sigset(&set, &target_set);
9392 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9393 if (!ret) {
9394 host_to_target_old_sigset(&target_set, &oset);
9395 ret = target_set;
9398 return ret;
9399 #endif
9400 #ifdef TARGET_NR_sigprocmask
9401 case TARGET_NR_sigprocmask:
9403 #if defined(TARGET_ALPHA)
9404 sigset_t set, oldset;
9405 abi_ulong mask;
9406 int how;
9408 switch (arg1) {
9409 case TARGET_SIG_BLOCK:
9410 how = SIG_BLOCK;
9411 break;
9412 case TARGET_SIG_UNBLOCK:
9413 how = SIG_UNBLOCK;
9414 break;
9415 case TARGET_SIG_SETMASK:
9416 how = SIG_SETMASK;
9417 break;
9418 default:
9419 return -TARGET_EINVAL;
9421 mask = arg2;
9422 target_to_host_old_sigset(&set, &mask);
9424 ret = do_sigprocmask(how, &set, &oldset);
9425 if (!is_error(ret)) {
9426 host_to_target_old_sigset(&mask, &oldset);
9427 ret = mask;
9428 cpu_env->ir[IR_V0] = 0; /* force no error */
9430 #else
9431 sigset_t set, oldset, *set_ptr;
9432 int how;
9434 if (arg2) {
9435 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9436 if (!p) {
9437 return -TARGET_EFAULT;
9439 target_to_host_old_sigset(&set, p);
9440 unlock_user(p, arg2, 0);
9441 set_ptr = &set;
9442 switch (arg1) {
9443 case TARGET_SIG_BLOCK:
9444 how = SIG_BLOCK;
9445 break;
9446 case TARGET_SIG_UNBLOCK:
9447 how = SIG_UNBLOCK;
9448 break;
9449 case TARGET_SIG_SETMASK:
9450 how = SIG_SETMASK;
9451 break;
9452 default:
9453 return -TARGET_EINVAL;
9455 } else {
9456 how = 0;
9457 set_ptr = NULL;
9459 ret = do_sigprocmask(how, set_ptr, &oldset);
9460 if (!is_error(ret) && arg3) {
9461 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9462 return -TARGET_EFAULT;
9463 host_to_target_old_sigset(p, &oldset);
9464 unlock_user(p, arg3, sizeof(target_sigset_t));
9466 #endif
9468 return ret;
9469 #endif
9470 case TARGET_NR_rt_sigprocmask:
9472 int how = arg1;
9473 sigset_t set, oldset, *set_ptr;
9475 if (arg4 != sizeof(target_sigset_t)) {
9476 return -TARGET_EINVAL;
9479 if (arg2) {
9480 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9481 if (!p) {
9482 return -TARGET_EFAULT;
9484 target_to_host_sigset(&set, p);
9485 unlock_user(p, arg2, 0);
9486 set_ptr = &set;
9487 switch(how) {
9488 case TARGET_SIG_BLOCK:
9489 how = SIG_BLOCK;
9490 break;
9491 case TARGET_SIG_UNBLOCK:
9492 how = SIG_UNBLOCK;
9493 break;
9494 case TARGET_SIG_SETMASK:
9495 how = SIG_SETMASK;
9496 break;
9497 default:
9498 return -TARGET_EINVAL;
9500 } else {
9501 how = 0;
9502 set_ptr = NULL;
9504 ret = do_sigprocmask(how, set_ptr, &oldset);
9505 if (!is_error(ret) && arg3) {
9506 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9507 return -TARGET_EFAULT;
9508 host_to_target_sigset(p, &oldset);
9509 unlock_user(p, arg3, sizeof(target_sigset_t));
9512 return ret;
9513 #ifdef TARGET_NR_sigpending
9514 case TARGET_NR_sigpending:
9516 sigset_t set;
9517 ret = get_errno(sigpending(&set));
9518 if (!is_error(ret)) {
9519 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9520 return -TARGET_EFAULT;
9521 host_to_target_old_sigset(p, &set);
9522 unlock_user(p, arg1, sizeof(target_sigset_t));
9525 return ret;
9526 #endif
9527 case TARGET_NR_rt_sigpending:
9529 sigset_t set;
9531 /* Yes, this check is >, not != like most. We follow the kernel's
9532 * logic and it does it like this because it implements
9533 * NR_sigpending through the same code path, and in that case
9534 * the old_sigset_t is smaller in size.
9535 */
9536 if (arg2 > sizeof(target_sigset_t)) {
9537 return -TARGET_EINVAL;
9540 ret = get_errno(sigpending(&set));
9541 if (!is_error(ret)) {
9542 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9543 return -TARGET_EFAULT;
9544 host_to_target_sigset(p, &set);
9545 unlock_user(p, arg1, sizeof(target_sigset_t));
9548 return ret;
9549 #ifdef TARGET_NR_sigsuspend
9550 case TARGET_NR_sigsuspend:
9552 sigset_t *set;
9554 #if defined(TARGET_ALPHA)
9555 TaskState *ts = cpu->opaque;
9556 /* target_to_host_old_sigset will bswap back */
9557 abi_ulong mask = tswapal(arg1);
9558 set = &ts->sigsuspend_mask;
9559 target_to_host_old_sigset(set, &mask);
9560 #else
9561 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9562 if (ret != 0) {
9563 return ret;
9565 #endif
9566 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9567 finish_sigsuspend_mask(ret);
9569 return ret;
9570 #endif
9571 case TARGET_NR_rt_sigsuspend:
9573 sigset_t *set;
9575 ret = process_sigsuspend_mask(&set, arg1, arg2);
9576 if (ret != 0) {
9577 return ret;
9579 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9580 finish_sigsuspend_mask(ret);
9582 return ret;
9583 #ifdef TARGET_NR_rt_sigtimedwait
9584 case TARGET_NR_rt_sigtimedwait:
9586 sigset_t set;
9587 struct timespec uts, *puts;
9588 siginfo_t uinfo;
9590 if (arg4 != sizeof(target_sigset_t)) {
9591 return -TARGET_EINVAL;
9594 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9595 return -TARGET_EFAULT;
9596 target_to_host_sigset(&set, p);
9597 unlock_user(p, arg1, 0);
9598 if (arg3) {
9599 puts = &uts;
9600 if (target_to_host_timespec(puts, arg3)) {
9601 return -TARGET_EFAULT;
9603 } else {
9604 puts = NULL;
9606 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9607 SIGSET_T_SIZE));
9608 if (!is_error(ret)) {
9609 if (arg2) {
9610 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9612 if (!p) {
9613 return -TARGET_EFAULT;
9615 host_to_target_siginfo(p, &uinfo);
9616 unlock_user(p, arg2, sizeof(target_siginfo_t));
9618 ret = host_to_target_signal(ret);
9621 return ret;
9622 #endif
9623 #ifdef TARGET_NR_rt_sigtimedwait_time64
9624 case TARGET_NR_rt_sigtimedwait_time64:
9626 sigset_t set;
9627 struct timespec uts, *puts;
9628 siginfo_t uinfo;
9630 if (arg4 != sizeof(target_sigset_t)) {
9631 return -TARGET_EINVAL;
9634 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9635 if (!p) {
9636 return -TARGET_EFAULT;
9638 target_to_host_sigset(&set, p);
9639 unlock_user(p, arg1, 0);
9640 if (arg3) {
9641 puts = &uts;
9642 if (target_to_host_timespec64(puts, arg3)) {
9643 return -TARGET_EFAULT;
9645 } else {
9646 puts = NULL;
9648 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9649 SIGSET_T_SIZE));
9650 if (!is_error(ret)) {
9651 if (arg2) {
9652 p = lock_user(VERIFY_WRITE, arg2,
9653 sizeof(target_siginfo_t), 0);
9654 if (!p) {
9655 return -TARGET_EFAULT;
9657 host_to_target_siginfo(p, &uinfo);
9658 unlock_user(p, arg2, sizeof(target_siginfo_t));
9660 ret = host_to_target_signal(ret);
9663 return ret;
9664 #endif
9665 case TARGET_NR_rt_sigqueueinfo:
9667 siginfo_t uinfo;
9669 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9670 if (!p) {
9671 return -TARGET_EFAULT;
9673 target_to_host_siginfo(&uinfo, p);
9674 unlock_user(p, arg3, 0);
9675 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9677 return ret;
9678 case TARGET_NR_rt_tgsigqueueinfo:
9680 siginfo_t uinfo;
9682 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9683 if (!p) {
9684 return -TARGET_EFAULT;
9686 target_to_host_siginfo(&uinfo, p);
9687 unlock_user(p, arg4, 0);
9688 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9690 return ret;
9691 #ifdef TARGET_NR_sigreturn
9692 case TARGET_NR_sigreturn:
9693 if (block_signals()) {
9694 return -QEMU_ERESTARTSYS;
9696 return do_sigreturn(cpu_env);
9697 #endif
9698 case TARGET_NR_rt_sigreturn:
9699 if (block_signals()) {
9700 return -QEMU_ERESTARTSYS;
9702 return do_rt_sigreturn(cpu_env);
9703 case TARGET_NR_sethostname:
9704 if (!(p = lock_user_string(arg1)))
9705 return -TARGET_EFAULT;
9706 ret = get_errno(sethostname(p, arg2));
9707 unlock_user(p, arg1, 0);
9708 return ret;
9709 #ifdef TARGET_NR_setrlimit
9710 case TARGET_NR_setrlimit:
9712 int resource = target_to_host_resource(arg1);
9713 struct target_rlimit *target_rlim;
9714 struct rlimit rlim;
9715 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9716 return -TARGET_EFAULT;
9717 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9718 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9719 unlock_user_struct(target_rlim, arg2, 0);
9720 /*
9721 * If we just passed through resource limit settings for memory then
9722 * they would also apply to QEMU's own allocations, and QEMU will
9723 * crash or hang or die if its allocations fail. Ideally we would
9724 * track the guest allocations in QEMU and apply the limits ourselves.
9725 * For now, just tell the guest the call succeeded but don't actually
9726 * limit anything.
9727 */
9728 if (resource != RLIMIT_AS &&
9729 resource != RLIMIT_DATA &&
9730 resource != RLIMIT_STACK) {
9731 return get_errno(setrlimit(resource, &rlim));
9732 } else {
9733 return 0;
9734 }
9735 }
9736 #endif
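/*
 * Illustrative consequence (not part of the original source): a guest
 * shell running "ulimit -v 65536" sees setrlimit(RLIMIT_AS, ...)
 * succeed, but no limit is actually installed, because it would also
 * cap QEMU's own allocations (translated code buffers, guest RAM
 * mappings) and could kill the emulator itself.
 */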
9737 #ifdef TARGET_NR_getrlimit
9738 case TARGET_NR_getrlimit:
9740 int resource = target_to_host_resource(arg1);
9741 struct target_rlimit *target_rlim;
9742 struct rlimit rlim;
9744 ret = get_errno(getrlimit(resource, &rlim));
9745 if (!is_error(ret)) {
9746 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9747 return -TARGET_EFAULT;
9748 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9749 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9750 unlock_user_struct(target_rlim, arg2, 1);
9753 return ret;
9754 #endif
9755 case TARGET_NR_getrusage:
9757 struct rusage rusage;
9758 ret = get_errno(getrusage(arg1, &rusage));
9759 if (!is_error(ret)) {
9760 ret = host_to_target_rusage(arg2, &rusage);
9763 return ret;
9764 #if defined(TARGET_NR_gettimeofday)
9765 case TARGET_NR_gettimeofday:
9767 struct timeval tv;
9768 struct timezone tz;
9770 ret = get_errno(gettimeofday(&tv, &tz));
9771 if (!is_error(ret)) {
9772 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9773 return -TARGET_EFAULT;
9775 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9776 return -TARGET_EFAULT;
9780 return ret;
9781 #endif
9782 #if defined(TARGET_NR_settimeofday)
9783 case TARGET_NR_settimeofday:
9785 struct timeval tv, *ptv = NULL;
9786 struct timezone tz, *ptz = NULL;
9788 if (arg1) {
9789 if (copy_from_user_timeval(&tv, arg1)) {
9790 return -TARGET_EFAULT;
9792 ptv = &tv;
9795 if (arg2) {
9796 if (copy_from_user_timezone(&tz, arg2)) {
9797 return -TARGET_EFAULT;
9799 ptz = &tz;
9802 return get_errno(settimeofday(ptv, ptz));
9804 #endif
9805 #if defined(TARGET_NR_select)
9806 case TARGET_NR_select:
9807 #if defined(TARGET_WANT_NI_OLD_SELECT)
9808 /* some architectures used to have old_select here
9809 * but now return ENOSYS for it.
9810 */
9811 ret = -TARGET_ENOSYS;
9812 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9813 ret = do_old_select(arg1);
9814 #else
9815 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9816 #endif
9817 return ret;
9818 #endif
9819 #ifdef TARGET_NR_pselect6
9820 case TARGET_NR_pselect6:
9821 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9822 #endif
9823 #ifdef TARGET_NR_pselect6_time64
9824 case TARGET_NR_pselect6_time64:
9825 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9826 #endif
9827 #ifdef TARGET_NR_symlink
9828 case TARGET_NR_symlink:
9830 void *p2;
9831 p = lock_user_string(arg1);
9832 p2 = lock_user_string(arg2);
9833 if (!p || !p2)
9834 ret = -TARGET_EFAULT;
9835 else
9836 ret = get_errno(symlink(p, p2));
9837 unlock_user(p2, arg2, 0);
9838 unlock_user(p, arg1, 0);
9840 return ret;
9841 #endif
9842 #if defined(TARGET_NR_symlinkat)
9843 case TARGET_NR_symlinkat:
9845 void *p2;
9846 p = lock_user_string(arg1);
9847 p2 = lock_user_string(arg3);
9848 if (!p || !p2)
9849 ret = -TARGET_EFAULT;
9850 else
9851 ret = get_errno(symlinkat(p, arg2, p2));
9852 unlock_user(p2, arg3, 0);
9853 unlock_user(p, arg1, 0);
9855 return ret;
9856 #endif
9857 #ifdef TARGET_NR_readlink
9858 case TARGET_NR_readlink:
9860 void *p2;
9861 p = lock_user_string(arg1);
9862 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9863 if (!p || !p2) {
9864 ret = -TARGET_EFAULT;
9865 } else if (!arg3) {
9866 /* Short circuit this for the magic exe check. */
9867 ret = -TARGET_EINVAL;
9868 } else if (is_proc_myself((const char *)p, "exe")) {
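/*
 * A guest reading /proc/self/exe must see the path of the binary
 * being emulated, not that of QEMU itself, so resolve exec_path
 * rather than following the host link.
 */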
9869 char real[PATH_MAX], *temp;
9870 temp = realpath(exec_path, real);
9871 /* Return value is # of bytes that we wrote to the buffer. */
9872 if (temp == NULL) {
9873 ret = get_errno(-1);
9874 } else {
9875 /* Don't worry about sign mismatch as earlier mapping
9876 * logic would have thrown a bad address error. */
9877 ret = MIN(strlen(real), arg3);
9878 /* We cannot NUL terminate the string. */
9879 memcpy(p2, real, ret);
9881 } else {
9882 ret = get_errno(readlink(path(p), p2, arg3));
9884 unlock_user(p2, arg2, ret);
9885 unlock_user(p, arg1, 0);
9887 return ret;
9888 #endif
9889 #if defined(TARGET_NR_readlinkat)
9890 case TARGET_NR_readlinkat:
9892 void *p2;
9893 p = lock_user_string(arg2);
9894 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9895 if (!p || !p2) {
9896 ret = -TARGET_EFAULT;
9897 } else if (is_proc_myself((const char *)p, "exe")) {
9898 char real[PATH_MAX], *temp;
9899 temp = realpath(exec_path, real);
9900 ret = temp == NULL ? get_errno(-1) : strlen(real);
9901 if (temp != NULL) snprintf((char *)p2, arg4, "%s", real);
9902 } else {
9903 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9905 unlock_user(p2, arg3, ret);
9906 unlock_user(p, arg2, 0);
9908 return ret;
9909 #endif
9910 #ifdef TARGET_NR_swapon
9911 case TARGET_NR_swapon:
9912 if (!(p = lock_user_string(arg1)))
9913 return -TARGET_EFAULT;
9914 ret = get_errno(swapon(p, arg2));
9915 unlock_user(p, arg1, 0);
9916 return ret;
9917 #endif
9918 case TARGET_NR_reboot:
9919 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9920 /* arg4 must be ignored in all other cases */
9921 p = lock_user_string(arg4);
9922 if (!p) {
9923 return -TARGET_EFAULT;
9925 ret = get_errno(reboot(arg1, arg2, arg3, p));
9926 unlock_user(p, arg4, 0);
9927 } else {
9928 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9930 return ret;
9931 #ifdef TARGET_NR_mmap
9932 case TARGET_NR_mmap:
9933 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9934 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9935 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9936 || defined(TARGET_S390X)
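/*
 * On these targets the guest's mmap() is the old style that passes
 * a single pointer to a block of six arguments in guest memory
 * (e.g. i386's old_mmap), so fetch and byte-swap each one.
 */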
9938 abi_ulong *v;
9939 abi_ulong v1, v2, v3, v4, v5, v6;
9940 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9941 return -TARGET_EFAULT;
9942 v1 = tswapal(v[0]);
9943 v2 = tswapal(v[1]);
9944 v3 = tswapal(v[2]);
9945 v4 = tswapal(v[3]);
9946 v5 = tswapal(v[4]);
9947 v6 = tswapal(v[5]);
9948 unlock_user(v, arg1, 0);
9949 ret = get_errno(target_mmap(v1, v2, v3,
9950 target_to_host_bitmask(v4, mmap_flags_tbl),
9951 v5, v6));
9953 #else
9954 /* mmap pointers are always untagged */
9955 ret = get_errno(target_mmap(arg1, arg2, arg3,
9956 target_to_host_bitmask(arg4, mmap_flags_tbl),
9957 arg5,
9958 arg6));
9959 #endif
9960 return ret;
9961 #endif
9962 #ifdef TARGET_NR_mmap2
9963 case TARGET_NR_mmap2:
9964 #ifndef MMAP_SHIFT
9965 #define MMAP_SHIFT 12
9966 #endif
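/*
 * mmap2's final argument is an offset in 4096-byte units (e.g.
 * arg6 == 3 means byte offset 12288), hence the shift below.
 */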
9967 ret = target_mmap(arg1, arg2, arg3,
9968 target_to_host_bitmask(arg4, mmap_flags_tbl),
9969 arg5, arg6 << MMAP_SHIFT);
9970 return get_errno(ret);
9971 #endif
9972 case TARGET_NR_munmap:
9973 arg1 = cpu_untagged_addr(cpu, arg1);
9974 return get_errno(target_munmap(arg1, arg2));
9975 case TARGET_NR_mprotect:
9976 arg1 = cpu_untagged_addr(cpu, arg1);
9978 TaskState *ts = cpu->opaque;
9979 /* Special hack to detect libc making the stack executable. */
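/*
 * With PROT_GROWSDOWN the kernel applies the new protection from the
 * start of the growable stack region, so emulate that by widening the
 * range down to the stack limit; glibc, for example, relies on this
 * when it must make the stack executable.
 */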
9980 if ((arg3 & PROT_GROWSDOWN)
9981 && arg1 >= ts->info->stack_limit
9982 && arg1 <= ts->info->start_stack) {
9983 arg3 &= ~PROT_GROWSDOWN;
9984 arg2 = arg2 + arg1 - ts->info->stack_limit;
9985 arg1 = ts->info->stack_limit;
9988 return get_errno(target_mprotect(arg1, arg2, arg3));
9989 #ifdef TARGET_NR_mremap
9990 case TARGET_NR_mremap:
9991 arg1 = cpu_untagged_addr(cpu, arg1);
9992 /* mremap new_addr (arg5) is always untagged */
9993 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9994 #endif
9995 /* ??? msync/mlock/munlock are broken for softmmu. */
9996 #ifdef TARGET_NR_msync
9997 case TARGET_NR_msync:
9998 return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9999 #endif
10000 #ifdef TARGET_NR_mlock
10001 case TARGET_NR_mlock:
10002 return get_errno(mlock(g2h(cpu, arg1), arg2));
10003 #endif
10004 #ifdef TARGET_NR_munlock
10005 case TARGET_NR_munlock:
10006 return get_errno(munlock(g2h(cpu, arg1), arg2));
10007 #endif
10008 #ifdef TARGET_NR_mlockall
10009 case TARGET_NR_mlockall:
10010 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10011 #endif
10012 #ifdef TARGET_NR_munlockall
10013 case TARGET_NR_munlockall:
10014 return get_errno(munlockall());
10015 #endif
10016 #ifdef TARGET_NR_truncate
10017 case TARGET_NR_truncate:
10018 if (!(p = lock_user_string(arg1)))
10019 return -TARGET_EFAULT;
10020 ret = get_errno(truncate(p, arg2));
10021 unlock_user(p, arg1, 0);
10022 return ret;
10023 #endif
10024 #ifdef TARGET_NR_ftruncate
10025 case TARGET_NR_ftruncate:
10026 return get_errno(ftruncate(arg1, arg2));
10027 #endif
10028 case TARGET_NR_fchmod:
10029 return get_errno(fchmod(arg1, arg2));
10030 #if defined(TARGET_NR_fchmodat)
10031 case TARGET_NR_fchmodat:
10032 if (!(p = lock_user_string(arg2)))
10033 return -TARGET_EFAULT;
10034 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10035 unlock_user(p, arg2, 0);
10036 return ret;
10037 #endif
10038 case TARGET_NR_getpriority:
10039 /* Note that negative values are valid for getpriority, so we must
10040 differentiate based on errno settings. */
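/*
 * The raw syscall returns 20 - nice, so a host nice value of -5
 * (for example) comes back as 25; the guest's libc is expected to
 * undo this bias. Alpha instead returns the unbiased value and
 * signals success through IR_V0 below.
 */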
10041 errno = 0;
10042 ret = getpriority(arg1, arg2);
10043 if (ret == -1 && errno != 0) {
10044 return -host_to_target_errno(errno);
10046 #ifdef TARGET_ALPHA
10047 /* Return value is the unbiased priority. Signal no error. */
10048 cpu_env->ir[IR_V0] = 0;
10049 #else
10050 /* Return value is a biased priority to avoid negative numbers. */
10051 ret = 20 - ret;
10052 #endif
10053 return ret;
10054 case TARGET_NR_setpriority:
10055 return get_errno(setpriority(arg1, arg2, arg3));
10056 #ifdef TARGET_NR_statfs
10057 case TARGET_NR_statfs:
10058 if (!(p = lock_user_string(arg1))) {
10059 return -TARGET_EFAULT;
10061 ret = get_errno(statfs(path(p), &stfs));
10062 unlock_user(p, arg1, 0);
10063 convert_statfs:
10064 if (!is_error(ret)) {
10065 struct target_statfs *target_stfs;
10067 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10068 return -TARGET_EFAULT;
10069 __put_user(stfs.f_type, &target_stfs->f_type);
10070 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10071 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10072 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10073 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10074 __put_user(stfs.f_files, &target_stfs->f_files);
10075 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10076 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10077 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10078 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10079 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10080 #ifdef _STATFS_F_FLAGS
10081 __put_user(stfs.f_flags, &target_stfs->f_flags);
10082 #else
10083 __put_user(0, &target_stfs->f_flags);
10084 #endif
10085 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10086 unlock_user_struct(target_stfs, arg2, 1);
10088 return ret;
10089 #endif
10090 #ifdef TARGET_NR_fstatfs
10091 case TARGET_NR_fstatfs:
10092 ret = get_errno(fstatfs(arg1, &stfs));
10093 goto convert_statfs;
10094 #endif
10095 #ifdef TARGET_NR_statfs64
10096 case TARGET_NR_statfs64:
10097 if (!(p = lock_user_string(arg1))) {
10098 return -TARGET_EFAULT;
10100 ret = get_errno(statfs(path(p), &stfs));
10101 unlock_user(p, arg1, 0);
10102 convert_statfs64:
10103 if (!is_error(ret)) {
10104 struct target_statfs64 *target_stfs;
10106 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10107 return -TARGET_EFAULT;
10108 __put_user(stfs.f_type, &target_stfs->f_type);
10109 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10110 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10111 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10112 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10113 __put_user(stfs.f_files, &target_stfs->f_files);
10114 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10115 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10116 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10117 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10118 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10119 #ifdef _STATFS_F_FLAGS
10120 __put_user(stfs.f_flags, &target_stfs->f_flags);
10121 #else
10122 __put_user(0, &target_stfs->f_flags);
10123 #endif
10124 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10125 unlock_user_struct(target_stfs, arg3, 1);
10127 return ret;
10128 case TARGET_NR_fstatfs64:
10129 ret = get_errno(fstatfs(arg1, &stfs));
10130 goto convert_statfs64;
10131 #endif
10132 #ifdef TARGET_NR_socketcall
10133 case TARGET_NR_socketcall:
10134 return do_socketcall(arg1, arg2);
10135 #endif
10136 #ifdef TARGET_NR_accept
10137 case TARGET_NR_accept:
10138 return do_accept4(arg1, arg2, arg3, 0);
10139 #endif
10140 #ifdef TARGET_NR_accept4
10141 case TARGET_NR_accept4:
10142 return do_accept4(arg1, arg2, arg3, arg4);
10143 #endif
10144 #ifdef TARGET_NR_bind
10145 case TARGET_NR_bind:
10146 return do_bind(arg1, arg2, arg3);
10147 #endif
10148 #ifdef TARGET_NR_connect
10149 case TARGET_NR_connect:
10150 return do_connect(arg1, arg2, arg3);
10151 #endif
10152 #ifdef TARGET_NR_getpeername
10153 case TARGET_NR_getpeername:
10154 return do_getpeername(arg1, arg2, arg3);
10155 #endif
10156 #ifdef TARGET_NR_getsockname
10157 case TARGET_NR_getsockname:
10158 return do_getsockname(arg1, arg2, arg3);
10159 #endif
10160 #ifdef TARGET_NR_getsockopt
10161 case TARGET_NR_getsockopt:
10162 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10163 #endif
10164 #ifdef TARGET_NR_listen
10165 case TARGET_NR_listen:
10166 return get_errno(listen(arg1, arg2));
10167 #endif
10168 #ifdef TARGET_NR_recv
10169 case TARGET_NR_recv:
10170 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10171 #endif
10172 #ifdef TARGET_NR_recvfrom
10173 case TARGET_NR_recvfrom:
10174 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10175 #endif
10176 #ifdef TARGET_NR_recvmsg
10177 case TARGET_NR_recvmsg:
10178 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10179 #endif
10180 #ifdef TARGET_NR_send
10181 case TARGET_NR_send:
10182 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10183 #endif
10184 #ifdef TARGET_NR_sendmsg
10185 case TARGET_NR_sendmsg:
10186 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10187 #endif
10188 #ifdef TARGET_NR_sendmmsg
10189 case TARGET_NR_sendmmsg:
10190 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10191 #endif
10192 #ifdef TARGET_NR_recvmmsg
10193 case TARGET_NR_recvmmsg:
10194 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10195 #endif
10196 #ifdef TARGET_NR_sendto
10197 case TARGET_NR_sendto:
10198 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10199 #endif
10200 #ifdef TARGET_NR_shutdown
10201 case TARGET_NR_shutdown:
10202 return get_errno(shutdown(arg1, arg2));
10203 #endif
10204 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10205 case TARGET_NR_getrandom:
10206 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10207 if (!p) {
10208 return -TARGET_EFAULT;
10210 ret = get_errno(getrandom(p, arg2, arg3));
10211 unlock_user(p, arg1, ret);
10212 return ret;
10213 #endif
10214 #ifdef TARGET_NR_socket
10215 case TARGET_NR_socket:
10216 return do_socket(arg1, arg2, arg3);
10217 #endif
10218 #ifdef TARGET_NR_socketpair
10219 case TARGET_NR_socketpair:
10220 return do_socketpair(arg1, arg2, arg3, arg4);
10221 #endif
10222 #ifdef TARGET_NR_setsockopt
10223 case TARGET_NR_setsockopt:
10224 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10225 #endif
10226 #if defined(TARGET_NR_syslog)
10227 case TARGET_NR_syslog:
10229 int len = arg3;  /* syslog(type, buf, len): the length is arg3 */
10231 switch (arg1) {
10232 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10233 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10234 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10235 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10236 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10237 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10238 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10239 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10240 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10241 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10242 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10243 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10245 if (len < 0) {
10246 return -TARGET_EINVAL;
10248 if (len == 0) {
10249 return 0;
10251 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10252 if (!p) {
10253 return -TARGET_EFAULT;
10255 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10256 unlock_user(p, arg2, arg3);
10258 return ret;
10259 default:
10260 return -TARGET_EINVAL;
10263 break;
10264 #endif
10265 case TARGET_NR_setitimer:
10267 struct itimerval value, ovalue, *pvalue;
10269 if (arg2) {
10270 pvalue = &value;
10271 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10272 || copy_from_user_timeval(&pvalue->it_value,
10273 arg2 + sizeof(struct target_timeval)))
10274 return -TARGET_EFAULT;
10275 } else {
10276 pvalue = NULL;
10278 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10279 if (!is_error(ret) && arg3) {
10280 if (copy_to_user_timeval(arg3,
10281 &ovalue.it_interval)
10282 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10283 &ovalue.it_value))
10284 return -TARGET_EFAULT;
10287 return ret;
10288 case TARGET_NR_getitimer:
10290 struct itimerval value;
10292 ret = get_errno(getitimer(arg1, &value));
10293 if (!is_error(ret) && arg2) {
10294 if (copy_to_user_timeval(arg2,
10295 &value.it_interval)
10296 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10297 &value.it_value))
10298 return -TARGET_EFAULT;
10301 return ret;
10302 #ifdef TARGET_NR_stat
10303 case TARGET_NR_stat:
10304 if (!(p = lock_user_string(arg1))) {
10305 return -TARGET_EFAULT;
10307 ret = get_errno(stat(path(p), &st));
10308 unlock_user(p, arg1, 0);
10309 goto do_stat;
10310 #endif
10311 #ifdef TARGET_NR_lstat
10312 case TARGET_NR_lstat:
10313 if (!(p = lock_user_string(arg1))) {
10314 return -TARGET_EFAULT;
10316 ret = get_errno(lstat(path(p), &st));
10317 unlock_user(p, arg1, 0);
10318 goto do_stat;
10319 #endif
10320 #ifdef TARGET_NR_fstat
10321 case TARGET_NR_fstat:
10323 ret = get_errno(fstat(arg1, &st));
10324 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10325 do_stat:
10326 #endif
10327 if (!is_error(ret)) {
10328 struct target_stat *target_st;
10330 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10331 return -TARGET_EFAULT;
10332 memset(target_st, 0, sizeof(*target_st));
10333 __put_user(st.st_dev, &target_st->st_dev);
10334 __put_user(st.st_ino, &target_st->st_ino);
10335 __put_user(st.st_mode, &target_st->st_mode);
10336 __put_user(st.st_uid, &target_st->st_uid);
10337 __put_user(st.st_gid, &target_st->st_gid);
10338 __put_user(st.st_nlink, &target_st->st_nlink);
10339 __put_user(st.st_rdev, &target_st->st_rdev);
10340 __put_user(st.st_size, &target_st->st_size);
10341 __put_user(st.st_blksize, &target_st->st_blksize);
10342 __put_user(st.st_blocks, &target_st->st_blocks);
10343 __put_user(st.st_atime, &target_st->target_st_atime);
10344 __put_user(st.st_mtime, &target_st->target_st_mtime);
10345 __put_user(st.st_ctime, &target_st->target_st_ctime);
10346 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10347 __put_user(st.st_atim.tv_nsec,
10348 &target_st->target_st_atime_nsec);
10349 __put_user(st.st_mtim.tv_nsec,
10350 &target_st->target_st_mtime_nsec);
10351 __put_user(st.st_ctim.tv_nsec,
10352 &target_st->target_st_ctime_nsec);
10353 #endif
10354 unlock_user_struct(target_st, arg2, 1);
10357 return ret;
10358 #endif
10359 case TARGET_NR_vhangup:
10360 return get_errno(vhangup());
10361 #ifdef TARGET_NR_syscall
10362 case TARGET_NR_syscall:
10363 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10364 arg6, arg7, arg8, 0);
10365 #endif
10366 #if defined(TARGET_NR_wait4)
10367 case TARGET_NR_wait4:
10369 int status;
10370 abi_long status_ptr = arg2;
10371 struct rusage rusage, *rusage_ptr;
10372 abi_ulong target_rusage = arg4;
10373 abi_long rusage_err;
10374 if (target_rusage)
10375 rusage_ptr = &rusage;
10376 else
10377 rusage_ptr = NULL;
10378 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10379 if (!is_error(ret)) {
10380 if (status_ptr && ret) {
10381 status = host_to_target_waitstatus(status);
10382 if (put_user_s32(status, status_ptr))
10383 return -TARGET_EFAULT;
10385 if (target_rusage) {
10386 rusage_err = host_to_target_rusage(target_rusage, &rusage);
10387 if (rusage_err) {
10388 ret = rusage_err;
10393 return ret;
10394 #endif
10395 #ifdef TARGET_NR_swapoff
10396 case TARGET_NR_swapoff:
10397 if (!(p = lock_user_string(arg1)))
10398 return -TARGET_EFAULT;
10399 ret = get_errno(swapoff(p));
10400 unlock_user(p, arg1, 0);
10401 return ret;
10402 #endif
10403 case TARGET_NR_sysinfo:
10405 struct target_sysinfo *target_value;
10406 struct sysinfo value;
10407 ret = get_errno(sysinfo(&value));
10408 if (!is_error(ret) && arg1)
10410 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10411 return -TARGET_EFAULT;
10412 __put_user(value.uptime, &target_value->uptime);
10413 __put_user(value.loads[0], &target_value->loads[0]);
10414 __put_user(value.loads[1], &target_value->loads[1]);
10415 __put_user(value.loads[2], &target_value->loads[2]);
10416 __put_user(value.totalram, &target_value->totalram);
10417 __put_user(value.freeram, &target_value->freeram);
10418 __put_user(value.sharedram, &target_value->sharedram);
10419 __put_user(value.bufferram, &target_value->bufferram);
10420 __put_user(value.totalswap, &target_value->totalswap);
10421 __put_user(value.freeswap, &target_value->freeswap);
10422 __put_user(value.procs, &target_value->procs);
10423 __put_user(value.totalhigh, &target_value->totalhigh);
10424 __put_user(value.freehigh, &target_value->freehigh);
10425 __put_user(value.mem_unit, &target_value->mem_unit);
10426 unlock_user_struct(target_value, arg1, 1);
10429 return ret;
10430 #ifdef TARGET_NR_ipc
10431 case TARGET_NR_ipc:
10432 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10433 #endif
10434 #ifdef TARGET_NR_semget
10435 case TARGET_NR_semget:
10436 return get_errno(semget(arg1, arg2, arg3));
10437 #endif
10438 #ifdef TARGET_NR_semop
10439 case TARGET_NR_semop:
10440 return do_semtimedop(arg1, arg2, arg3, 0, false);
10441 #endif
10442 #ifdef TARGET_NR_semtimedop
10443 case TARGET_NR_semtimedop:
10444 return do_semtimedop(arg1, arg2, arg3, arg4, false);
10445 #endif
10446 #ifdef TARGET_NR_semtimedop_time64
10447 case TARGET_NR_semtimedop_time64:
10448 return do_semtimedop(arg1, arg2, arg3, arg4, true);
10449 #endif
10450 #ifdef TARGET_NR_semctl
10451 case TARGET_NR_semctl:
10452 return do_semctl(arg1, arg2, arg3, arg4);
10453 #endif
10454 #ifdef TARGET_NR_msgctl
10455 case TARGET_NR_msgctl:
10456 return do_msgctl(arg1, arg2, arg3);
10457 #endif
10458 #ifdef TARGET_NR_msgget
10459 case TARGET_NR_msgget:
10460 return get_errno(msgget(arg1, arg2));
10461 #endif
10462 #ifdef TARGET_NR_msgrcv
10463 case TARGET_NR_msgrcv:
10464 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10465 #endif
10466 #ifdef TARGET_NR_msgsnd
10467 case TARGET_NR_msgsnd:
10468 return do_msgsnd(arg1, arg2, arg3, arg4);
10469 #endif
10470 #ifdef TARGET_NR_shmget
10471 case TARGET_NR_shmget:
10472 return get_errno(shmget(arg1, arg2, arg3));
10473 #endif
10474 #ifdef TARGET_NR_shmctl
10475 case TARGET_NR_shmctl:
10476 return do_shmctl(arg1, arg2, arg3);
10477 #endif
10478 #ifdef TARGET_NR_shmat
10479 case TARGET_NR_shmat:
10480 return do_shmat(cpu_env, arg1, arg2, arg3);
10481 #endif
10482 #ifdef TARGET_NR_shmdt
10483 case TARGET_NR_shmdt:
10484 return do_shmdt(arg1);
10485 #endif
10486 case TARGET_NR_fsync:
10487 return get_errno(fsync(arg1));
10488 case TARGET_NR_clone:
10489 /* Linux manages to have three different orderings for its
10490 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10491 * match the kernel's CONFIG_CLONE_* settings.
10492 * Microblaze is further special in that it uses a sixth
10493 * implicit argument to clone for the TLS pointer.
10494 */
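/*
 * Roughly, the guest-side argument orders are:
 *   default:           clone(flags, sp, parent_tidptr, child_tidptr, tls)
 *   CLONE_BACKWARDS:   clone(flags, sp, parent_tidptr, tls, child_tidptr)
 *   CLONE_BACKWARDS2:  clone(sp, flags, parent_tidptr, child_tidptr, tls)
 * hence the arg4/arg5 swizzling before calling do_fork().
 */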
10495 #if defined(TARGET_MICROBLAZE)
10496 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10497 #elif defined(TARGET_CLONE_BACKWARDS)
10498 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10499 #elif defined(TARGET_CLONE_BACKWARDS2)
10500 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10501 #else
10502 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10503 #endif
10504 return ret;
10505 #ifdef __NR_exit_group
10506 /* new thread calls */
10507 case TARGET_NR_exit_group:
10508 preexit_cleanup(cpu_env, arg1);
10509 return get_errno(exit_group(arg1));
10510 #endif
10511 case TARGET_NR_setdomainname:
10512 if (!(p = lock_user_string(arg1)))
10513 return -TARGET_EFAULT;
10514 ret = get_errno(setdomainname(p, arg2));
10515 unlock_user(p, arg1, 0);
10516 return ret;
10517 case TARGET_NR_uname:
10518 /* no need to transcode because we use the linux syscall */
10520 struct new_utsname * buf;
10522 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10523 return -TARGET_EFAULT;
10524 ret = get_errno(sys_uname(buf));
10525 if (!is_error(ret)) {
10526 /* Overwrite the native machine name with whatever is being
10527 emulated. */
10528 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10529 sizeof(buf->machine));
10530 /* Allow the user to override the reported release. */
10531 if (qemu_uname_release && *qemu_uname_release) {
10532 g_strlcpy(buf->release, qemu_uname_release,
10533 sizeof(buf->release));
10536 unlock_user_struct(buf, arg1, 1);
10538 return ret;
10539 #ifdef TARGET_I386
10540 case TARGET_NR_modify_ldt:
10541 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10542 #if !defined(TARGET_X86_64)
10543 case TARGET_NR_vm86:
10544 return do_vm86(cpu_env, arg1, arg2);
10545 #endif
10546 #endif
10547 #if defined(TARGET_NR_adjtimex)
10548 case TARGET_NR_adjtimex:
10550 struct timex host_buf;
10552 if (target_to_host_timex(&host_buf, arg1) != 0) {
10553 return -TARGET_EFAULT;
10555 ret = get_errno(adjtimex(&host_buf));
10556 if (!is_error(ret)) {
10557 if (host_to_target_timex(arg1, &host_buf) != 0) {
10558 return -TARGET_EFAULT;
10562 return ret;
10563 #endif
10564 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10565 case TARGET_NR_clock_adjtime:
10567 struct timex htx, *phtx = &htx;
10569 if (target_to_host_timex(phtx, arg2) != 0) {
10570 return -TARGET_EFAULT;
10572 ret = get_errno(clock_adjtime(arg1, phtx));
10573 if (!is_error(ret) && phtx) {
10574 if (host_to_target_timex(arg2, phtx) != 0) {
10575 return -TARGET_EFAULT;
10579 return ret;
10580 #endif
10581 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10582 case TARGET_NR_clock_adjtime64:
10584 struct timex htx;
10586 if (target_to_host_timex64(&htx, arg2) != 0) {
10587 return -TARGET_EFAULT;
10589 ret = get_errno(clock_adjtime(arg1, &htx));
10590 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10591 return -TARGET_EFAULT;
10594 return ret;
10595 #endif
10596 case TARGET_NR_getpgid:
10597 return get_errno(getpgid(arg1));
10598 case TARGET_NR_fchdir:
10599 return get_errno(fchdir(arg1));
10600 case TARGET_NR_personality:
10601 return get_errno(personality(arg1));
10602 #ifdef TARGET_NR__llseek /* Not on alpha */
10603 case TARGET_NR__llseek:
10605 int64_t res;
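/*
 * _llseek splits the 64-bit offset into arg2 (high half) and arg3
 * (low half); arg4 points at where the 64-bit result is stored and
 * arg5 is the whence value.
 */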
10606 #if !defined(__NR_llseek)
10607 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10608 if (res == -1) {
10609 ret = get_errno(res);
10610 } else {
10611 ret = 0;
10613 #else
10614 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10615 #endif
10616 if ((ret == 0) && put_user_s64(res, arg4)) {
10617 return -TARGET_EFAULT;
10620 return ret;
10621 #endif
10622 #ifdef TARGET_NR_getdents
10623 case TARGET_NR_getdents:
10624 return do_getdents(arg1, arg2, arg3);
10625 #endif /* TARGET_NR_getdents */
10626 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10627 case TARGET_NR_getdents64:
10628 return do_getdents64(arg1, arg2, arg3);
10629 #endif /* TARGET_NR_getdents64 */
10630 #if defined(TARGET_NR__newselect)
10631 case TARGET_NR__newselect:
10632 return do_select(arg1, arg2, arg3, arg4, arg5);
10633 #endif
10634 #ifdef TARGET_NR_poll
10635 case TARGET_NR_poll:
10636 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10637 #endif
10638 #ifdef TARGET_NR_ppoll
10639 case TARGET_NR_ppoll:
10640 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10641 #endif
10642 #ifdef TARGET_NR_ppoll_time64
10643 case TARGET_NR_ppoll_time64:
10644 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10645 #endif
10646 case TARGET_NR_flock:
10647 /* NOTE: the flock constant seems to be the same for every
10648 Linux platform */
10649 return get_errno(safe_flock(arg1, arg2));
10650 case TARGET_NR_readv:
10652 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10653 if (vec != NULL) {
10654 ret = get_errno(safe_readv(arg1, vec, arg3));
10655 unlock_iovec(vec, arg2, arg3, 1);
10656 } else {
10657 ret = -host_to_target_errno(errno);
10660 return ret;
10661 case TARGET_NR_writev:
10663 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10664 if (vec != NULL) {
10665 ret = get_errno(safe_writev(arg1, vec, arg3));
10666 unlock_iovec(vec, arg2, arg3, 0);
10667 } else {
10668 ret = -host_to_target_errno(errno);
10671 return ret;
10672 #if defined(TARGET_NR_preadv)
10673 case TARGET_NR_preadv:
10675 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10676 if (vec != NULL) {
10677 unsigned long low, high;
10679 target_to_host_low_high(arg4, arg5, &low, &high);
10680 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10681 unlock_iovec(vec, arg2, arg3, 1);
10682 } else {
10683 ret = -host_to_target_errno(errno);
10686 return ret;
10687 #endif
10688 #if defined(TARGET_NR_pwritev)
10689 case TARGET_NR_pwritev:
10691 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10692 if (vec != NULL) {
10693 unsigned long low, high;
10695 target_to_host_low_high(arg4, arg5, &low, &high);
10696 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10697 unlock_iovec(vec, arg2, arg3, 0);
10698 } else {
10699 ret = -host_to_target_errno(errno);
10702 return ret;
10703 #endif
10704 case TARGET_NR_getsid:
10705 return get_errno(getsid(arg1));
10706 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10707 case TARGET_NR_fdatasync:
10708 return get_errno(fdatasync(arg1));
10709 #endif
10710 case TARGET_NR_sched_getaffinity:
10712 unsigned int mask_size;
10713 unsigned long *mask;
10715 /*
10716 * sched_getaffinity needs multiples of ulong, so need to take
10717 * care of mismatches between target ulong and host ulong sizes.
10718 */
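/*
 * For example, a 32-bit guest on a 64-bit host may pass arg2 == 4:
 * a valid multiple of abi_ulong, but mask_size must be rounded up
 * to 8 so the host kernel sees whole host longs.
 */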
10719 if (arg2 & (sizeof(abi_ulong) - 1)) {
10720 return -TARGET_EINVAL;
10722 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10724 mask = alloca(mask_size);
10725 memset(mask, 0, mask_size);
10726 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10728 if (!is_error(ret)) {
10729 if (ret > arg2) {
10730 /* More data returned than the caller's buffer will fit.
10731 * This only happens if sizeof(abi_long) < sizeof(long)
10732 * and the caller passed us a buffer holding an odd number
10733 * of abi_longs. If the host kernel is actually using the
10734 * extra 4 bytes then fail EINVAL; otherwise we can just
10735 * ignore them and only copy the interesting part.
10736 */
10737 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10738 if (numcpus > arg2 * 8) {
10739 return -TARGET_EINVAL;
10741 ret = arg2;
10744 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10745 return -TARGET_EFAULT;
10749 return ret;
10750 case TARGET_NR_sched_setaffinity:
10752 unsigned int mask_size;
10753 unsigned long *mask;
10755 /*
10756 * sched_setaffinity needs multiples of ulong, so need to take
10757 * care of mismatches between target ulong and host ulong sizes.
10758 */
10759 if (arg2 & (sizeof(abi_ulong) - 1)) {
10760 return -TARGET_EINVAL;
10762 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10763 mask = alloca(mask_size);
10765 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10766 if (ret) {
10767 return ret;
10770 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10772 case TARGET_NR_getcpu:
10774 unsigned cpu, node;
10775 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10776 arg2 ? &node : NULL,
10777 NULL));
10778 if (is_error(ret)) {
10779 return ret;
10781 if (arg1 && put_user_u32(cpu, arg1)) {
10782 return -TARGET_EFAULT;
10784 if (arg2 && put_user_u32(node, arg2)) {
10785 return -TARGET_EFAULT;
10788 return ret;
10789 case TARGET_NR_sched_setparam:
10791 struct target_sched_param *target_schp;
10792 struct sched_param schp;
10794 if (arg2 == 0) {
10795 return -TARGET_EINVAL;
10797 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10798 return -TARGET_EFAULT;
10800 schp.sched_priority = tswap32(target_schp->sched_priority);
10801 unlock_user_struct(target_schp, arg2, 0);
10802 return get_errno(sys_sched_setparam(arg1, &schp));
10804 case TARGET_NR_sched_getparam:
10806 struct target_sched_param *target_schp;
10807 struct sched_param schp;
10809 if (arg2 == 0) {
10810 return -TARGET_EINVAL;
10812 ret = get_errno(sys_sched_getparam(arg1, &schp));
10813 if (!is_error(ret)) {
10814 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10815 return -TARGET_EFAULT;
10817 target_schp->sched_priority = tswap32(schp.sched_priority);
10818 unlock_user_struct(target_schp, arg2, 1);
10821 return ret;
10822 case TARGET_NR_sched_setscheduler:
10824 struct target_sched_param *target_schp;
10825 struct sched_param schp;
10826 if (arg3 == 0) {
10827 return -TARGET_EINVAL;
10829 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10830 return -TARGET_EFAULT;
10832 schp.sched_priority = tswap32(target_schp->sched_priority);
10833 unlock_user_struct(target_schp, arg3, 0);
10834 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10836 case TARGET_NR_sched_getscheduler:
10837 return get_errno(sys_sched_getscheduler(arg1));
10838 case TARGET_NR_sched_getattr:
10840 struct target_sched_attr *target_scha;
10841 struct sched_attr scha;
10842 if (arg2 == 0) {
10843 return -TARGET_EINVAL;
10845 if (arg3 > sizeof(scha)) {
10846 arg3 = sizeof(scha);
10848 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10849 if (!is_error(ret)) {
10850 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10851 if (!target_scha) {
10852 return -TARGET_EFAULT;
10854 target_scha->size = tswap32(scha.size);
10855 target_scha->sched_policy = tswap32(scha.sched_policy);
10856 target_scha->sched_flags = tswap64(scha.sched_flags);
10857 target_scha->sched_nice = tswap32(scha.sched_nice);
10858 target_scha->sched_priority = tswap32(scha.sched_priority);
10859 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10860 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10861 target_scha->sched_period = tswap64(scha.sched_period);
10862 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10863 target_scha->sched_util_min = tswap32(scha.sched_util_min);
10864 target_scha->sched_util_max = tswap32(scha.sched_util_max);
10866 unlock_user(target_scha, arg2, arg3);
10868 return ret;
10870 case TARGET_NR_sched_setattr:
10872 struct target_sched_attr *target_scha;
10873 struct sched_attr scha;
10874 uint32_t size;
10875 int zeroed;
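/*
 * Mirror the kernel's sched_setattr() contract: if the user's struct
 * is larger than the one we understand and the extra bytes are not
 * all zero, fail with E2BIG and report the supported size back via
 * the struct's size field.
 */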
10876 if (arg2 == 0) {
10877 return -TARGET_EINVAL;
10879 if (get_user_u32(size, arg2)) {
10880 return -TARGET_EFAULT;
10882 if (!size) {
10883 size = offsetof(struct target_sched_attr, sched_util_min);
10885 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10886 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10887 return -TARGET_EFAULT;
10889 return -TARGET_E2BIG;
10892 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10893 if (zeroed < 0) {
10894 return zeroed;
10895 } else if (zeroed == 0) {
10896 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10897 return -TARGET_EFAULT;
10899 return -TARGET_E2BIG;
10901 if (size > sizeof(struct target_sched_attr)) {
10902 size = sizeof(struct target_sched_attr);
10905 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10906 if (!target_scha) {
10907 return -TARGET_EFAULT;
10909 scha.size = size;
10910 scha.sched_policy = tswap32(target_scha->sched_policy);
10911 scha.sched_flags = tswap64(target_scha->sched_flags);
10912 scha.sched_nice = tswap32(target_scha->sched_nice);
10913 scha.sched_priority = tswap32(target_scha->sched_priority);
10914 scha.sched_runtime = tswap64(target_scha->sched_runtime);
10915 scha.sched_deadline = tswap64(target_scha->sched_deadline);
10916 scha.sched_period = tswap64(target_scha->sched_period);
10917 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10918 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10919 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10921 unlock_user(target_scha, arg2, 0);
10922 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10924 case TARGET_NR_sched_yield:
10925 return get_errno(sched_yield());
10926 case TARGET_NR_sched_get_priority_max:
10927 return get_errno(sched_get_priority_max(arg1));
10928 case TARGET_NR_sched_get_priority_min:
10929 return get_errno(sched_get_priority_min(arg1));
10930 #ifdef TARGET_NR_sched_rr_get_interval
10931 case TARGET_NR_sched_rr_get_interval:
10933 struct timespec ts;
10934 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10935 if (!is_error(ret)) {
10936 ret = host_to_target_timespec(arg2, &ts);
10939 return ret;
10940 #endif
10941 #ifdef TARGET_NR_sched_rr_get_interval_time64
10942 case TARGET_NR_sched_rr_get_interval_time64:
10944 struct timespec ts;
10945 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10946 if (!is_error(ret)) {
10947 ret = host_to_target_timespec64(arg2, &ts);
10950 return ret;
10951 #endif
10952 #if defined(TARGET_NR_nanosleep)
10953 case TARGET_NR_nanosleep:
10955 struct timespec req, rem;
10956 if (target_to_host_timespec(&req, arg1)) return -TARGET_EFAULT;
10957 ret = get_errno(safe_nanosleep(&req, &rem));
10958 if (is_error(ret) && arg2) {
10959 if (host_to_target_timespec(arg2, &rem)) return -TARGET_EFAULT;
10962 return ret;
10963 #endif
10964 case TARGET_NR_prctl:
10965 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10967 #ifdef TARGET_NR_arch_prctl
10968 case TARGET_NR_arch_prctl:
10969 return do_arch_prctl(cpu_env, arg1, arg2);
10970 #endif
10971 #ifdef TARGET_NR_pread64
10972 case TARGET_NR_pread64:
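/*
 * On ABIs where 64-bit syscall arguments must sit in aligned
 * register pairs, a pad register shifts the offset halves into
 * arg5/arg6, so move them back before reassembling the offset.
 */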
10973 if (regpairs_aligned(cpu_env, num)) {
10974 arg4 = arg5;
10975 arg5 = arg6;
10977 if (arg2 == 0 && arg3 == 0) {
10978 /* Special-case NULL buffer and zero length, which should succeed */
10979 p = 0;
10980 } else {
10981 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10982 if (!p) {
10983 return -TARGET_EFAULT;
10986 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10987 unlock_user(p, arg2, ret);
10988 return ret;
10989 case TARGET_NR_pwrite64:
10990 if (regpairs_aligned(cpu_env, num)) {
10991 arg4 = arg5;
10992 arg5 = arg6;
10994 if (arg2 == 0 && arg3 == 0) {
10995 /* Special-case NULL buffer and zero length, which should succeed */
10996 p = 0;
10997 } else {
10998 p = lock_user(VERIFY_READ, arg2, arg3, 1);
10999 if (!p) {
11000 return -TARGET_EFAULT;
11003 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11004 unlock_user(p, arg2, 0);
11005 return ret;
11006 #endif
11007 case TARGET_NR_getcwd:
11008 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11009 return -TARGET_EFAULT;
11010 ret = get_errno(sys_getcwd1(p, arg2));
11011 unlock_user(p, arg1, ret);
11012 return ret;
11013 case TARGET_NR_capget:
11014 case TARGET_NR_capset:
11016 struct target_user_cap_header *target_header;
11017 struct target_user_cap_data *target_data = NULL;
11018 struct __user_cap_header_struct header;
11019 struct __user_cap_data_struct data[2];
11020 struct __user_cap_data_struct *dataptr = NULL;
11021 int i, target_datalen;
11022 int data_items = 1;
11024 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11025 return -TARGET_EFAULT;
11027 header.version = tswap32(target_header->version);
11028 header.pid = tswap32(target_header->pid);
11030 if (header.version != _LINUX_CAPABILITY_VERSION) {
11031 /* Version 2 and up takes pointer to two user_data structs */
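/*
 * That is, _LINUX_CAPABILITY_VERSION_2 and _3 represent the 64-bit
 * capability sets as an array of two 32-bit data structs, whereas
 * version 1 uses a single struct.
 */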
11032 data_items = 2;
11035 target_datalen = sizeof(*target_data) * data_items;
11037 if (arg2) {
11038 if (num == TARGET_NR_capget) {
11039 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11040 } else {
11041 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11043 if (!target_data) {
11044 unlock_user_struct(target_header, arg1, 0);
11045 return -TARGET_EFAULT;
11048 if (num == TARGET_NR_capset) {
11049 for (i = 0; i < data_items; i++) {
11050 data[i].effective = tswap32(target_data[i].effective);
11051 data[i].permitted = tswap32(target_data[i].permitted);
11052 data[i].inheritable = tswap32(target_data[i].inheritable);
11056 dataptr = data;
11059 if (num == TARGET_NR_capget) {
11060 ret = get_errno(capget(&header, dataptr));
11061 } else {
11062 ret = get_errno(capset(&header, dataptr));
11065 /* The kernel always updates version for both capget and capset */
11066 target_header->version = tswap32(header.version);
11067 unlock_user_struct(target_header, arg1, 1);
11069 if (arg2) {
11070 if (num == TARGET_NR_capget) {
11071 for (i = 0; i < data_items; i++) {
11072 target_data[i].effective = tswap32(data[i].effective);
11073 target_data[i].permitted = tswap32(data[i].permitted);
11074 target_data[i].inheritable = tswap32(data[i].inheritable);
11076 unlock_user(target_data, arg2, target_datalen);
11077 } else {
11078 unlock_user(target_data, arg2, 0);
11081 return ret;
11083 case TARGET_NR_sigaltstack:
11084 return do_sigaltstack(arg1, arg2, cpu_env);
11086 #ifdef CONFIG_SENDFILE
11087 #ifdef TARGET_NR_sendfile
11088 case TARGET_NR_sendfile:
11090 off_t *offp = NULL;
11091 off_t off;
11092 if (arg3) {
11093 ret = get_user_sal(off, arg3);
11094 if (is_error(ret)) {
11095 return ret;
11097 offp = &off;
11099 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11100 if (!is_error(ret) && arg3) {
11101 abi_long ret2 = put_user_sal(off, arg3);
11102 if (is_error(ret2)) {
11103 ret = ret2;
11106 return ret;
11108 #endif
11109 #ifdef TARGET_NR_sendfile64
11110 case TARGET_NR_sendfile64:
11112 off_t *offp = NULL;
11113 off_t off;
11114 if (arg3) {
11115 ret = get_user_s64(off, arg3);
11116 if (is_error(ret)) {
11117 return ret;
11119 offp = &off;
11121 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11122 if (!is_error(ret) && arg3) {
11123 abi_long ret2 = put_user_s64(off, arg3);
11124 if (is_error(ret2)) {
11125 ret = ret2;
11128 return ret;
11130 #endif
11131 #endif
11132 #ifdef TARGET_NR_vfork
11133 case TARGET_NR_vfork:
11134 return get_errno(do_fork(cpu_env,
11135 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11136 0, 0, 0, 0));
11137 #endif
11138 #ifdef TARGET_NR_ugetrlimit
11139 case TARGET_NR_ugetrlimit:
11141 struct rlimit rlim;
11142 int resource = target_to_host_resource(arg1);
11143 ret = get_errno(getrlimit(resource, &rlim));
11144 if (!is_error(ret)) {
11145 struct target_rlimit *target_rlim;
11146 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11147 return -TARGET_EFAULT;
11148 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11149 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11150 unlock_user_struct(target_rlim, arg2, 1);
11152 return ret;
11154 #endif
11155 #ifdef TARGET_NR_truncate64
11156 case TARGET_NR_truncate64:
11157 if (!(p = lock_user_string(arg1)))
11158 return -TARGET_EFAULT;
11159 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11160 unlock_user(p, arg1, 0);
11161 return ret;
11162 #endif
11163 #ifdef TARGET_NR_ftruncate64
11164 case TARGET_NR_ftruncate64:
11165 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11166 #endif
11167 #ifdef TARGET_NR_stat64
11168 case TARGET_NR_stat64:
11169 if (!(p = lock_user_string(arg1))) {
11170 return -TARGET_EFAULT;
11172 ret = get_errno(stat(path(p), &st));
11173 unlock_user(p, arg1, 0);
11174 if (!is_error(ret))
11175 ret = host_to_target_stat64(cpu_env, arg2, &st);
11176 return ret;
11177 #endif
11178 #ifdef TARGET_NR_lstat64
11179 case TARGET_NR_lstat64:
11180 if (!(p = lock_user_string(arg1))) {
11181 return -TARGET_EFAULT;
11183 ret = get_errno(lstat(path(p), &st));
11184 unlock_user(p, arg1, 0);
11185 if (!is_error(ret))
11186 ret = host_to_target_stat64(cpu_env, arg2, &st);
11187 return ret;
11188 #endif
11189 #ifdef TARGET_NR_fstat64
11190 case TARGET_NR_fstat64:
11191 ret = get_errno(fstat(arg1, &st));
11192 if (!is_error(ret))
11193 ret = host_to_target_stat64(cpu_env, arg2, &st);
11194 return ret;
11195 #endif
11196 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11197 #ifdef TARGET_NR_fstatat64
11198 case TARGET_NR_fstatat64:
11199 #endif
11200 #ifdef TARGET_NR_newfstatat
11201 case TARGET_NR_newfstatat:
11202 #endif
11203 if (!(p = lock_user_string(arg2))) {
11204 return -TARGET_EFAULT;
11206 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11207 unlock_user(p, arg2, 0);
11208 if (!is_error(ret))
11209 ret = host_to_target_stat64(cpu_env, arg3, &st);
11210 return ret;
11211 #endif
11212 #if defined(TARGET_NR_statx)
11213 case TARGET_NR_statx:
11215 struct target_statx *target_stx;
11216 int dirfd = arg1;
11217 int flags = arg3;
11219 p = lock_user_string(arg2);
11220 if (p == NULL) {
11221 return -TARGET_EFAULT;
11223 #if defined(__NR_statx)
11225 /*
11226 * It is assumed that struct statx is architecture independent.
11227 */
11228 struct target_statx host_stx;
11229 int mask = arg4;
11231 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11232 if (!is_error(ret)) {
11233 if (host_to_target_statx(&host_stx, arg5) != 0) {
11234 unlock_user(p, arg2, 0);
11235 return -TARGET_EFAULT;
11239 if (ret != -TARGET_ENOSYS) {
11240 unlock_user(p, arg2, 0);
11241 return ret;
11244 #endif
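/*
 * Either the host has no statx() or it returned ENOSYS: fall back
 * to fstatat() and synthesize the statx result from struct stat,
 * leaving fields with no stat equivalent (such as stx_btime) zeroed
 * by the memset below.
 */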
11245 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11246 unlock_user(p, arg2, 0);
11248 if (!is_error(ret)) {
11249 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11250 return -TARGET_EFAULT;
11252 memset(target_stx, 0, sizeof(*target_stx));
11253 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11254 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11255 __put_user(st.st_ino, &target_stx->stx_ino);
11256 __put_user(st.st_mode, &target_stx->stx_mode);
11257 __put_user(st.st_uid, &target_stx->stx_uid);
11258 __put_user(st.st_gid, &target_stx->stx_gid);
11259 __put_user(st.st_nlink, &target_stx->stx_nlink);
11260 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11261 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11262 __put_user(st.st_size, &target_stx->stx_size);
11263 __put_user(st.st_blksize, &target_stx->stx_blksize);
11264 __put_user(st.st_blocks, &target_stx->stx_blocks);
11265 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11266 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11267 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11268 unlock_user_struct(target_stx, arg5, 1);
11271 return ret;
11272 #endif
11273 #ifdef TARGET_NR_lchown
11274 case TARGET_NR_lchown:
11275 if (!(p = lock_user_string(arg1)))
11276 return -TARGET_EFAULT;
11277 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11278 unlock_user(p, arg1, 0);
11279 return ret;
11280 #endif
11281 #ifdef TARGET_NR_getuid
11282 case TARGET_NR_getuid:
11283 return get_errno(high2lowuid(getuid()));
11284 #endif
11285 #ifdef TARGET_NR_getgid
11286 case TARGET_NR_getgid:
11287 return get_errno(high2lowgid(getgid()));
11288 #endif
11289 #ifdef TARGET_NR_geteuid
11290 case TARGET_NR_geteuid:
11291 return get_errno(high2lowuid(geteuid()));
11292 #endif
11293 #ifdef TARGET_NR_getegid
11294 case TARGET_NR_getegid:
11295 return get_errno(high2lowgid(getegid()));
11296 #endif
11297 case TARGET_NR_setreuid:
11298 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11299 case TARGET_NR_setregid:
11300 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11301 case TARGET_NR_getgroups:
11303 int gidsetsize = arg1;
11304 target_id *target_grouplist;
11305 gid_t *grouplist;
11306 int i;
11308 grouplist = alloca(gidsetsize * sizeof(gid_t));
11309 ret = get_errno(getgroups(gidsetsize, grouplist));
11310 if (gidsetsize == 0)
11311 return ret;
11312 if (!is_error(ret)) {
11313 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11314 if (!target_grouplist)
11315 return -TARGET_EFAULT;
11316 for (i = 0; i < ret; i++)
11317 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11318 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11321 return ret;
11322 case TARGET_NR_setgroups:
11324 int gidsetsize = arg1;
11325 target_id *target_grouplist;
11326 gid_t *grouplist = NULL;
11327 int i;
11328 if (gidsetsize) {
11329 grouplist = alloca(gidsetsize * sizeof(gid_t));
11330 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11331 if (!target_grouplist) {
11332 return -TARGET_EFAULT;
11334 for (i = 0; i < gidsetsize; i++) {
11335 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11337 unlock_user(target_grouplist, arg2, 0);
11339 return get_errno(setgroups(gidsetsize, grouplist));
11341 case TARGET_NR_fchown:
11342 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11343 #if defined(TARGET_NR_fchownat)
11344 case TARGET_NR_fchownat:
11345 if (!(p = lock_user_string(arg2)))
11346 return -TARGET_EFAULT;
11347 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11348 low2highgid(arg4), arg5));
11349 unlock_user(p, arg2, 0);
11350 return ret;
11351 #endif
11352 #ifdef TARGET_NR_setresuid
11353 case TARGET_NR_setresuid:
11354 return get_errno(sys_setresuid(low2highuid(arg1),
11355 low2highuid(arg2),
11356 low2highuid(arg3)));
11357 #endif
11358 #ifdef TARGET_NR_getresuid
11359 case TARGET_NR_getresuid:
11361 uid_t ruid, euid, suid;
11362 ret = get_errno(getresuid(&ruid, &euid, &suid));
11363 if (!is_error(ret)) {
11364 if (put_user_id(high2lowuid(ruid), arg1)
11365 || put_user_id(high2lowuid(euid), arg2)
11366 || put_user_id(high2lowuid(suid), arg3))
11367 return -TARGET_EFAULT;
11370 return ret;
11371 #endif
11372 #ifdef TARGET_NR_setresgid
11373 case TARGET_NR_setresgid:
11374 return get_errno(sys_setresgid(low2highgid(arg1),
11375 low2highgid(arg2),
11376 low2highgid(arg3)));
11377 #endif
11378 #ifdef TARGET_NR_getresgid
11379 case TARGET_NR_getresgid:
11381 gid_t rgid, egid, sgid;
11382 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11383 if (!is_error(ret)) {
11384 if (put_user_id(high2lowgid(rgid), arg1)
11385 || put_user_id(high2lowgid(egid), arg2)
11386 || put_user_id(high2lowgid(sgid), arg3))
11387 return -TARGET_EFAULT;
11390 return ret;
11391 #endif
11392 #ifdef TARGET_NR_chown
11393 case TARGET_NR_chown:
11394 if (!(p = lock_user_string(arg1)))
11395 return -TARGET_EFAULT;
11396 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11397 unlock_user(p, arg1, 0);
11398 return ret;
11399 #endif
11400 case TARGET_NR_setuid:
11401 return get_errno(sys_setuid(low2highuid(arg1)));
11402 case TARGET_NR_setgid:
11403 return get_errno(sys_setgid(low2highgid(arg1)));
11404 case TARGET_NR_setfsuid:
11405 return get_errno(setfsuid(arg1));
11406 case TARGET_NR_setfsgid:
11407 return get_errno(setfsgid(arg1));
11409 #ifdef TARGET_NR_lchown32
11410 case TARGET_NR_lchown32:
11411 if (!(p = lock_user_string(arg1)))
11412 return -TARGET_EFAULT;
11413 ret = get_errno(lchown(p, arg2, arg3));
11414 unlock_user(p, arg1, 0);
11415 return ret;
11416 #endif
11417 #ifdef TARGET_NR_getuid32
11418 case TARGET_NR_getuid32:
11419 return get_errno(getuid());
11420 #endif
11422 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11423 /* Alpha specific */
11424 case TARGET_NR_getxuid:
11426 uid_t euid;
11427 euid = geteuid();
11428 cpu_env->ir[IR_A4] = euid;
11430 return get_errno(getuid());
11431 #endif
11432 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11433 /* Alpha specific */
11434 case TARGET_NR_getxgid:
11436 uid_t egid;
11437 egid = getegid();
11438 cpu_env->ir[IR_A4] = egid;
11440 return get_errno(getgid());
11441 #endif
11442 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11443 /* Alpha specific */
11444 case TARGET_NR_osf_getsysinfo:
11445 ret = -TARGET_EOPNOTSUPP;
11446 switch (arg1) {
11447 case TARGET_GSI_IEEE_FP_CONTROL:
11449 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11450 uint64_t swcr = cpu_env->swcr;
11452 swcr &= ~SWCR_STATUS_MASK;
11453 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11455 if (put_user_u64 (swcr, arg2))
11456 return -TARGET_EFAULT;
11457 ret = 0;
11459 break;
11461 /* case GSI_IEEE_STATE_AT_SIGNAL:
11462 -- Not implemented in linux kernel.
11463 case GSI_UACPROC:
11464 -- Retrieves current unaligned access state; not much used.
11465 case GSI_PROC_TYPE:
11466 -- Retrieves implver information; surely not used.
11467 case GSI_GET_HWRPB:
11468 -- Grabs a copy of the HWRPB; surely not used.
11469 */
11471 return ret;
11472 #endif
11473 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11474 /* Alpha specific */
11475 case TARGET_NR_osf_setsysinfo:
11476 ret = -TARGET_EOPNOTSUPP;
11477 switch (arg1) {
11478 case TARGET_SSI_IEEE_FP_CONTROL:
11480 uint64_t swcr, fpcr;
11482 if (get_user_u64 (swcr, arg2)) {
11483 return -TARGET_EFAULT;
11486 /*
11487 * The kernel calls swcr_update_status to update the
11488 * status bits from the fpcr at every point that it
11489 * could be queried. Therefore, we store the status
11490 * bits only in FPCR.
11491 */
11492 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11494 fpcr = cpu_alpha_load_fpcr(cpu_env);
11495 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11496 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11497 cpu_alpha_store_fpcr(cpu_env, fpcr);
11498 ret = 0;
11500 break;
11502 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11504 uint64_t exc, fpcr, fex;
11506 if (get_user_u64(exc, arg2)) {
11507 return -TARGET_EFAULT;
11509 exc &= SWCR_STATUS_MASK;
11510 fpcr = cpu_alpha_load_fpcr(cpu_env);
11512 /* Old exceptions are not signaled. */
11513 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11514 fex = exc & ~fex;
11515 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11516 fex &= (cpu_env)->swcr;
11518 /* Update the hardware fpcr. */
11519 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11520 cpu_alpha_store_fpcr(cpu_env, fpcr);
11522 if (fex) {
11523 int si_code = TARGET_FPE_FLTUNK;
11524 target_siginfo_t info;
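/*
 * Choose si_code for the most significant enabled exception: each
 * later test overwrites the earlier ones, so e.g. invalid-operation
 * takes precedence over inexact.
 */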
11526 if (fex & SWCR_TRAP_ENABLE_DNO) {
11527 si_code = TARGET_FPE_FLTUND;
11529 if (fex & SWCR_TRAP_ENABLE_INE) {
11530 si_code = TARGET_FPE_FLTRES;
11532 if (fex & SWCR_TRAP_ENABLE_UNF) {
11533 si_code = TARGET_FPE_FLTUND;
11535 if (fex & SWCR_TRAP_ENABLE_OVF) {
11536 si_code = TARGET_FPE_FLTOVF;
11538 if (fex & SWCR_TRAP_ENABLE_DZE) {
11539 si_code = TARGET_FPE_FLTDIV;
11541 if (fex & SWCR_TRAP_ENABLE_INV) {
11542 si_code = TARGET_FPE_FLTINV;
11545 info.si_signo = SIGFPE;
11546 info.si_errno = 0;
11547 info.si_code = si_code;
11548 info._sifields._sigfault._addr = (cpu_env)->pc;
11549 queue_signal(cpu_env, info.si_signo,
11550 QEMU_SI_FAULT, &info);
11552 ret = 0;
11554 break;
11556 /* case SSI_NVPAIRS:
11557 -- Used with SSIN_UACPROC to enable unaligned accesses.
11558 case SSI_IEEE_STATE_AT_SIGNAL:
11559 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11560 -- Not implemented in linux kernel
11561 */
11563 return ret;
11564 #endif
11565 #ifdef TARGET_NR_osf_sigprocmask
11566 /* Alpha specific. */
11567 case TARGET_NR_osf_sigprocmask:
11569 abi_ulong mask;
11570 int how;
11571 sigset_t set, oldset;
11573 switch(arg1) {
11574 case TARGET_SIG_BLOCK:
11575 how = SIG_BLOCK;
11576 break;
11577 case TARGET_SIG_UNBLOCK:
11578 how = SIG_UNBLOCK;
11579 break;
11580 case TARGET_SIG_SETMASK:
11581 how = SIG_SETMASK;
11582 break;
11583 default:
11584 return -TARGET_EINVAL;
11586 mask = arg2;
11587 target_to_host_old_sigset(&set, &mask);
11588 ret = do_sigprocmask(how, &set, &oldset);
11589 if (!ret) {
11590 host_to_target_old_sigset(&mask, &oldset);
11591 ret = mask;
11594 return ret;
11595 #endif
11597 #ifdef TARGET_NR_getgid32
11598 case TARGET_NR_getgid32:
11599 return get_errno(getgid());
11600 #endif
11601 #ifdef TARGET_NR_geteuid32
11602 case TARGET_NR_geteuid32:
11603 return get_errno(geteuid());
11604 #endif
11605 #ifdef TARGET_NR_getegid32
11606 case TARGET_NR_getegid32:
11607 return get_errno(getegid());
11608 #endif
11609 #ifdef TARGET_NR_setreuid32
11610 case TARGET_NR_setreuid32:
11611 return get_errno(setreuid(arg1, arg2));
11612 #endif
11613 #ifdef TARGET_NR_setregid32
11614 case TARGET_NR_setregid32:
11615 return get_errno(setregid(arg1, arg2));
11616 #endif
11617 #ifdef TARGET_NR_getgroups32
11618 case TARGET_NR_getgroups32:
11620 int gidsetsize = arg1;
11621 uint32_t *target_grouplist;
11622 gid_t *grouplist;
11623 int i;
11625 grouplist = alloca(gidsetsize * sizeof(gid_t));
11626 ret = get_errno(getgroups(gidsetsize, grouplist));
11627 if (gidsetsize == 0)
11628 return ret;
11629 if (!is_error(ret)) {
11630 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11631 if (!target_grouplist) {
11632 return -TARGET_EFAULT;
11634 for (i = 0; i < ret; i++)
11635 target_grouplist[i] = tswap32(grouplist[i]);
11636 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11639 return ret;
11640 #endif
11641 #ifdef TARGET_NR_setgroups32
11642 case TARGET_NR_setgroups32:
11644 int gidsetsize = arg1;
11645 uint32_t *target_grouplist;
11646 gid_t *grouplist;
11647 int i;
11649 grouplist = alloca(gidsetsize * sizeof(gid_t));
11650 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11651 if (!target_grouplist) {
11652 return -TARGET_EFAULT;
11654 for (i = 0; i < gidsetsize; i++)
11655 grouplist[i] = tswap32(target_grouplist[i]);
11656 unlock_user(target_grouplist, arg2, 0);
11657 return get_errno(setgroups(gidsetsize, grouplist));
11659 #endif
11660 #ifdef TARGET_NR_fchown32
11661 case TARGET_NR_fchown32:
11662 return get_errno(fchown(arg1, arg2, arg3));
11663 #endif
11664 #ifdef TARGET_NR_setresuid32
11665 case TARGET_NR_setresuid32:
11666 return get_errno(sys_setresuid(arg1, arg2, arg3));
11667 #endif
11668 #ifdef TARGET_NR_getresuid32
11669 case TARGET_NR_getresuid32:
11671 uid_t ruid, euid, suid;
11672 ret = get_errno(getresuid(&ruid, &euid, &suid));
11673 if (!is_error(ret)) {
11674 if (put_user_u32(ruid, arg1)
11675 || put_user_u32(euid, arg2)
11676 || put_user_u32(suid, arg3))
11677 return -TARGET_EFAULT;
11680 return ret;
11681 #endif
11682 #ifdef TARGET_NR_setresgid32
11683 case TARGET_NR_setresgid32:
11684 return get_errno(sys_setresgid(arg1, arg2, arg3));
11685 #endif
11686 #ifdef TARGET_NR_getresgid32
11687 case TARGET_NR_getresgid32:
11689 gid_t rgid, egid, sgid;
11690 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11691 if (!is_error(ret)) {
11692 if (put_user_u32(rgid, arg1)
11693 || put_user_u32(egid, arg2)
11694 || put_user_u32(sgid, arg3))
11695 return -TARGET_EFAULT;
11698 return ret;
11699 #endif
11700 #ifdef TARGET_NR_chown32
11701 case TARGET_NR_chown32:
11702 if (!(p = lock_user_string(arg1)))
11703 return -TARGET_EFAULT;
11704 ret = get_errno(chown(p, arg2, arg3));
11705 unlock_user(p, arg1, 0);
11706 return ret;
11707 #endif
11708 #ifdef TARGET_NR_setuid32
11709 case TARGET_NR_setuid32:
11710 return get_errno(sys_setuid(arg1));
11711 #endif
11712 #ifdef TARGET_NR_setgid32
11713 case TARGET_NR_setgid32:
11714 return get_errno(sys_setgid(arg1));
11715 #endif
11716 #ifdef TARGET_NR_setfsuid32
11717 case TARGET_NR_setfsuid32:
11718 return get_errno(setfsuid(arg1));
11719 #endif
11720 #ifdef TARGET_NR_setfsgid32
11721 case TARGET_NR_setfsgid32:
11722 return get_errno(setfsgid(arg1));
11723 #endif
11724 #ifdef TARGET_NR_mincore
11725 case TARGET_NR_mincore:
11727 void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11728 if (!a) {
11729 return -TARGET_ENOMEM;
11731 p = lock_user_string(arg3);
11732 if (!p) {
11733 ret = -TARGET_EFAULT;
11734 } else {
11735 ret = get_errno(mincore(a, arg2, p));
11736 unlock_user(p, arg3, ret);
11738 unlock_user(a, arg1, 0);
11740 return ret;
11741 #endif
11742 #ifdef TARGET_NR_arm_fadvise64_64
11743 case TARGET_NR_arm_fadvise64_64:
11744 /* arm_fadvise64_64 looks like fadvise64_64 but
11745 * with different argument order: fd, advice, offset, len
11746 * rather than the usual fd, offset, len, advice.
11747 * Note that offset and len are both 64-bit so appear as
11748 * pairs of 32-bit registers.
11749 */
11750 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11751 target_offset64(arg5, arg6), arg2);
11752 return -host_to_target_errno(ret);
11753 #endif
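/*
 * Editorial note (added; not part of the original source): concretely,
 * a guest arm_fadvise64_64(fd, advice, off, len) arrives here as
 *     arg1 = fd, arg2 = advice,
 *     arg3/arg4 = the two 32-bit halves of off,
 *     arg5/arg6 = the two 32-bit halves of len,
 * and target_offset64() is what reassembles each register pair into a
 * 64-bit value, in the target's own word order, before the host call.
 */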
11755 #if TARGET_ABI_BITS == 32
11757 #ifdef TARGET_NR_fadvise64_64
11758 case TARGET_NR_fadvise64_64:
11759 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11760 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11761 ret = arg2;
11762 arg2 = arg3;
11763 arg3 = arg4;
11764 arg4 = arg5;
11765 arg5 = arg6;
11766 arg6 = ret;
11767 #else
11768 /* 6 args: fd, offset (high, low), len (high, low), advice */
11769 if (regpairs_aligned(cpu_env, num)) {
11770 /* offset is in (3,4), len in (5,6) and advice in 7 */
11771 arg2 = arg3;
11772 arg3 = arg4;
11773 arg4 = arg5;
11774 arg5 = arg6;
11775 arg6 = arg7;
11776 }
11777 #endif
11778 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11779 target_offset64(arg4, arg5), arg6);
11780 return -host_to_target_errno(ret);
11781 #endif
11783 #ifdef TARGET_NR_fadvise64
11784 case TARGET_NR_fadvise64:
11785 /* 5 args: fd, offset (high, low), len, advice */
11786 if (regpairs_aligned(cpu_env, num)) {
11787 /* offset is in (3,4), len in 5 and advice in 6 */
11788 arg2 = arg3;
11789 arg3 = arg4;
11790 arg4 = arg5;
11791 arg5 = arg6;
11792 }
11793 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11794 return -host_to_target_errno(ret);
11795 #endif
11797 #else /* not a 32-bit ABI */
11798 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11799 #ifdef TARGET_NR_fadvise64_64
11800 case TARGET_NR_fadvise64_64:
11801 #endif
11802 #ifdef TARGET_NR_fadvise64
11803 case TARGET_NR_fadvise64:
11804 #endif
11805 #ifdef TARGET_S390X
11806 switch (arg4) {
11807 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11808 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11809 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11810 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11811 default: break;
11812 }
11813 #endif
11814 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11815 #endif
11816 #endif /* end of 64-bit ABI fadvise handling */
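/*
 * Editorial sketch (added): target_offset64() is assumed to behave
 * roughly like the following, with the word order picked to match the
 * target ABI rather than always low-then-high:
 *
 *     static inline uint64_t example_offset64(uint32_t w0, uint32_t w1)
 *     {
 *         return ((uint64_t)w1 << 32) | w0;   /- order is ABI-dependent -/
 *     }
 *
 * That is why the regpairs_aligned() shuffles above only slide whole
 * argument slots and never split one half of a 64-bit value.
 */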
11818 #ifdef TARGET_NR_madvise
11819 case TARGET_NR_madvise:
11820 return target_madvise(arg1, arg2, arg3);
11821 #endif
11822 #ifdef TARGET_NR_fcntl64
11823 case TARGET_NR_fcntl64:
11824 {
11825 int cmd;
11826 struct flock64 fl;
11827 from_flock64_fn *copyfrom = copy_from_user_flock64;
11828 to_flock64_fn *copyto = copy_to_user_flock64;
11830 #ifdef TARGET_ARM
11831 if (!cpu_env->eabi) {
11832 copyfrom = copy_from_user_oabi_flock64;
11833 copyto = copy_to_user_oabi_flock64;
11834 }
11835 #endif
11837 cmd = target_to_host_fcntl_cmd(arg2);
11838 if (cmd == -TARGET_EINVAL) {
11839 return cmd;
11840 }
11842 switch(arg2) {
11843 case TARGET_F_GETLK64:
11844 ret = copyfrom(&fl, arg3);
11845 if (ret) {
11846 break;
11847 }
11848 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11849 if (ret == 0) {
11850 ret = copyto(arg3, &fl);
11851 }
11852 break;
11854 case TARGET_F_SETLK64:
11855 case TARGET_F_SETLKW64:
11856 ret = copyfrom(&fl, arg3);
11857 if (ret) {
11858 break;
11859 }
11860 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11861 break;
11862 default:
11863 ret = do_fcntl(arg1, arg2, arg3);
11864 break;
11865 }
11866 return ret;
11867 }
11868 #endif
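/*
 * Editorial note (added): the copyfrom/copyto indirection above exists
 * because the ARM OABI lays out the guest flock64 structure differently
 * from EABI; both helpers are expected to translate between the guest
 * layout and the host struct flock64 so that the same safe_fcntl() call
 * can serve either ABI.
 */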
11869 #ifdef TARGET_NR_cacheflush
11870 case TARGET_NR_cacheflush:
11871 /* self-modifying code is handled automatically, so nothing needed */
11872 return 0;
11873 #endif
11874 #ifdef TARGET_NR_getpagesize
11875 case TARGET_NR_getpagesize:
11876 return TARGET_PAGE_SIZE;
11877 #endif
11878 case TARGET_NR_gettid:
11879 return get_errno(sys_gettid());
11880 #ifdef TARGET_NR_readahead
11881 case TARGET_NR_readahead:
11882 #if TARGET_ABI_BITS == 32
11883 if (regpairs_aligned(cpu_env, num)) {
11884 arg2 = arg3;
11885 arg3 = arg4;
11886 arg4 = arg5;
11887 }
11888 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11889 #else
11890 ret = get_errno(readahead(arg1, arg2, arg3));
11891 #endif
11892 return ret;
11893 #endif
11894 #ifdef CONFIG_ATTR
11895 #ifdef TARGET_NR_setxattr
11896 case TARGET_NR_listxattr:
11897 case TARGET_NR_llistxattr:
11898 {
11899 void *p, *b = 0;
11900 if (arg2) {
11901 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11902 if (!b) {
11903 return -TARGET_EFAULT;
11904 }
11905 }
11906 p = lock_user_string(arg1);
11907 if (p) {
11908 if (num == TARGET_NR_listxattr) {
11909 ret = get_errno(listxattr(p, b, arg3));
11910 } else {
11911 ret = get_errno(llistxattr(p, b, arg3));
11912 }
11913 } else {
11914 ret = -TARGET_EFAULT;
11915 }
11916 unlock_user(p, arg1, 0);
11917 unlock_user(b, arg2, arg3);
11918 return ret;
11919 }
11920 case TARGET_NR_flistxattr:
11921 {
11922 void *b = 0;
11923 if (arg2) {
11924 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11925 if (!b) {
11926 return -TARGET_EFAULT;
11927 }
11928 }
11929 ret = get_errno(flistxattr(arg1, b, arg3));
11930 unlock_user(b, arg2, arg3);
11931 return ret;
11932 }
11933 case TARGET_NR_setxattr:
11934 case TARGET_NR_lsetxattr:
11935 {
11936 void *p, *n, *v = 0;
11937 if (arg3) {
11938 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11939 if (!v) {
11940 return -TARGET_EFAULT;
11941 }
11942 }
11943 p = lock_user_string(arg1);
11944 n = lock_user_string(arg2);
11945 if (p && n) {
11946 if (num == TARGET_NR_setxattr) {
11947 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11948 } else {
11949 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11950 }
11951 } else {
11952 ret = -TARGET_EFAULT;
11953 }
11954 unlock_user(p, arg1, 0);
11955 unlock_user(n, arg2, 0);
11956 unlock_user(v, arg3, 0);
11957 }
11958 return ret;
11959 case TARGET_NR_fsetxattr:
11960 {
11961 void *n, *v = 0;
11962 if (arg3) {
11963 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11964 if (!v) {
11965 return -TARGET_EFAULT;
11966 }
11967 }
11968 n = lock_user_string(arg2);
11969 if (n) {
11970 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11971 } else {
11972 ret = -TARGET_EFAULT;
11973 }
11974 unlock_user(n, arg2, 0);
11975 unlock_user(v, arg3, 0);
11976 }
11977 return ret;
11978 case TARGET_NR_getxattr:
11979 case TARGET_NR_lgetxattr:
11980 {
11981 void *p, *n, *v = 0;
11982 if (arg3) {
11983 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11984 if (!v) {
11985 return -TARGET_EFAULT;
11986 }
11987 }
11988 p = lock_user_string(arg1);
11989 n = lock_user_string(arg2);
11990 if (p && n) {
11991 if (num == TARGET_NR_getxattr) {
11992 ret = get_errno(getxattr(p, n, v, arg4));
11993 } else {
11994 ret = get_errno(lgetxattr(p, n, v, arg4));
11995 }
11996 } else {
11997 ret = -TARGET_EFAULT;
11998 }
11999 unlock_user(p, arg1, 0);
12000 unlock_user(n, arg2, 0);
12001 unlock_user(v, arg3, arg4);
12002 }
12003 return ret;
12004 case TARGET_NR_fgetxattr:
12005 {
12006 void *n, *v = 0;
12007 if (arg3) {
12008 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12009 if (!v) {
12010 return -TARGET_EFAULT;
12011 }
12012 }
12013 n = lock_user_string(arg2);
12014 if (n) {
12015 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12016 } else {
12017 ret = -TARGET_EFAULT;
12018 }
12019 unlock_user(n, arg2, 0);
12020 unlock_user(v, arg3, arg4);
12021 }
12022 return ret;
12023 case TARGET_NR_removexattr:
12024 case TARGET_NR_lremovexattr:
12025 {
12026 void *p, *n;
12027 p = lock_user_string(arg1);
12028 n = lock_user_string(arg2);
12029 if (p && n) {
12030 if (num == TARGET_NR_removexattr) {
12031 ret = get_errno(removexattr(p, n));
12032 } else {
12033 ret = get_errno(lremovexattr(p, n));
12034 }
12035 } else {
12036 ret = -TARGET_EFAULT;
12037 }
12038 unlock_user(p, arg1, 0);
12039 unlock_user(n, arg2, 0);
12040 }
12041 return ret;
12042 case TARGET_NR_fremovexattr:
12043 {
12044 void *n;
12045 n = lock_user_string(arg2);
12046 if (n) {
12047 ret = get_errno(fremovexattr(arg1, n));
12048 } else {
12049 ret = -TARGET_EFAULT;
12050 }
12051 unlock_user(n, arg2, 0);
12052 }
12053 return ret;
12054 #endif
12055 #endif /* CONFIG_ATTR */
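/*
 * Editorial note (added): every xattr case above follows one pattern:
 * lock_user()/lock_user_string() pin the guest buffers (copying them in
 * for VERIFY_READ), the host xattr call runs, and unlock_user(ptr, addr,
 * len) copies 'len' bytes back for writable buffers (len 0 when nothing
 * needs to be written back). Any lock failure maps to -TARGET_EFAULT.
 */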
12056 #ifdef TARGET_NR_set_thread_area
12057 case TARGET_NR_set_thread_area:
12058 #if defined(TARGET_MIPS)
12059 cpu_env->active_tc.CP0_UserLocal = arg1;
12060 return 0;
12061 #elif defined(TARGET_CRIS)
12062 if (arg1 & 0xff)
12063 ret = -TARGET_EINVAL;
12064 else {
12065 cpu_env->pregs[PR_PID] = arg1;
12066 ret = 0;
12067 }
12068 return ret;
12069 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12070 return do_set_thread_area(cpu_env, arg1);
12071 #elif defined(TARGET_M68K)
12072 {
12073 TaskState *ts = cpu->opaque;
12074 ts->tp_value = arg1;
12075 return 0;
12076 }
12077 #else
12078 return -TARGET_ENOSYS;
12079 #endif
12080 #endif
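/*
 * Editorial note (added): set_thread_area installs the guest's TLS
 * pointer, so each architecture stores arg1 in its own TLS slot (CP0
 * UserLocal on MIPS, PR_PID on CRIS, a GDT descriptor via
 * do_set_thread_area() on 32-bit x86, TaskState.tp_value on m68k);
 * targets with no such register report -TARGET_ENOSYS.
 */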
12081 #ifdef TARGET_NR_get_thread_area
12082 case TARGET_NR_get_thread_area:
12083 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12084 return do_get_thread_area(cpu_env, arg1);
12085 #elif defined(TARGET_M68K)
12086 {
12087 TaskState *ts = cpu->opaque;
12088 return ts->tp_value;
12089 }
12090 #else
12091 return -TARGET_ENOSYS;
12092 #endif
12093 #endif
12094 #ifdef TARGET_NR_getdomainname
12095 case TARGET_NR_getdomainname:
12096 return -TARGET_ENOSYS;
12097 #endif
12099 #ifdef TARGET_NR_clock_settime
12100 case TARGET_NR_clock_settime:
12101 {
12102 struct timespec ts;
12104 ret = target_to_host_timespec(&ts, arg2);
12105 if (!is_error(ret)) {
12106 ret = get_errno(clock_settime(arg1, &ts));
12107 }
12108 return ret;
12109 }
12110 #endif
12111 #ifdef TARGET_NR_clock_settime64
12112 case TARGET_NR_clock_settime64:
12113 {
12114 struct timespec ts;
12116 ret = target_to_host_timespec64(&ts, arg2);
12117 if (!is_error(ret)) {
12118 ret = get_errno(clock_settime(arg1, &ts));
12119 }
12120 return ret;
12121 }
12122 #endif
12123 #ifdef TARGET_NR_clock_gettime
12124 case TARGET_NR_clock_gettime:
12125 {
12126 struct timespec ts;
12127 ret = get_errno(clock_gettime(arg1, &ts));
12128 if (!is_error(ret)) {
12129 ret = host_to_target_timespec(arg2, &ts);
12130 }
12131 return ret;
12132 }
12133 #endif
12134 #ifdef TARGET_NR_clock_gettime64
12135 case TARGET_NR_clock_gettime64:
12136 {
12137 struct timespec ts;
12138 ret = get_errno(clock_gettime(arg1, &ts));
12139 if (!is_error(ret)) {
12140 ret = host_to_target_timespec64(arg2, &ts);
12141 }
12142 return ret;
12143 }
12144 #endif
12145 #ifdef TARGET_NR_clock_getres
12146 case TARGET_NR_clock_getres:
12147 {
12148 struct timespec ts;
12149 ret = get_errno(clock_getres(arg1, &ts));
12150 if (!is_error(ret)) {
12151 host_to_target_timespec(arg2, &ts);
12152 }
12153 return ret;
12154 }
12155 #endif
12156 #ifdef TARGET_NR_clock_getres_time64
12157 case TARGET_NR_clock_getres_time64:
12158 {
12159 struct timespec ts;
12160 ret = get_errno(clock_getres(arg1, &ts));
12161 if (!is_error(ret)) {
12162 host_to_target_timespec64(arg2, &ts);
12163 }
12164 return ret;
12165 }
12166 #endif
12167 #ifdef TARGET_NR_clock_nanosleep
12168 case TARGET_NR_clock_nanosleep:
12169 {
12170 struct timespec ts;
12171 if (target_to_host_timespec(&ts, arg3)) {
12172 return -TARGET_EFAULT;
12173 }
12174 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12175 &ts, arg4 ? &ts : NULL));
12176 /*
12177 * If the call is interrupted by a signal handler, it fails with
12178 * -TARGET_EINTR; if arg4 is non-NULL and arg2 is not TIMER_ABSTIME,
12179 * the remaining unslept time is then returned in arg4.
12180 */
12181 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12182 host_to_target_timespec(arg4, &ts)) {
12183 return -TARGET_EFAULT;
12184 }
12186 return ret;
12187 }
12188 #endif
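/*
 * Editorial example (added): for a relative guest sleep such as
 *     clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem);
 * an interrupting signal makes the case above return -TARGET_EINTR and,
 * since the flags lack TIMER_ABSTIME, the unslept remainder is copied
 * back through arg4; absolute sleeps never update 'rem', matching Linux.
 */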
12189 #ifdef TARGET_NR_clock_nanosleep_time64
12190 case TARGET_NR_clock_nanosleep_time64:
12191 {
12192 struct timespec ts;
12194 if (target_to_host_timespec64(&ts, arg3)) {
12195 return -TARGET_EFAULT;
12196 }
12198 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12199 &ts, arg4 ? &ts : NULL));
12201 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12202 host_to_target_timespec64(arg4, &ts)) {
12203 return -TARGET_EFAULT;
12204 }
12205 return ret;
12206 }
12207 #endif
12209 #if defined(TARGET_NR_set_tid_address)
12210 case TARGET_NR_set_tid_address:
12211 {
12212 TaskState *ts = cpu->opaque;
12213 ts->child_tidptr = arg1;
12214 /* do not call host set_tid_address() syscall, instead return tid() */
12215 return get_errno(sys_gettid());
12216 }
12217 #endif
12219 case TARGET_NR_tkill:
12220 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12222 case TARGET_NR_tgkill:
12223 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12224 target_to_host_signal(arg3)));
12226 #ifdef TARGET_NR_set_robust_list
12227 case TARGET_NR_set_robust_list:
12228 case TARGET_NR_get_robust_list:
12229 /* The ABI for supporting robust futexes has userspace pass
12230 * the kernel a pointer to a linked list which is updated by
12231 * userspace after the syscall; the list is walked by the kernel
12232 * when the thread exits. Since the linked list in QEMU guest
12233 * memory isn't a valid linked list for the host and we have
12234 * no way to reliably intercept the thread-death event, we can't
12235 * support these. Silently return ENOSYS so that guest userspace
12236 * falls back to a non-robust futex implementation (which should
12237 * be OK except in the corner case of the guest crashing while
12238 * holding a mutex that is shared with another process via
12239 * shared memory).
12240 */
12241 return -TARGET_ENOSYS;
12242 #endif
12244 #if defined(TARGET_NR_utimensat)
12245 case TARGET_NR_utimensat:
12246 {
12247 struct timespec *tsp, ts[2];
12248 if (!arg3) {
12249 tsp = NULL;
12250 } else {
12251 if (target_to_host_timespec(ts, arg3)) {
12252 return -TARGET_EFAULT;
12253 }
12254 if (target_to_host_timespec(ts + 1, arg3 +
12255 sizeof(struct target_timespec))) {
12256 return -TARGET_EFAULT;
12257 }
12258 tsp = ts;
12259 }
12260 if (!arg2)
12261 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12262 else {
12263 if (!(p = lock_user_string(arg2))) {
12264 return -TARGET_EFAULT;
12265 }
12266 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12267 unlock_user(p, arg2, 0);
12268 }
12269 }
12270 return ret;
12271 #endif
12272 #ifdef TARGET_NR_utimensat_time64
12273 case TARGET_NR_utimensat_time64:
12274 {
12275 struct timespec *tsp, ts[2];
12276 if (!arg3) {
12277 tsp = NULL;
12278 } else {
12279 if (target_to_host_timespec64(ts, arg3)) {
12280 return -TARGET_EFAULT;
12281 }
12282 if (target_to_host_timespec64(ts + 1, arg3 +
12283 sizeof(struct target__kernel_timespec))) {
12284 return -TARGET_EFAULT;
12285 }
12286 tsp = ts;
12287 }
12288 if (!arg2)
12289 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12290 else {
12291 p = lock_user_string(arg2);
12292 if (!p) {
12293 return -TARGET_EFAULT;
12294 }
12295 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12296 unlock_user(p, arg2, 0);
12297 }
12298 }
12299 return ret;
12300 #endif
12301 #ifdef TARGET_NR_futex
12302 case TARGET_NR_futex:
12303 return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12304 #endif
12305 #ifdef TARGET_NR_futex_time64
12306 case TARGET_NR_futex_time64:
12307 return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12308 #endif
12309 #ifdef CONFIG_INOTIFY
12310 #if defined(TARGET_NR_inotify_init)
12311 case TARGET_NR_inotify_init:
12312 ret = get_errno(inotify_init());
12313 if (ret >= 0) {
12314 fd_trans_register(ret, &target_inotify_trans);
12315 }
12316 return ret;
12317 #endif
12318 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12319 case TARGET_NR_inotify_init1:
12320 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12321 fcntl_flags_tbl)));
12322 if (ret >= 0) {
12323 fd_trans_register(ret, &target_inotify_trans);
12324 }
12325 return ret;
12326 #endif
12327 #if defined(TARGET_NR_inotify_add_watch)
12328 case TARGET_NR_inotify_add_watch:
12329 p = lock_user_string(arg2);
12330 ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12331 unlock_user(p, arg2, 0);
12332 return ret;
12333 #endif
12334 #if defined(TARGET_NR_inotify_rm_watch)
12335 case TARGET_NR_inotify_rm_watch:
12336 return get_errno(inotify_rm_watch(arg1, arg2));
12337 #endif
12338 #endif
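/*
 * Editorial note (added): fd_trans_register() attaches translation hooks
 * to the new descriptor; target_inotify_trans is assumed to convert the
 * host struct inotify_event records read from the fd into the guest's
 * byte order and layout, so guest code can parse the event stream.
 */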
12340 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12341 case TARGET_NR_mq_open:
12342 {
12343 struct mq_attr posix_mq_attr;
12344 struct mq_attr *pposix_mq_attr;
12345 int host_flags;
12347 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12348 pposix_mq_attr = NULL;
12349 if (arg4) {
12350 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12351 return -TARGET_EFAULT;
12352 }
12353 pposix_mq_attr = &posix_mq_attr;
12354 }
12355 p = lock_user_string(arg1 - 1);
12356 if (!p) {
12357 return -TARGET_EFAULT;
12358 }
12359 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12360 unlock_user (p, arg1, 0);
12361 }
12362 return ret;
12364 case TARGET_NR_mq_unlink:
12365 p = lock_user_string(arg1 - 1);
12366 if (!p) {
12367 return -TARGET_EFAULT;
12368 }
12369 ret = get_errno(mq_unlink(p));
12370 unlock_user (p, arg1, 0);
12371 return ret;
12373 #ifdef TARGET_NR_mq_timedsend
12374 case TARGET_NR_mq_timedsend:
12375 {
12376 struct timespec ts;
12378 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12379 if (arg5 != 0) {
12380 if (target_to_host_timespec(&ts, arg5)) {
12381 return -TARGET_EFAULT;
12382 }
12383 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12384 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12385 return -TARGET_EFAULT;
12386 }
12387 } else {
12388 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12389 }
12390 unlock_user (p, arg2, arg3);
12391 }
12392 return ret;
12393 #endif
12394 #ifdef TARGET_NR_mq_timedsend_time64
12395 case TARGET_NR_mq_timedsend_time64:
12396 {
12397 struct timespec ts;
12399 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12400 if (arg5 != 0) {
12401 if (target_to_host_timespec64(&ts, arg5)) {
12402 return -TARGET_EFAULT;
12403 }
12404 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12405 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12406 return -TARGET_EFAULT;
12407 }
12408 } else {
12409 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12410 }
12411 unlock_user(p, arg2, arg3);
12412 }
12413 return ret;
12414 #endif
12416 #ifdef TARGET_NR_mq_timedreceive
12417 case TARGET_NR_mq_timedreceive:
12418 {
12419 struct timespec ts;
12420 unsigned int prio;
12422 p = lock_user (VERIFY_READ, arg2, arg3, 1);
12423 if (arg5 != 0) {
12424 if (target_to_host_timespec(&ts, arg5)) {
12425 return -TARGET_EFAULT;
12426 }
12427 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12428 &prio, &ts));
12429 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12430 return -TARGET_EFAULT;
12431 }
12432 } else {
12433 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12434 &prio, NULL));
12435 }
12436 unlock_user (p, arg2, arg3);
12437 if (arg4 != 0)
12438 put_user_u32(prio, arg4);
12439 }
12440 return ret;
12441 #endif
12442 #ifdef TARGET_NR_mq_timedreceive_time64
12443 case TARGET_NR_mq_timedreceive_time64:
12444 {
12445 struct timespec ts;
12446 unsigned int prio;
12448 p = lock_user(VERIFY_READ, arg2, arg3, 1);
12449 if (arg5 != 0) {
12450 if (target_to_host_timespec64(&ts, arg5)) {
12451 return -TARGET_EFAULT;
12452 }
12453 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12454 &prio, &ts));
12455 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12456 return -TARGET_EFAULT;
12457 }
12458 } else {
12459 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12460 &prio, NULL));
12461 }
12462 unlock_user(p, arg2, arg3);
12463 if (arg4 != 0) {
12464 put_user_u32(prio, arg4);
12465 }
12466 }
12467 return ret;
12468 #endif
12470 /* Not implemented for now... */
12471 /* case TARGET_NR_mq_notify: */
12472 /* break; */
12474 case TARGET_NR_mq_getsetattr:
12475 {
12476 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12477 ret = 0;
12478 if (arg2 != 0) {
12479 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12480 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12481 &posix_mq_attr_out));
12482 } else if (arg3 != 0) {
12483 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12484 }
12485 if (ret == 0 && arg3 != 0) {
12486 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12487 }
12488 }
12489 return ret;
12490 #endif
12492 #ifdef CONFIG_SPLICE
12493 #ifdef TARGET_NR_tee
12494 case TARGET_NR_tee:
12495 {
12496 ret = get_errno(tee(arg1, arg2, arg3, arg4));
12497 }
12498 return ret;
12499 #endif
12500 #ifdef TARGET_NR_splice
12501 case TARGET_NR_splice:
12502 {
12503 loff_t loff_in, loff_out;
12504 loff_t *ploff_in = NULL, *ploff_out = NULL;
12505 if (arg2) {
12506 if (get_user_u64(loff_in, arg2)) {
12507 return -TARGET_EFAULT;
12508 }
12509 ploff_in = &loff_in;
12510 }
12511 if (arg4) {
12512 if (get_user_u64(loff_out, arg4)) {
12513 return -TARGET_EFAULT;
12514 }
12515 ploff_out = &loff_out;
12516 }
12517 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12518 if (arg2) {
12519 if (put_user_u64(loff_in, arg2)) {
12520 return -TARGET_EFAULT;
12521 }
12522 }
12523 if (arg4) {
12524 if (put_user_u64(loff_out, arg4)) {
12525 return -TARGET_EFAULT;
12526 }
12527 }
12528 }
12529 return ret;
12530 #endif
12531 #ifdef TARGET_NR_vmsplice
12532 case TARGET_NR_vmsplice:
12533 {
12534 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12535 if (vec != NULL) {
12536 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12537 unlock_iovec(vec, arg2, arg3, 0);
12538 } else {
12539 ret = -host_to_target_errno(errno);
12540 }
12541 }
12542 return ret;
12543 #endif
12544 #endif /* CONFIG_SPLICE */
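/*
 * Editorial note (added): splice() advances the offsets it is handed, so
 * the handler above copies each loff_t in with get_user_u64(), passes a
 * pointer to the host call, and writes the updated value back with
 * put_user_u64(); a NULL pointer keeps the fd's own file position.
 */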
12545 #ifdef CONFIG_EVENTFD
12546 #if defined(TARGET_NR_eventfd)
12547 case TARGET_NR_eventfd:
12548 ret = get_errno(eventfd(arg1, 0));
12549 if (ret >= 0) {
12550 fd_trans_register(ret, &target_eventfd_trans);
12551 }
12552 return ret;
12553 #endif
12554 #if defined(TARGET_NR_eventfd2)
12555 case TARGET_NR_eventfd2:
12556 {
12557 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12558 if (arg2 & TARGET_O_NONBLOCK) {
12559 host_flags |= O_NONBLOCK;
12560 }
12561 if (arg2 & TARGET_O_CLOEXEC) {
12562 host_flags |= O_CLOEXEC;
12563 }
12564 ret = get_errno(eventfd(arg1, host_flags));
12565 if (ret >= 0) {
12566 fd_trans_register(ret, &target_eventfd_trans);
12567 }
12568 return ret;
12569 }
12570 #endif
12571 #endif /* CONFIG_EVENTFD */
12572 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12573 case TARGET_NR_fallocate:
12574 #if TARGET_ABI_BITS == 32
12575 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12576 target_offset64(arg5, arg6)));
12577 #else
12578 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12579 #endif
12580 return ret;
12581 #endif
12582 #if defined(CONFIG_SYNC_FILE_RANGE)
12583 #if defined(TARGET_NR_sync_file_range)
12584 case TARGET_NR_sync_file_range:
12585 #if TARGET_ABI_BITS == 32
12586 #if defined(TARGET_MIPS)
12587 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12588 target_offset64(arg5, arg6), arg7));
12589 #else
12590 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12591 target_offset64(arg4, arg5), arg6));
12592 #endif /* !TARGET_MIPS */
12593 #else
12594 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12595 #endif
12596 return ret;
12597 #endif
12598 #if defined(TARGET_NR_sync_file_range2) || \
12599 defined(TARGET_NR_arm_sync_file_range)
12600 #if defined(TARGET_NR_sync_file_range2)
12601 case TARGET_NR_sync_file_range2:
12602 #endif
12603 #if defined(TARGET_NR_arm_sync_file_range)
12604 case TARGET_NR_arm_sync_file_range:
12605 #endif
12606 /* This is like sync_file_range but the arguments are reordered */
12607 #if TARGET_ABI_BITS == 32
12608 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12609 target_offset64(arg5, arg6), arg2));
12610 #else
12611 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12612 #endif
12613 return ret;
12614 #endif
12615 #endif
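/*
 * Editorial note (added, assuming the usual o32 convention): on 32-bit
 * MIPS the 64-bit offset pair must start on an even register slot, which
 * inserts padding after the fd; hence the MIPS branch above reads offset
 * and nbytes from arg3..arg6 and the flags from arg7, one slot later
 * than the generic 32-bit layout.
 */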
12616 #if defined(TARGET_NR_signalfd4)
12617 case TARGET_NR_signalfd4:
12618 return do_signalfd4(arg1, arg2, arg4);
12619 #endif
12620 #if defined(TARGET_NR_signalfd)
12621 case TARGET_NR_signalfd:
12622 return do_signalfd4(arg1, arg2, 0);
12623 #endif
12624 #if defined(CONFIG_EPOLL)
12625 #if defined(TARGET_NR_epoll_create)
12626 case TARGET_NR_epoll_create:
12627 return get_errno(epoll_create(arg1));
12628 #endif
12629 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12630 case TARGET_NR_epoll_create1:
12631 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12632 #endif
12633 #if defined(TARGET_NR_epoll_ctl)
12634 case TARGET_NR_epoll_ctl:
12635 {
12636 struct epoll_event ep;
12637 struct epoll_event *epp = 0;
12638 if (arg4) {
12639 if (arg2 != EPOLL_CTL_DEL) {
12640 struct target_epoll_event *target_ep;
12641 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12642 return -TARGET_EFAULT;
12643 }
12644 ep.events = tswap32(target_ep->events);
12645 /*
12646 * The epoll_data_t union is just opaque data to the kernel,
12647 * so we transfer all 64 bits across and need not worry what
12648 * actual data type it is.
12649 */
12650 ep.data.u64 = tswap64(target_ep->data.u64);
12651 unlock_user_struct(target_ep, arg4, 0);
12652 }
12653 /*
12654 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12655 * non-null pointer, even though this argument is ignored.
12656 */
12658 epp = &ep;
12659 }
12660 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12661 }
12662 #endif
12664 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12665 #if defined(TARGET_NR_epoll_wait)
12666 case TARGET_NR_epoll_wait:
12667 #endif
12668 #if defined(TARGET_NR_epoll_pwait)
12669 case TARGET_NR_epoll_pwait:
12670 #endif
12671 {
12672 struct target_epoll_event *target_ep;
12673 struct epoll_event *ep;
12674 int epfd = arg1;
12675 int maxevents = arg3;
12676 int timeout = arg4;
12678 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12679 return -TARGET_EINVAL;
12680 }
12682 target_ep = lock_user(VERIFY_WRITE, arg2,
12683 maxevents * sizeof(struct target_epoll_event), 1);
12684 if (!target_ep) {
12685 return -TARGET_EFAULT;
12686 }
12688 ep = g_try_new(struct epoll_event, maxevents);
12689 if (!ep) {
12690 unlock_user(target_ep, arg2, 0);
12691 return -TARGET_ENOMEM;
12692 }
12694 switch (num) {
12695 #if defined(TARGET_NR_epoll_pwait)
12696 case TARGET_NR_epoll_pwait:
12697 {
12698 sigset_t *set = NULL;
12700 if (arg5) {
12701 ret = process_sigsuspend_mask(&set, arg5, arg6);
12702 if (ret != 0) {
12703 break;
12704 }
12705 }
12707 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12708 set, SIGSET_T_SIZE));
12710 if (set) {
12711 finish_sigsuspend_mask(ret);
12712 }
12713 break;
12714 }
12715 #endif
12716 #if defined(TARGET_NR_epoll_wait)
12717 case TARGET_NR_epoll_wait:
12718 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12719 NULL, 0));
12720 break;
12721 #endif
12722 default:
12723 ret = -TARGET_ENOSYS;
12724 }
12725 if (!is_error(ret)) {
12726 int i;
12727 for (i = 0; i < ret; i++) {
12728 target_ep[i].events = tswap32(ep[i].events);
12729 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12730 }
12731 unlock_user(target_ep, arg2,
12732 ret * sizeof(struct target_epoll_event));
12733 } else {
12734 unlock_user(target_ep, arg2, 0);
12735 }
12736 g_free(ep);
12737 return ret;
12738 }
12739 #endif
12740 #endif
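/*
 * Editorial note (added): the wait handler above bounces events through
 * a host-side array because struct target_epoll_event need not match the
 * host's layout or byte order; only the 'ret' entries actually filled in
 * are swapped and copied out, and the error path unlocks with length 0
 * so no partial data reaches the guest.
 */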
12741 #ifdef TARGET_NR_prlimit64
12742 case TARGET_NR_prlimit64:
12743 {
12744 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12745 struct target_rlimit64 *target_rnew, *target_rold;
12746 struct host_rlimit64 rnew, rold, *rnewp = 0;
12747 int resource = target_to_host_resource(arg2);
12749 if (arg3 && (resource != RLIMIT_AS &&
12750 resource != RLIMIT_DATA &&
12751 resource != RLIMIT_STACK)) {
12752 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12753 return -TARGET_EFAULT;
12754 }
12755 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12756 rnew.rlim_max = tswap64(target_rnew->rlim_max);
12757 unlock_user_struct(target_rnew, arg3, 0);
12758 rnewp = &rnew;
12759 }
12761 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12762 if (!is_error(ret) && arg4) {
12763 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12764 return -TARGET_EFAULT;
12765 }
12766 target_rold->rlim_cur = tswap64(rold.rlim_cur);
12767 target_rold->rlim_max = tswap64(rold.rlim_max);
12768 unlock_user_struct(target_rold, arg4, 1);
12769 }
12770 return ret;
12771 }
12772 #endif
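/*
 * Editorial note (added): the new-limit pointer is deliberately dropped
 * for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK, presumably because those
 * limits would constrain the emulating QEMU process itself rather than
 * the guest's emulated address space; old limits are still read back.
 */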
12773 #ifdef TARGET_NR_gethostname
12774 case TARGET_NR_gethostname:
12775 {
12776 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12777 if (name) {
12778 ret = get_errno(gethostname(name, arg2));
12779 unlock_user(name, arg1, arg2);
12780 } else {
12781 ret = -TARGET_EFAULT;
12782 }
12783 return ret;
12784 }
12785 #endif
12786 #ifdef TARGET_NR_atomic_cmpxchg_32
12787 case TARGET_NR_atomic_cmpxchg_32:
12788 {
12789 /* should use start_exclusive from main.c */
12790 abi_ulong mem_value;
12791 if (get_user_u32(mem_value, arg6)) {
12792 target_siginfo_t info;
12793 info.si_signo = SIGSEGV;
12794 info.si_errno = 0;
12795 info.si_code = TARGET_SEGV_MAPERR;
12796 info._sifields._sigfault._addr = arg6;
12797 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12798 ret = 0xdeadbeef;
12800 }
12801 if (mem_value == arg2)
12802 put_user_u32(arg1, arg6);
12803 return mem_value;
12804 }
12805 #endif
12806 #ifdef TARGET_NR_atomic_barrier
12807 case TARGET_NR_atomic_barrier:
12808 /* Like the kernel implementation and the
12809 qemu arm barrier, treat this as a no-op. */
12810 return 0;
12811 #endif
12813 #ifdef TARGET_NR_timer_create
12814 case TARGET_NR_timer_create:
12815 {
12816 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12818 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12820 int clkid = arg1;
12821 int timer_index = next_free_host_timer();
12823 if (timer_index < 0) {
12824 ret = -TARGET_EAGAIN;
12825 } else {
12826 timer_t *phtimer = g_posix_timers + timer_index;
12828 if (arg2) {
12829 phost_sevp = &host_sevp;
12830 ret = target_to_host_sigevent(phost_sevp, arg2);
12831 if (ret != 0) {
12832 return ret;
12833 }
12834 }
12836 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12837 if (ret) {
12838 phtimer = NULL;
12839 } else {
12840 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12841 return -TARGET_EFAULT;
12842 }
12843 }
12844 }
12845 return ret;
12846 }
12847 #endif
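/*
 * Editorial sketch (added; assumed helper shape, not verbatim from this
 * file): the handle given to the guest is TIMER_MAGIC | timer_index, an
 * index into g_posix_timers[], and the get_timer_id() used by the cases
 * below is expected to validate and strip the magic, roughly:
 *
 *     static target_timer_t example_get_timer_id(abi_long handle)
 *     {
 *         target_timer_t tid = handle ^ TIMER_MAGIC;
 *         if (tid < 0 || tid >= ARRAY_SIZE(g_posix_timers)) {
 *             return -TARGET_EINVAL;
 *         }
 *         return tid;
 *     }
 */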
12849 #ifdef TARGET_NR_timer_settime
12850 case TARGET_NR_timer_settime:
12851 {
12852 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12853 * struct itimerspec * old_value */
12854 target_timer_t timerid = get_timer_id(arg1);
12856 if (timerid < 0) {
12857 ret = timerid;
12858 } else if (arg3 == 0) {
12859 ret = -TARGET_EINVAL;
12860 } else {
12861 timer_t htimer = g_posix_timers[timerid];
12862 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12864 if (target_to_host_itimerspec(&hspec_new, arg3)) {
12865 return -TARGET_EFAULT;
12866 }
12867 ret = get_errno(
12868 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12869 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12870 return -TARGET_EFAULT;
12871 }
12872 }
12873 return ret;
12874 }
12875 #endif
12877 #ifdef TARGET_NR_timer_settime64
12878 case TARGET_NR_timer_settime64:
12879 {
12880 target_timer_t timerid = get_timer_id(arg1);
12882 if (timerid < 0) {
12883 ret = timerid;
12884 } else if (arg3 == 0) {
12885 ret = -TARGET_EINVAL;
12886 } else {
12887 timer_t htimer = g_posix_timers[timerid];
12888 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12890 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12891 return -TARGET_EFAULT;
12892 }
12893 ret = get_errno(
12894 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12895 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12896 return -TARGET_EFAULT;
12897 }
12898 }
12899 return ret;
12900 }
12901 #endif
12903 #ifdef TARGET_NR_timer_gettime
12904 case TARGET_NR_timer_gettime:
12905 {
12906 /* args: timer_t timerid, struct itimerspec *curr_value */
12907 target_timer_t timerid = get_timer_id(arg1);
12909 if (timerid < 0) {
12910 ret = timerid;
12911 } else if (!arg2) {
12912 ret = -TARGET_EFAULT;
12913 } else {
12914 timer_t htimer = g_posix_timers[timerid];
12915 struct itimerspec hspec;
12916 ret = get_errno(timer_gettime(htimer, &hspec));
12918 if (host_to_target_itimerspec(arg2, &hspec)) {
12919 ret = -TARGET_EFAULT;
12920 }
12921 }
12922 return ret;
12923 }
12924 #endif
12926 #ifdef TARGET_NR_timer_gettime64
12927 case TARGET_NR_timer_gettime64:
12928 {
12929 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12930 target_timer_t timerid = get_timer_id(arg1);
12932 if (timerid < 0) {
12933 ret = timerid;
12934 } else if (!arg2) {
12935 ret = -TARGET_EFAULT;
12936 } else {
12937 timer_t htimer = g_posix_timers[timerid];
12938 struct itimerspec hspec;
12939 ret = get_errno(timer_gettime(htimer, &hspec));
12941 if (host_to_target_itimerspec64(arg2, &hspec)) {
12942 ret = -TARGET_EFAULT;
12943 }
12944 }
12945 return ret;
12946 }
12947 #endif
12949 #ifdef TARGET_NR_timer_getoverrun
12950 case TARGET_NR_timer_getoverrun:
12951 {
12952 /* args: timer_t timerid */
12953 target_timer_t timerid = get_timer_id(arg1);
12955 if (timerid < 0) {
12956 ret = timerid;
12957 } else {
12958 timer_t htimer = g_posix_timers[timerid];
12959 ret = get_errno(timer_getoverrun(htimer));
12960 }
12961 return ret;
12962 }
12963 #endif
12965 #ifdef TARGET_NR_timer_delete
12966 case TARGET_NR_timer_delete:
12967 {
12968 /* args: timer_t timerid */
12969 target_timer_t timerid = get_timer_id(arg1);
12971 if (timerid < 0) {
12972 ret = timerid;
12973 } else {
12974 timer_t htimer = g_posix_timers[timerid];
12975 ret = get_errno(timer_delete(htimer));
12976 g_posix_timers[timerid] = 0;
12977 }
12978 return ret;
12979 }
12980 #endif
12982 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12983 case TARGET_NR_timerfd_create:
12984 return get_errno(timerfd_create(arg1,
12985 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12986 #endif
12988 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12989 case TARGET_NR_timerfd_gettime:
12990 {
12991 struct itimerspec its_curr;
12993 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12995 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12996 return -TARGET_EFAULT;
12997 }
12998 }
12999 return ret;
13000 #endif
13002 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13003 case TARGET_NR_timerfd_gettime64:
13004 {
13005 struct itimerspec its_curr;
13007 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13009 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13010 return -TARGET_EFAULT;
13011 }
13012 }
13013 return ret;
13014 #endif
13016 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13017 case TARGET_NR_timerfd_settime:
13018 {
13019 struct itimerspec its_new, its_old, *p_new;
13021 if (arg3) {
13022 if (target_to_host_itimerspec(&its_new, arg3)) {
13023 return -TARGET_EFAULT;
13024 }
13025 p_new = &its_new;
13026 } else {
13027 p_new = NULL;
13028 }
13030 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13032 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13033 return -TARGET_EFAULT;
13034 }
13035 }
13036 return ret;
13037 #endif
13039 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13040 case TARGET_NR_timerfd_settime64:
13041 {
13042 struct itimerspec its_new, its_old, *p_new;
13044 if (arg3) {
13045 if (target_to_host_itimerspec64(&its_new, arg3)) {
13046 return -TARGET_EFAULT;
13047 }
13048 p_new = &its_new;
13049 } else {
13050 p_new = NULL;
13051 }
13053 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13055 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13056 return -TARGET_EFAULT;
13057 }
13058 }
13059 return ret;
13060 #endif
13062 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13063 case TARGET_NR_ioprio_get:
13064 return get_errno(ioprio_get(arg1, arg2));
13065 #endif
13067 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13068 case TARGET_NR_ioprio_set:
13069 return get_errno(ioprio_set(arg1, arg2, arg3));
13070 #endif
13072 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13073 case TARGET_NR_setns:
13074 return get_errno(setns(arg1, arg2));
13075 #endif
13076 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13077 case TARGET_NR_unshare:
13078 return get_errno(unshare(arg1));
13079 #endif
13080 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13081 case TARGET_NR_kcmp:
13082 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13083 #endif
13084 #ifdef TARGET_NR_swapcontext
13085 case TARGET_NR_swapcontext:
13086 /* PowerPC specific. */
13087 return do_swapcontext(cpu_env, arg1, arg2, arg3);
13088 #endif
13089 #ifdef TARGET_NR_memfd_create
13090 case TARGET_NR_memfd_create:
13091 p = lock_user_string(arg1);
13092 if (!p) {
13093 return -TARGET_EFAULT;
13094 }
13095 ret = get_errno(memfd_create(p, arg2));
13096 fd_trans_unregister(ret);
13097 unlock_user(p, arg1, 0);
13098 return ret;
13099 #endif
13100 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13101 case TARGET_NR_membarrier:
13102 return get_errno(membarrier(arg1, arg2));
13103 #endif
13105 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13106 case TARGET_NR_copy_file_range:
13107 {
13108 loff_t inoff, outoff;
13109 loff_t *pinoff = NULL, *poutoff = NULL;
13111 if (arg2) {
13112 if (get_user_u64(inoff, arg2)) {
13113 return -TARGET_EFAULT;
13114 }
13115 pinoff = &inoff;
13116 }
13117 if (arg4) {
13118 if (get_user_u64(outoff, arg4)) {
13119 return -TARGET_EFAULT;
13120 }
13121 poutoff = &outoff;
13122 }
13123 /* Do not sign-extend the count parameter. */
13124 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13125 (abi_ulong)arg5, arg6));
13126 if (!is_error(ret) && ret > 0) {
13127 if (arg2) {
13128 if (put_user_u64(inoff, arg2)) {
13129 return -TARGET_EFAULT;
13130 }
13131 }
13132 if (arg4) {
13133 if (put_user_u64(outoff, arg4)) {
13134 return -TARGET_EFAULT;
13135 }
13136 }
13137 }
13138 }
13139 return ret;
13140 #endif
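/*
 * Editorial note (added): casting arg5 to abi_ulong stops a 32-bit guest
 * length from being sign-extended into an enormous 64-bit count, and the
 * in/out offsets are only written back once the host call reports that
 * bytes were actually moved (ret > 0).
 */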
13142 #if defined(TARGET_NR_pivot_root)
13143 case TARGET_NR_pivot_root:
13144 {
13145 void *p2;
13146 p = lock_user_string(arg1); /* new_root */
13147 p2 = lock_user_string(arg2); /* put_old */
13148 if (!p || !p2) {
13149 ret = -TARGET_EFAULT;
13150 } else {
13151 ret = get_errno(pivot_root(p, p2));
13152 }
13153 unlock_user(p2, arg2, 0);
13154 unlock_user(p, arg1, 0);
13155 }
13156 return ret;
13157 #endif
13159 default:
13160 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13161 return -TARGET_ENOSYS;
13162 }
13163 return ret;
13164 }
13166 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13167 abi_long arg2, abi_long arg3, abi_long arg4,
13168 abi_long arg5, abi_long arg6, abi_long arg7,
13169 abi_long arg8)
13170 {
13171 CPUState *cpu = env_cpu(cpu_env);
13172 abi_long ret;
13174 #ifdef DEBUG_ERESTARTSYS
13175 /* Debug-only code for exercising the syscall-restart code paths
13176 * in the per-architecture cpu main loops: restart every syscall
13177 * the guest makes once before letting it through.
13178 */
13179 {
13180 static bool flag;
13181 flag = !flag;
13182 if (flag) {
13183 return -QEMU_ERESTARTSYS;
13184 }
13185 }
13186 #endif
13188 record_syscall_start(cpu, num, arg1,
13189 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13191 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13192 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13193 }
13195 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13196 arg5, arg6, arg7, arg8);
13198 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13199 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13200 arg3, arg4, arg5, arg6);
13201 }
13203 record_syscall_return(cpu, num, ret);
13204 return ret;
13205 }
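/*
 * Editorial note (added): do_syscall() is a thin wrapper around
 * do_syscall1(): under DEBUG_ERESTARTSYS it restarts every guest syscall
 * once to exercise the restart paths, it brackets the real work with
 * -strace style logging when LOG_STRACE is enabled, and it reports entry
 * and return to the tracing hooks via record_syscall_start/return().
 */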