target/ppc: Fix VRMA page size for ISA v3.0
[qemu/kevin.git] / linux-user / syscall.c
blob95727a816add8e525c65f1a7f6bd1adc0932205f
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
147 #ifndef CLONE_IO
148 #define CLONE_IO 0x80000000 /* Clone io context */
149 #endif
151 /* We can't directly call the host clone syscall, because this will
152 * badly confuse libc (breaking mutexes, for example). So we must
153 * divide clone flags into:
154 * * flag combinations that look like pthread_create()
155 * * flag combinations that look like fork()
156 * * flags we can implement within QEMU itself
157 * * flags we can't support and will return an error for
159 /* For thread creation, all these flags must be present; for
160 * fork, none must be present.
162 #define CLONE_THREAD_FLAGS \
163 (CLONE_VM | CLONE_FS | CLONE_FILES | \
164 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
166 /* These flags are ignored:
167 * CLONE_DETACHED is now ignored by the kernel;
168 * CLONE_IO is just an optimisation hint to the I/O scheduler
170 #define CLONE_IGNORED_FLAGS \
171 (CLONE_DETACHED | CLONE_IO)
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS \
179 (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS \
184 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
185 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
187 #define CLONE_INVALID_FORK_FLAGS \
188 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
190 #define CLONE_INVALID_THREAD_FLAGS \
191 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
192 CLONE_IGNORED_FLAGS))
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195 * have almost all been allocated. We cannot support any of
196 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198 * The checks against the invalid thread masks above will catch these.
199 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203 * once. This exercises the codepaths for restart.
205 //#define DEBUG_ERESTARTSYS
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
221 #define _syscall0(type,name) \
222 static type name (void) \
224 return syscall(__NR_##name); \
227 #define _syscall1(type,name,type1,arg1) \
228 static type name (type1 arg1) \
230 return syscall(__NR_##name, arg1); \
233 #define _syscall2(type,name,type1,arg1,type2,arg2) \
234 static type name (type1 arg1,type2 arg2) \
236 return syscall(__NR_##name, arg1, arg2); \
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
240 static type name (type1 arg1,type2 arg2,type3 arg3) \
242 return syscall(__NR_##name, arg1, arg2, arg3); \
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
248 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
252 type5,arg5) \
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
255 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
260 type5,arg5,type6,arg6) \
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
262 type6 arg6) \
264 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
301 /* For the 64-bit guest on 32-bit host case we must emulate
302 * getdents using getdents64, because otherwise the host
303 * might hand us back more dirent records than we can fit
304 * into the guest buffer after structure format conversion.
305 * Otherwise we emulate getdents with getdents if the host has it.
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek, unsigned int, fd, unsigned long, hi, unsigned long, lo,
321 loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325 siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339 const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343 const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350 unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357 unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360 unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363 uint32_t size;
364 uint32_t sched_policy;
365 uint64_t sched_flags;
366 int32_t sched_nice;
367 uint32_t sched_priority;
368 uint64_t sched_runtime;
369 uint64_t sched_deadline;
370 uint64_t sched_period;
371 uint32_t sched_util_min;
372 uint32_t sched_util_max;
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376 unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379 unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384 const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387 struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390 const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394 void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396 struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398 struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411 unsigned long, idx1, unsigned long, idx2)
412 #endif
415 * It is assumed that struct statx is architecture independent.
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419 unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
425 static const bitmask_transtbl fcntl_flags_tbl[] = {
426 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
427 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
428 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
429 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
430 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
431 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
432 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
433 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
434 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
435 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
436 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
437 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
439 #if defined(O_DIRECT)
440 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
441 #endif
442 #if defined(O_NOATIME)
443 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
444 #endif
445 #if defined(O_CLOEXEC)
446 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
447 #endif
448 #if defined(O_PATH)
449 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
450 #endif
451 #if defined(O_TMPFILE)
452 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
453 #endif
454 /* Don't terminate the list prematurely on 64-bit host+guest. */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457 #endif
458 { 0, 0, 0, 0 }
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467 const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470 const struct timespec times[2], int flags)
472 errno = ENOSYS;
473 return -1;
475 #endif
476 #endif /* TARGET_NR_utimensat */
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482 const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485 int newfd, const char *new, int flags)
487 if (flags == 0) {
488 return renameat(oldfd, old, newfd, new);
490 errno = ENOSYS;
491 return -1;
493 #endif
494 #endif /* TARGET_NR_renameat2 */
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY */
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
512 struct host_rlimit64 {
513 uint64_t rlim_cur;
514 uint64_t rlim_max;
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517 const struct host_rlimit64 *, new_limit,
518 struct host_rlimit64 *, old_limit)
519 #endif
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
528 static inline int next_free_host_timer(void)
530 int k;
531 for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532 if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533 return k;
536 return -1;
539 static inline void free_host_timer_slot(int id)
541 qatomic_store_release(g_posix_timer_allocated + id, 0);
543 #endif
545 static inline int host_to_target_errno(int host_errno)
547 switch (host_errno) {
548 #define E(X) case X: return TARGET_##X;
549 #include "errnos.c.inc"
550 #undef E
551 default:
552 return host_errno;
556 static inline int target_to_host_errno(int target_errno)
558 switch (target_errno) {
559 #define E(X) case TARGET_##X: return X;
560 #include "errnos.c.inc"
561 #undef E
562 default:
563 return target_errno;
567 abi_long get_errno(abi_long ret)
569 if (ret == -1)
570 return -host_to_target_errno(errno);
571 else
572 return ret;
575 const char *target_strerror(int err)
577 if (err == QEMU_ERESTARTSYS) {
578 return "To be restarted";
580 if (err == QEMU_ESIGRETURN) {
581 return "Successful exit from sigreturn";
584 return strerror(target_to_host_errno(err));
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
589 int i;
590 uint8_t b;
591 if (usize <= ksize) {
592 return 1;
594 for (i = ksize; i < usize; i++) {
595 if (get_user_u8(b, addr + i)) {
596 return -TARGET_EFAULT;
598 if (b != 0) {
599 return 0;
602 return 1;
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
608 return safe_syscall(__NR_##name); \
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
614 return safe_syscall(__NR_##name, arg1); \
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
620 return safe_syscall(__NR_##name, arg1, arg2); \
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
626 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630 type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
633 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637 type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639 type5 arg5) \
641 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645 type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647 type5 arg5, type6 arg6) \
649 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655 int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658 struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661 int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664 char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672 struct timespec *, tsp, const sigset_t *, sigmask,
673 size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676 int, maxevents, int, timeout, const sigset_t *, sigmask,
677 size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680 const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684 const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693 unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695 unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697 socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707 const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710 int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713 struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716 defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718 const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723 void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726 void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731 int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735 long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739 unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742 defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744 size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747 defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749 size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753 int, outfd, loff_t *, poutoff, size_t, length,
754 unsigned int, flags)
755 #endif
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758 * "third argument might be integer or pointer or not present" behaviour of
759 * the libc function.
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
764 * use the flock64 struct rather than unsuffixed flock
765 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
767 #ifdef __NR_fcntl64
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
769 #else
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
771 #endif
773 static inline int host_to_target_sock_type(int host_type)
775 int target_type;
777 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
778 case SOCK_DGRAM:
779 target_type = TARGET_SOCK_DGRAM;
780 break;
781 case SOCK_STREAM:
782 target_type = TARGET_SOCK_STREAM;
783 break;
784 default:
785 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
786 break;
789 #if defined(SOCK_CLOEXEC)
790 if (host_type & SOCK_CLOEXEC) {
791 target_type |= TARGET_SOCK_CLOEXEC;
793 #endif
795 #if defined(SOCK_NONBLOCK)
796 if (host_type & SOCK_NONBLOCK) {
797 target_type |= TARGET_SOCK_NONBLOCK;
799 #endif
801 return target_type;
804 static abi_ulong target_brk, initial_target_brk;
805 static abi_ulong brk_page;
807 void target_set_brk(abi_ulong new_brk)
809 target_brk = TARGET_PAGE_ALIGN(new_brk);
810 initial_target_brk = target_brk;
811 brk_page = HOST_PAGE_ALIGN(target_brk);
814 /* do_brk() must return target values and target errnos. */
815 abi_long do_brk(abi_ulong brk_val)
817 abi_long mapped_addr;
818 abi_ulong new_alloc_size;
819 abi_ulong new_brk, new_host_brk_page;
821 /* brk pointers are always untagged */
823 /* return old brk value if brk_val unchanged or zero */
824 if (!brk_val || brk_val == target_brk) {
825 return target_brk;
828 /* do not allow to shrink below initial brk value */
829 if (brk_val < initial_target_brk) {
830 brk_val = initial_target_brk;
833 new_brk = TARGET_PAGE_ALIGN(brk_val);
834 new_host_brk_page = HOST_PAGE_ALIGN(brk_val);
836 /* brk_val and old target_brk might be on the same page */
837 if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
838 /* empty remaining bytes in (possibly larger) host page */
839 memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);
840 target_brk = brk_val;
841 return target_brk;
844 /* Release heap if necesary */
845 if (new_brk < target_brk) {
846 /* empty remaining bytes in (possibly larger) host page */
847 memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);
849 /* free unused host pages and set new brk_page */
850 target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
851 brk_page = new_host_brk_page;
853 target_brk = brk_val;
854 return target_brk;
857 /* We need to allocate more memory after the brk... Note that
858 * we don't use MAP_FIXED because that will map over the top of
859 * any existing mapping (like the one with the host libc or qemu
860 * itself); instead we treat "mapped but at wrong address" as
861 * a failure and unmap again.
863 if (new_host_brk_page > brk_page) {
864 new_alloc_size = new_host_brk_page - brk_page;
865 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
866 PROT_READ|PROT_WRITE,
867 MAP_ANON|MAP_PRIVATE, 0, 0));
868 } else {
869 new_alloc_size = 0;
870 mapped_addr = brk_page;
873 if (mapped_addr == brk_page) {
874 /* Heap contents are initialized to zero, as for anonymous
875 * mapped pages. Technically the new pages are already
876 * initialized to zero since they *are* anonymous mapped
877 * pages, however we have to take care with the contents that
878 * come from the remaining part of the previous page: it may
879 * contains garbage data due to a previous heap usage (grown
880 * then shrunken). */
881 memset(g2h_untagged(brk_page), 0, HOST_PAGE_ALIGN(brk_page) - brk_page);
883 target_brk = brk_val;
884 brk_page = new_host_brk_page;
885 return target_brk;
886 } else if (mapped_addr != -1) {
887 /* Mapped but at wrong address, meaning there wasn't actually
888 * enough space for this brk.
890 target_munmap(mapped_addr, new_alloc_size);
891 mapped_addr = -1;
894 #if defined(TARGET_ALPHA)
895 /* We (partially) emulate OSF/1 on Alpha, which requires we
896 return a proper errno, not an unchanged brk value. */
897 return -TARGET_ENOMEM;
898 #endif
899 /* For everything else, return the previous break. */
900 return target_brk;
903 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
904 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
905 static inline abi_long copy_from_user_fdset(fd_set *fds,
906 abi_ulong target_fds_addr,
907 int n)
909 int i, nw, j, k;
910 abi_ulong b, *target_fds;
912 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
913 if (!(target_fds = lock_user(VERIFY_READ,
914 target_fds_addr,
915 sizeof(abi_ulong) * nw,
916 1)))
917 return -TARGET_EFAULT;
919 FD_ZERO(fds);
920 k = 0;
921 for (i = 0; i < nw; i++) {
922 /* grab the abi_ulong */
923 __get_user(b, &target_fds[i]);
924 for (j = 0; j < TARGET_ABI_BITS; j++) {
925 /* check the bit inside the abi_ulong */
926 if ((b >> j) & 1)
927 FD_SET(k, fds);
928 k++;
932 unlock_user(target_fds, target_fds_addr, 0);
934 return 0;
937 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
938 abi_ulong target_fds_addr,
939 int n)
941 if (target_fds_addr) {
942 if (copy_from_user_fdset(fds, target_fds_addr, n))
943 return -TARGET_EFAULT;
944 *fds_ptr = fds;
945 } else {
946 *fds_ptr = NULL;
948 return 0;
951 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
952 const fd_set *fds,
953 int n)
955 int i, nw, j, k;
956 abi_long v;
957 abi_ulong *target_fds;
959 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
960 if (!(target_fds = lock_user(VERIFY_WRITE,
961 target_fds_addr,
962 sizeof(abi_ulong) * nw,
963 0)))
964 return -TARGET_EFAULT;
966 k = 0;
967 for (i = 0; i < nw; i++) {
968 v = 0;
969 for (j = 0; j < TARGET_ABI_BITS; j++) {
970 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
971 k++;
973 __put_user(v, &target_fds[i]);
976 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
978 return 0;
980 #endif
982 #if defined(__alpha__)
983 #define HOST_HZ 1024
984 #else
985 #define HOST_HZ 100
986 #endif
988 static inline abi_long host_to_target_clock_t(long ticks)
990 #if HOST_HZ == TARGET_HZ
991 return ticks;
992 #else
993 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
994 #endif
997 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
998 const struct rusage *rusage)
1000 struct target_rusage *target_rusage;
1002 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1003 return -TARGET_EFAULT;
1004 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1005 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1006 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1007 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1008 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1009 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1010 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1011 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1012 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1013 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1014 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1015 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1016 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1017 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1018 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1019 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1020 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1021 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1022 unlock_user_struct(target_rusage, target_addr, 1);
1024 return 0;
1027 #ifdef TARGET_NR_setrlimit
1028 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1030 abi_ulong target_rlim_swap;
1031 rlim_t result;
1033 target_rlim_swap = tswapal(target_rlim);
1034 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1035 return RLIM_INFINITY;
1037 result = target_rlim_swap;
1038 if (target_rlim_swap != (rlim_t)result)
1039 return RLIM_INFINITY;
1041 return result;
1043 #endif
1045 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1046 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1048 abi_ulong target_rlim_swap;
1049 abi_ulong result;
1051 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1052 target_rlim_swap = TARGET_RLIM_INFINITY;
1053 else
1054 target_rlim_swap = rlim;
1055 result = tswapal(target_rlim_swap);
1057 return result;
1059 #endif
1061 static inline int target_to_host_resource(int code)
1063 switch (code) {
1064 case TARGET_RLIMIT_AS:
1065 return RLIMIT_AS;
1066 case TARGET_RLIMIT_CORE:
1067 return RLIMIT_CORE;
1068 case TARGET_RLIMIT_CPU:
1069 return RLIMIT_CPU;
1070 case TARGET_RLIMIT_DATA:
1071 return RLIMIT_DATA;
1072 case TARGET_RLIMIT_FSIZE:
1073 return RLIMIT_FSIZE;
1074 case TARGET_RLIMIT_LOCKS:
1075 return RLIMIT_LOCKS;
1076 case TARGET_RLIMIT_MEMLOCK:
1077 return RLIMIT_MEMLOCK;
1078 case TARGET_RLIMIT_MSGQUEUE:
1079 return RLIMIT_MSGQUEUE;
1080 case TARGET_RLIMIT_NICE:
1081 return RLIMIT_NICE;
1082 case TARGET_RLIMIT_NOFILE:
1083 return RLIMIT_NOFILE;
1084 case TARGET_RLIMIT_NPROC:
1085 return RLIMIT_NPROC;
1086 case TARGET_RLIMIT_RSS:
1087 return RLIMIT_RSS;
1088 case TARGET_RLIMIT_RTPRIO:
1089 return RLIMIT_RTPRIO;
1090 #ifdef RLIMIT_RTTIME
1091 case TARGET_RLIMIT_RTTIME:
1092 return RLIMIT_RTTIME;
1093 #endif
1094 case TARGET_RLIMIT_SIGPENDING:
1095 return RLIMIT_SIGPENDING;
1096 case TARGET_RLIMIT_STACK:
1097 return RLIMIT_STACK;
1098 default:
1099 return code;
1103 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1104 abi_ulong target_tv_addr)
1106 struct target_timeval *target_tv;
1108 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1109 return -TARGET_EFAULT;
1112 __get_user(tv->tv_sec, &target_tv->tv_sec);
1113 __get_user(tv->tv_usec, &target_tv->tv_usec);
1115 unlock_user_struct(target_tv, target_tv_addr, 0);
1117 return 0;
1120 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1121 const struct timeval *tv)
1123 struct target_timeval *target_tv;
1125 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1126 return -TARGET_EFAULT;
1129 __put_user(tv->tv_sec, &target_tv->tv_sec);
1130 __put_user(tv->tv_usec, &target_tv->tv_usec);
1132 unlock_user_struct(target_tv, target_tv_addr, 1);
1134 return 0;
1137 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1138 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1139 abi_ulong target_tv_addr)
1141 struct target__kernel_sock_timeval *target_tv;
1143 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1144 return -TARGET_EFAULT;
1147 __get_user(tv->tv_sec, &target_tv->tv_sec);
1148 __get_user(tv->tv_usec, &target_tv->tv_usec);
1150 unlock_user_struct(target_tv, target_tv_addr, 0);
1152 return 0;
1154 #endif
1156 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1157 const struct timeval *tv)
1159 struct target__kernel_sock_timeval *target_tv;
1161 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1162 return -TARGET_EFAULT;
1165 __put_user(tv->tv_sec, &target_tv->tv_sec);
1166 __put_user(tv->tv_usec, &target_tv->tv_usec);
1168 unlock_user_struct(target_tv, target_tv_addr, 1);
1170 return 0;
1173 #if defined(TARGET_NR_futex) || \
1174 defined(TARGET_NR_rt_sigtimedwait) || \
1175 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1176 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1177 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1178 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1179 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1180 defined(TARGET_NR_timer_settime) || \
1181 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1182 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1183 abi_ulong target_addr)
1185 struct target_timespec *target_ts;
1187 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1188 return -TARGET_EFAULT;
1190 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1191 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1192 unlock_user_struct(target_ts, target_addr, 0);
1193 return 0;
1195 #endif
1197 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1198 defined(TARGET_NR_timer_settime64) || \
1199 defined(TARGET_NR_mq_timedsend_time64) || \
1200 defined(TARGET_NR_mq_timedreceive_time64) || \
1201 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1202 defined(TARGET_NR_clock_nanosleep_time64) || \
1203 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1204 defined(TARGET_NR_utimensat) || \
1205 defined(TARGET_NR_utimensat_time64) || \
1206 defined(TARGET_NR_semtimedop_time64) || \
1207 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1208 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1209 abi_ulong target_addr)
1211 struct target__kernel_timespec *target_ts;
1213 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1214 return -TARGET_EFAULT;
1216 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1217 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1218 /* in 32bit mode, this drops the padding */
1219 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1220 unlock_user_struct(target_ts, target_addr, 0);
1221 return 0;
1223 #endif
1225 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1226 struct timespec *host_ts)
1228 struct target_timespec *target_ts;
1230 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1231 return -TARGET_EFAULT;
1233 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1234 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1235 unlock_user_struct(target_ts, target_addr, 1);
1236 return 0;
1239 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1240 struct timespec *host_ts)
1242 struct target__kernel_timespec *target_ts;
1244 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1245 return -TARGET_EFAULT;
1247 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1248 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1249 unlock_user_struct(target_ts, target_addr, 1);
1250 return 0;
1253 #if defined(TARGET_NR_gettimeofday)
1254 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1255 struct timezone *tz)
1257 struct target_timezone *target_tz;
1259 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1260 return -TARGET_EFAULT;
1263 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1264 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1266 unlock_user_struct(target_tz, target_tz_addr, 1);
1268 return 0;
1270 #endif
1272 #if defined(TARGET_NR_settimeofday)
1273 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1274 abi_ulong target_tz_addr)
1276 struct target_timezone *target_tz;
1278 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1279 return -TARGET_EFAULT;
1282 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1283 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1285 unlock_user_struct(target_tz, target_tz_addr, 0);
1287 return 0;
1289 #endif
1291 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1292 #include <mqueue.h>
1294 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1295 abi_ulong target_mq_attr_addr)
1297 struct target_mq_attr *target_mq_attr;
1299 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1300 target_mq_attr_addr, 1))
1301 return -TARGET_EFAULT;
1303 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1304 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1305 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1306 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1308 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1310 return 0;
1313 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1314 const struct mq_attr *attr)
1316 struct target_mq_attr *target_mq_attr;
1318 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1319 target_mq_attr_addr, 0))
1320 return -TARGET_EFAULT;
1322 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1323 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1324 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1325 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1327 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1329 return 0;
1331 #endif
1333 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1334 /* do_select() must return target values and target errnos. */
1335 static abi_long do_select(int n,
1336 abi_ulong rfd_addr, abi_ulong wfd_addr,
1337 abi_ulong efd_addr, abi_ulong target_tv_addr)
1339 fd_set rfds, wfds, efds;
1340 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1341 struct timeval tv;
1342 struct timespec ts, *ts_ptr;
1343 abi_long ret;
1345 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1346 if (ret) {
1347 return ret;
1349 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1350 if (ret) {
1351 return ret;
1353 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1354 if (ret) {
1355 return ret;
1358 if (target_tv_addr) {
1359 if (copy_from_user_timeval(&tv, target_tv_addr))
1360 return -TARGET_EFAULT;
1361 ts.tv_sec = tv.tv_sec;
1362 ts.tv_nsec = tv.tv_usec * 1000;
1363 ts_ptr = &ts;
1364 } else {
1365 ts_ptr = NULL;
1368 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1369 ts_ptr, NULL));
1371 if (!is_error(ret)) {
1372 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1373 return -TARGET_EFAULT;
1374 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1375 return -TARGET_EFAULT;
1376 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1377 return -TARGET_EFAULT;
1379 if (target_tv_addr) {
1380 tv.tv_sec = ts.tv_sec;
1381 tv.tv_usec = ts.tv_nsec / 1000;
1382 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1383 return -TARGET_EFAULT;
1388 return ret;
1391 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1392 static abi_long do_old_select(abi_ulong arg1)
1394 struct target_sel_arg_struct *sel;
1395 abi_ulong inp, outp, exp, tvp;
1396 long nsel;
1398 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1399 return -TARGET_EFAULT;
1402 nsel = tswapal(sel->n);
1403 inp = tswapal(sel->inp);
1404 outp = tswapal(sel->outp);
1405 exp = tswapal(sel->exp);
1406 tvp = tswapal(sel->tvp);
1408 unlock_user_struct(sel, arg1, 0);
1410 return do_select(nsel, inp, outp, exp, tvp);
1412 #endif
1413 #endif
1415 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1416 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1417 abi_long arg4, abi_long arg5, abi_long arg6,
1418 bool time64)
1420 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1421 fd_set rfds, wfds, efds;
1422 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1423 struct timespec ts, *ts_ptr;
1424 abi_long ret;
1427 * The 6th arg is actually two args smashed together,
1428 * so we cannot use the C library.
1430 struct {
1431 sigset_t *set;
1432 size_t size;
1433 } sig, *sig_ptr;
1435 abi_ulong arg_sigset, arg_sigsize, *arg7;
1437 n = arg1;
1438 rfd_addr = arg2;
1439 wfd_addr = arg3;
1440 efd_addr = arg4;
1441 ts_addr = arg5;
1443 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1444 if (ret) {
1445 return ret;
1447 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1448 if (ret) {
1449 return ret;
1451 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1452 if (ret) {
1453 return ret;
1457 * This takes a timespec, and not a timeval, so we cannot
1458 * use the do_select() helper ...
1460 if (ts_addr) {
1461 if (time64) {
1462 if (target_to_host_timespec64(&ts, ts_addr)) {
1463 return -TARGET_EFAULT;
1465 } else {
1466 if (target_to_host_timespec(&ts, ts_addr)) {
1467 return -TARGET_EFAULT;
1470 ts_ptr = &ts;
1471 } else {
1472 ts_ptr = NULL;
1475 /* Extract the two packed args for the sigset */
1476 sig_ptr = NULL;
1477 if (arg6) {
1478 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1479 if (!arg7) {
1480 return -TARGET_EFAULT;
1482 arg_sigset = tswapal(arg7[0]);
1483 arg_sigsize = tswapal(arg7[1]);
1484 unlock_user(arg7, arg6, 0);
1486 if (arg_sigset) {
1487 ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1488 if (ret != 0) {
1489 return ret;
1491 sig_ptr = &sig;
1492 sig.size = SIGSET_T_SIZE;
1496 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1497 ts_ptr, sig_ptr));
1499 if (sig_ptr) {
1500 finish_sigsuspend_mask(ret);
1503 if (!is_error(ret)) {
1504 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1505 return -TARGET_EFAULT;
1507 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1508 return -TARGET_EFAULT;
1510 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1511 return -TARGET_EFAULT;
1513 if (time64) {
1514 if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1515 return -TARGET_EFAULT;
1517 } else {
1518 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1519 return -TARGET_EFAULT;
1523 return ret;
1525 #endif
1527 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1528 defined(TARGET_NR_ppoll_time64)
1529 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1530 abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1532 struct target_pollfd *target_pfd;
1533 unsigned int nfds = arg2;
1534 struct pollfd *pfd;
1535 unsigned int i;
1536 abi_long ret;
1538 pfd = NULL;
1539 target_pfd = NULL;
1540 if (nfds) {
1541 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1542 return -TARGET_EINVAL;
1544 target_pfd = lock_user(VERIFY_WRITE, arg1,
1545 sizeof(struct target_pollfd) * nfds, 1);
1546 if (!target_pfd) {
1547 return -TARGET_EFAULT;
1550 pfd = alloca(sizeof(struct pollfd) * nfds);
1551 for (i = 0; i < nfds; i++) {
1552 pfd[i].fd = tswap32(target_pfd[i].fd);
1553 pfd[i].events = tswap16(target_pfd[i].events);
1556 if (ppoll) {
1557 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1558 sigset_t *set = NULL;
1560 if (arg3) {
1561 if (time64) {
1562 if (target_to_host_timespec64(timeout_ts, arg3)) {
1563 unlock_user(target_pfd, arg1, 0);
1564 return -TARGET_EFAULT;
1566 } else {
1567 if (target_to_host_timespec(timeout_ts, arg3)) {
1568 unlock_user(target_pfd, arg1, 0);
1569 return -TARGET_EFAULT;
1572 } else {
1573 timeout_ts = NULL;
1576 if (arg4) {
1577 ret = process_sigsuspend_mask(&set, arg4, arg5);
1578 if (ret != 0) {
1579 unlock_user(target_pfd, arg1, 0);
1580 return ret;
1584 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1585 set, SIGSET_T_SIZE));
1587 if (set) {
1588 finish_sigsuspend_mask(ret);
1590 if (!is_error(ret) && arg3) {
1591 if (time64) {
1592 if (host_to_target_timespec64(arg3, timeout_ts)) {
1593 return -TARGET_EFAULT;
1595 } else {
1596 if (host_to_target_timespec(arg3, timeout_ts)) {
1597 return -TARGET_EFAULT;
1601 } else {
1602 struct timespec ts, *pts;
1604 if (arg3 >= 0) {
1605 /* Convert ms to secs, ns */
1606 ts.tv_sec = arg3 / 1000;
1607 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1608 pts = &ts;
1609 } else {
1610 /* -ve poll() timeout means "infinite" */
1611 pts = NULL;
1613 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1616 if (!is_error(ret)) {
1617 for (i = 0; i < nfds; i++) {
1618 target_pfd[i].revents = tswap16(pfd[i].revents);
1621 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1622 return ret;
1624 #endif
1626 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1627 int flags, int is_pipe2)
1629 int host_pipe[2];
1630 abi_long ret;
1631 ret = pipe2(host_pipe, flags);
1633 if (is_error(ret))
1634 return get_errno(ret);
1636 /* Several targets have special calling conventions for the original
1637 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1638 if (!is_pipe2) {
1639 #if defined(TARGET_ALPHA)
1640 cpu_env->ir[IR_A4] = host_pipe[1];
1641 return host_pipe[0];
1642 #elif defined(TARGET_MIPS)
1643 cpu_env->active_tc.gpr[3] = host_pipe[1];
1644 return host_pipe[0];
1645 #elif defined(TARGET_SH4)
1646 cpu_env->gregs[1] = host_pipe[1];
1647 return host_pipe[0];
1648 #elif defined(TARGET_SPARC)
1649 cpu_env->regwptr[1] = host_pipe[1];
1650 return host_pipe[0];
1651 #endif
1654 if (put_user_s32(host_pipe[0], pipedes)
1655 || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1656 return -TARGET_EFAULT;
1657 return get_errno(ret);
1660 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1661 abi_ulong target_addr,
1662 socklen_t len)
1664 struct target_ip_mreqn *target_smreqn;
1666 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1667 if (!target_smreqn)
1668 return -TARGET_EFAULT;
1669 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1670 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1671 if (len == sizeof(struct target_ip_mreqn))
1672 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1673 unlock_user(target_smreqn, target_addr, 0);
1675 return 0;
1678 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1679 abi_ulong target_addr,
1680 socklen_t len)
1682 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1683 sa_family_t sa_family;
1684 struct target_sockaddr *target_saddr;
1686 if (fd_trans_target_to_host_addr(fd)) {
1687 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1690 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1691 if (!target_saddr)
1692 return -TARGET_EFAULT;
1694 sa_family = tswap16(target_saddr->sa_family);
1696 /* Oops. The caller might send a incomplete sun_path; sun_path
1697 * must be terminated by \0 (see the manual page), but
1698 * unfortunately it is quite common to specify sockaddr_un
1699 * length as "strlen(x->sun_path)" while it should be
1700 * "strlen(...) + 1". We'll fix that here if needed.
1701 * Linux kernel has a similar feature.
1704 if (sa_family == AF_UNIX) {
1705 if (len < unix_maxlen && len > 0) {
1706 char *cp = (char*)target_saddr;
1708 if ( cp[len-1] && !cp[len] )
1709 len++;
1711 if (len > unix_maxlen)
1712 len = unix_maxlen;
1715 memcpy(addr, target_saddr, len);
1716 addr->sa_family = sa_family;
1717 if (sa_family == AF_NETLINK) {
1718 struct sockaddr_nl *nladdr;
1720 nladdr = (struct sockaddr_nl *)addr;
1721 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1722 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1723 } else if (sa_family == AF_PACKET) {
1724 struct target_sockaddr_ll *lladdr;
1726 lladdr = (struct target_sockaddr_ll *)addr;
1727 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1728 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1729 } else if (sa_family == AF_INET6) {
1730 struct sockaddr_in6 *in6addr;
1732 in6addr = (struct sockaddr_in6 *)addr;
1733 in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1735 unlock_user(target_saddr, target_addr, 0);
1737 return 0;
1740 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1741 struct sockaddr *addr,
1742 socklen_t len)
1744 struct target_sockaddr *target_saddr;
1746 if (len == 0) {
1747 return 0;
1749 assert(addr);
1751 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1752 if (!target_saddr)
1753 return -TARGET_EFAULT;
1754 memcpy(target_saddr, addr, len);
1755 if (len >= offsetof(struct target_sockaddr, sa_family) +
1756 sizeof(target_saddr->sa_family)) {
1757 target_saddr->sa_family = tswap16(addr->sa_family);
1759 if (addr->sa_family == AF_NETLINK &&
1760 len >= sizeof(struct target_sockaddr_nl)) {
1761 struct target_sockaddr_nl *target_nl =
1762 (struct target_sockaddr_nl *)target_saddr;
1763 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1764 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1765 } else if (addr->sa_family == AF_PACKET) {
1766 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1767 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1768 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1769 } else if (addr->sa_family == AF_INET6 &&
1770 len >= sizeof(struct target_sockaddr_in6)) {
1771 struct target_sockaddr_in6 *target_in6 =
1772 (struct target_sockaddr_in6 *)target_saddr;
1773 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1775 unlock_user(target_saddr, target_addr, len);
1777 return 0;
1780 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1781 struct target_msghdr *target_msgh)
1783 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1784 abi_long msg_controllen;
1785 abi_ulong target_cmsg_addr;
1786 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1787 socklen_t space = 0;
1789 msg_controllen = tswapal(target_msgh->msg_controllen);
1790 if (msg_controllen < sizeof (struct target_cmsghdr))
1791 goto the_end;
1792 target_cmsg_addr = tswapal(target_msgh->msg_control);
1793 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1794 target_cmsg_start = target_cmsg;
1795 if (!target_cmsg)
1796 return -TARGET_EFAULT;
1798 while (cmsg && target_cmsg) {
1799 void *data = CMSG_DATA(cmsg);
1800 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1802 int len = tswapal(target_cmsg->cmsg_len)
1803 - sizeof(struct target_cmsghdr);
1805 space += CMSG_SPACE(len);
1806 if (space > msgh->msg_controllen) {
1807 space -= CMSG_SPACE(len);
1808 /* This is a QEMU bug, since we allocated the payload
1809 * area ourselves (unlike overflow in host-to-target
1810 * conversion, which is just the guest giving us a buffer
1811 * that's too small). It can't happen for the payload types
1812 * we currently support; if it becomes an issue in future
1813 * we would need to improve our allocation strategy to
1814 * something more intelligent than "twice the size of the
1815 * target buffer we're reading from".
1817 qemu_log_mask(LOG_UNIMP,
1818 ("Unsupported ancillary data %d/%d: "
1819 "unhandled msg size\n"),
1820 tswap32(target_cmsg->cmsg_level),
1821 tswap32(target_cmsg->cmsg_type));
1822 break;
1825 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1826 cmsg->cmsg_level = SOL_SOCKET;
1827 } else {
1828 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1830 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1831 cmsg->cmsg_len = CMSG_LEN(len);
1833 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1834 int *fd = (int *)data;
1835 int *target_fd = (int *)target_data;
1836 int i, numfds = len / sizeof(int);
1838 for (i = 0; i < numfds; i++) {
1839 __get_user(fd[i], target_fd + i);
1841 } else if (cmsg->cmsg_level == SOL_SOCKET
1842 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1843 struct ucred *cred = (struct ucred *)data;
1844 struct target_ucred *target_cred =
1845 (struct target_ucred *)target_data;
1847 __get_user(cred->pid, &target_cred->pid);
1848 __get_user(cred->uid, &target_cred->uid);
1849 __get_user(cred->gid, &target_cred->gid);
1850 } else if (cmsg->cmsg_level == SOL_ALG) {
1851 uint32_t *dst = (uint32_t *)data;
1853 memcpy(dst, target_data, len);
1854 /* fix endianess of first 32-bit word */
1855 if (len >= sizeof(uint32_t)) {
1856 *dst = tswap32(*dst);
1858 } else {
1859 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1860 cmsg->cmsg_level, cmsg->cmsg_type);
1861 memcpy(data, target_data, len);
1864 cmsg = CMSG_NXTHDR(msgh, cmsg);
1865 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1866 target_cmsg_start);
1868 unlock_user(target_cmsg, target_cmsg_addr, 0);
1869 the_end:
1870 msgh->msg_controllen = space;
1871 return 0;
1874 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1875 struct msghdr *msgh)
1877 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1878 abi_long msg_controllen;
1879 abi_ulong target_cmsg_addr;
1880 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1881 socklen_t space = 0;
1883 msg_controllen = tswapal(target_msgh->msg_controllen);
1884 if (msg_controllen < sizeof (struct target_cmsghdr))
1885 goto the_end;
1886 target_cmsg_addr = tswapal(target_msgh->msg_control);
1887 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1888 target_cmsg_start = target_cmsg;
1889 if (!target_cmsg)
1890 return -TARGET_EFAULT;
1892 while (cmsg && target_cmsg) {
1893 void *data = CMSG_DATA(cmsg);
1894 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1896 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1897 int tgt_len, tgt_space;
1899 /* We never copy a half-header but may copy half-data;
1900 * this is Linux's behaviour in put_cmsg(). Note that
1901 * truncation here is a guest problem (which we report
1902 * to the guest via the CTRUNC bit), unlike truncation
1903 * in target_to_host_cmsg, which is a QEMU bug.
1905 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1906 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1907 break;
1910 if (cmsg->cmsg_level == SOL_SOCKET) {
1911 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1912 } else {
1913 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1915 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1917 /* Payload types which need a different size of payload on
1918 * the target must adjust tgt_len here.
1920 tgt_len = len;
1921 switch (cmsg->cmsg_level) {
1922 case SOL_SOCKET:
1923 switch (cmsg->cmsg_type) {
1924 case SO_TIMESTAMP:
1925 tgt_len = sizeof(struct target_timeval);
1926 break;
1927 default:
1928 break;
1930 break;
1931 default:
1932 break;
1935 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1936 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1937 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1940 /* We must now copy-and-convert len bytes of payload
1941 * into tgt_len bytes of destination space. Bear in mind
1942 * that in both source and destination we may be dealing
1943 * with a truncated value!
1945 switch (cmsg->cmsg_level) {
1946 case SOL_SOCKET:
1947 switch (cmsg->cmsg_type) {
1948 case SCM_RIGHTS:
1950 int *fd = (int *)data;
1951 int *target_fd = (int *)target_data;
1952 int i, numfds = tgt_len / sizeof(int);
1954 for (i = 0; i < numfds; i++) {
1955 __put_user(fd[i], target_fd + i);
1957 break;
1959 case SO_TIMESTAMP:
1961 struct timeval *tv = (struct timeval *)data;
1962 struct target_timeval *target_tv =
1963 (struct target_timeval *)target_data;
1965 if (len != sizeof(struct timeval) ||
1966 tgt_len != sizeof(struct target_timeval)) {
1967 goto unimplemented;
1970 /* copy struct timeval to target */
1971 __put_user(tv->tv_sec, &target_tv->tv_sec);
1972 __put_user(tv->tv_usec, &target_tv->tv_usec);
1973 break;
1975 case SCM_CREDENTIALS:
1977 struct ucred *cred = (struct ucred *)data;
1978 struct target_ucred *target_cred =
1979 (struct target_ucred *)target_data;
1981 __put_user(cred->pid, &target_cred->pid);
1982 __put_user(cred->uid, &target_cred->uid);
1983 __put_user(cred->gid, &target_cred->gid);
1984 break;
1986 default:
1987 goto unimplemented;
1989 break;
1991 case SOL_IP:
1992 switch (cmsg->cmsg_type) {
1993 case IP_TTL:
1995 uint32_t *v = (uint32_t *)data;
1996 uint32_t *t_int = (uint32_t *)target_data;
1998 if (len != sizeof(uint32_t) ||
1999 tgt_len != sizeof(uint32_t)) {
2000 goto unimplemented;
2002 __put_user(*v, t_int);
2003 break;
2005 case IP_RECVERR:
2007 struct errhdr_t {
2008 struct sock_extended_err ee;
2009 struct sockaddr_in offender;
2011 struct errhdr_t *errh = (struct errhdr_t *)data;
2012 struct errhdr_t *target_errh =
2013 (struct errhdr_t *)target_data;
2015 if (len != sizeof(struct errhdr_t) ||
2016 tgt_len != sizeof(struct errhdr_t)) {
2017 goto unimplemented;
2019 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2020 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2021 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2022 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2023 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2024 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2025 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2026 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2027 (void *) &errh->offender, sizeof(errh->offender));
2028 break;
2030 default:
2031 goto unimplemented;
2033 break;
2035 case SOL_IPV6:
2036 switch (cmsg->cmsg_type) {
2037 case IPV6_HOPLIMIT:
2039 uint32_t *v = (uint32_t *)data;
2040 uint32_t *t_int = (uint32_t *)target_data;
2042 if (len != sizeof(uint32_t) ||
2043 tgt_len != sizeof(uint32_t)) {
2044 goto unimplemented;
2046 __put_user(*v, t_int);
2047 break;
2049 case IPV6_RECVERR:
2051 struct errhdr6_t {
2052 struct sock_extended_err ee;
2053 struct sockaddr_in6 offender;
2055 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2056 struct errhdr6_t *target_errh =
2057 (struct errhdr6_t *)target_data;
2059 if (len != sizeof(struct errhdr6_t) ||
2060 tgt_len != sizeof(struct errhdr6_t)) {
2061 goto unimplemented;
2063 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2064 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2065 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2066 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2067 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2068 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2069 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2070 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2071 (void *) &errh->offender, sizeof(errh->offender));
2072 break;
2074 default:
2075 goto unimplemented;
2077 break;
2079 default:
2080 unimplemented:
2081 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2082 cmsg->cmsg_level, cmsg->cmsg_type);
2083 memcpy(target_data, data, MIN(len, tgt_len));
2084 if (tgt_len > len) {
2085 memset(target_data + len, 0, tgt_len - len);
2089 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2090 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2091 if (msg_controllen < tgt_space) {
2092 tgt_space = msg_controllen;
2094 msg_controllen -= tgt_space;
2095 space += tgt_space;
2096 cmsg = CMSG_NXTHDR(msgh, cmsg);
2097 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2098 target_cmsg_start);
2100 unlock_user(target_cmsg, target_cmsg_addr, space);
2101 the_end:
2102 target_msgh->msg_controllen = tswapal(space);
2103 return 0;
2106 /* do_setsockopt() Must return target values and target errnos. */
2107 static abi_long do_setsockopt(int sockfd, int level, int optname,
2108 abi_ulong optval_addr, socklen_t optlen)
2110 abi_long ret;
2111 int val;
2112 struct ip_mreqn *ip_mreq;
2113 struct ip_mreq_source *ip_mreq_source;
2115 switch(level) {
2116 case SOL_TCP:
2117 case SOL_UDP:
2118 /* TCP and UDP options all take an 'int' value. */
2119 if (optlen < sizeof(uint32_t))
2120 return -TARGET_EINVAL;
2122 if (get_user_u32(val, optval_addr))
2123 return -TARGET_EFAULT;
2124 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2125 break;
2126 case SOL_IP:
2127 switch(optname) {
2128 case IP_TOS:
2129 case IP_TTL:
2130 case IP_HDRINCL:
2131 case IP_ROUTER_ALERT:
2132 case IP_RECVOPTS:
2133 case IP_RETOPTS:
2134 case IP_PKTINFO:
2135 case IP_MTU_DISCOVER:
2136 case IP_RECVERR:
2137 case IP_RECVTTL:
2138 case IP_RECVTOS:
2139 #ifdef IP_FREEBIND
2140 case IP_FREEBIND:
2141 #endif
2142 case IP_MULTICAST_TTL:
2143 case IP_MULTICAST_LOOP:
2144 val = 0;
2145 if (optlen >= sizeof(uint32_t)) {
2146 if (get_user_u32(val, optval_addr))
2147 return -TARGET_EFAULT;
2148 } else if (optlen >= 1) {
2149 if (get_user_u8(val, optval_addr))
2150 return -TARGET_EFAULT;
2152 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2153 break;
2154 case IP_ADD_MEMBERSHIP:
2155 case IP_DROP_MEMBERSHIP:
2156 if (optlen < sizeof (struct target_ip_mreq) ||
2157 optlen > sizeof (struct target_ip_mreqn))
2158 return -TARGET_EINVAL;
2160 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2161 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2162 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2163 break;
2165 case IP_BLOCK_SOURCE:
2166 case IP_UNBLOCK_SOURCE:
2167 case IP_ADD_SOURCE_MEMBERSHIP:
2168 case IP_DROP_SOURCE_MEMBERSHIP:
2169 if (optlen != sizeof (struct target_ip_mreq_source))
2170 return -TARGET_EINVAL;
2172 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2173 if (!ip_mreq_source) {
2174 return -TARGET_EFAULT;
2176 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2177 unlock_user (ip_mreq_source, optval_addr, 0);
2178 break;
2180 default:
2181 goto unimplemented;
2183 break;
2184 case SOL_IPV6:
2185 switch (optname) {
2186 case IPV6_MTU_DISCOVER:
2187 case IPV6_MTU:
2188 case IPV6_V6ONLY:
2189 case IPV6_RECVPKTINFO:
2190 case IPV6_UNICAST_HOPS:
2191 case IPV6_MULTICAST_HOPS:
2192 case IPV6_MULTICAST_LOOP:
2193 case IPV6_RECVERR:
2194 case IPV6_RECVHOPLIMIT:
2195 case IPV6_2292HOPLIMIT:
2196 case IPV6_CHECKSUM:
2197 case IPV6_ADDRFORM:
2198 case IPV6_2292PKTINFO:
2199 case IPV6_RECVTCLASS:
2200 case IPV6_RECVRTHDR:
2201 case IPV6_2292RTHDR:
2202 case IPV6_RECVHOPOPTS:
2203 case IPV6_2292HOPOPTS:
2204 case IPV6_RECVDSTOPTS:
2205 case IPV6_2292DSTOPTS:
2206 case IPV6_TCLASS:
2207 case IPV6_ADDR_PREFERENCES:
2208 #ifdef IPV6_RECVPATHMTU
2209 case IPV6_RECVPATHMTU:
2210 #endif
2211 #ifdef IPV6_TRANSPARENT
2212 case IPV6_TRANSPARENT:
2213 #endif
2214 #ifdef IPV6_FREEBIND
2215 case IPV6_FREEBIND:
2216 #endif
2217 #ifdef IPV6_RECVORIGDSTADDR
2218 case IPV6_RECVORIGDSTADDR:
2219 #endif
2220 val = 0;
2221 if (optlen < sizeof(uint32_t)) {
2222 return -TARGET_EINVAL;
2224 if (get_user_u32(val, optval_addr)) {
2225 return -TARGET_EFAULT;
2227 ret = get_errno(setsockopt(sockfd, level, optname,
2228 &val, sizeof(val)));
2229 break;
2230 case IPV6_PKTINFO:
2232 struct in6_pktinfo pki;
2234 if (optlen < sizeof(pki)) {
2235 return -TARGET_EINVAL;
2238 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2239 return -TARGET_EFAULT;
2242 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2244 ret = get_errno(setsockopt(sockfd, level, optname,
2245 &pki, sizeof(pki)));
2246 break;
2248 case IPV6_ADD_MEMBERSHIP:
2249 case IPV6_DROP_MEMBERSHIP:
2251 struct ipv6_mreq ipv6mreq;
2253 if (optlen < sizeof(ipv6mreq)) {
2254 return -TARGET_EINVAL;
2257 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2258 return -TARGET_EFAULT;
2261 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2263 ret = get_errno(setsockopt(sockfd, level, optname,
2264 &ipv6mreq, sizeof(ipv6mreq)));
2265 break;
2267 default:
2268 goto unimplemented;
2270 break;
2271 case SOL_ICMPV6:
2272 switch (optname) {
2273 case ICMPV6_FILTER:
2275 struct icmp6_filter icmp6f;
2277 if (optlen > sizeof(icmp6f)) {
2278 optlen = sizeof(icmp6f);
2281 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2282 return -TARGET_EFAULT;
2285 for (val = 0; val < 8; val++) {
2286 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2289 ret = get_errno(setsockopt(sockfd, level, optname,
2290 &icmp6f, optlen));
2291 break;
2293 default:
2294 goto unimplemented;
2296 break;
2297 case SOL_RAW:
2298 switch (optname) {
2299 case ICMP_FILTER:
2300 case IPV6_CHECKSUM:
2301 /* those take an u32 value */
2302 if (optlen < sizeof(uint32_t)) {
2303 return -TARGET_EINVAL;
2306 if (get_user_u32(val, optval_addr)) {
2307 return -TARGET_EFAULT;
2309 ret = get_errno(setsockopt(sockfd, level, optname,
2310 &val, sizeof(val)));
2311 break;
2313 default:
2314 goto unimplemented;
2316 break;
2317 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2318 case SOL_ALG:
2319 switch (optname) {
2320 case ALG_SET_KEY:
2322 char *alg_key = g_malloc(optlen);
2324 if (!alg_key) {
2325 return -TARGET_ENOMEM;
2327 if (copy_from_user(alg_key, optval_addr, optlen)) {
2328 g_free(alg_key);
2329 return -TARGET_EFAULT;
2331 ret = get_errno(setsockopt(sockfd, level, optname,
2332 alg_key, optlen));
2333 g_free(alg_key);
2334 break;
2336 case ALG_SET_AEAD_AUTHSIZE:
2338 ret = get_errno(setsockopt(sockfd, level, optname,
2339 NULL, optlen));
2340 break;
2342 default:
2343 goto unimplemented;
2345 break;
2346 #endif
2347 case TARGET_SOL_SOCKET:
2348 switch (optname) {
2349 case TARGET_SO_RCVTIMEO:
2351 struct timeval tv;
2353 optname = SO_RCVTIMEO;
2355 set_timeout:
2356 if (optlen != sizeof(struct target_timeval)) {
2357 return -TARGET_EINVAL;
2360 if (copy_from_user_timeval(&tv, optval_addr)) {
2361 return -TARGET_EFAULT;
2364 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2365 &tv, sizeof(tv)));
2366 return ret;
2368 case TARGET_SO_SNDTIMEO:
2369 optname = SO_SNDTIMEO;
2370 goto set_timeout;
2371 case TARGET_SO_ATTACH_FILTER:
2373 struct target_sock_fprog *tfprog;
2374 struct target_sock_filter *tfilter;
2375 struct sock_fprog fprog;
2376 struct sock_filter *filter;
2377 int i;
2379 if (optlen != sizeof(*tfprog)) {
2380 return -TARGET_EINVAL;
2382 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2383 return -TARGET_EFAULT;
2385 if (!lock_user_struct(VERIFY_READ, tfilter,
2386 tswapal(tfprog->filter), 0)) {
2387 unlock_user_struct(tfprog, optval_addr, 1);
2388 return -TARGET_EFAULT;
2391 fprog.len = tswap16(tfprog->len);
2392 filter = g_try_new(struct sock_filter, fprog.len);
2393 if (filter == NULL) {
2394 unlock_user_struct(tfilter, tfprog->filter, 1);
2395 unlock_user_struct(tfprog, optval_addr, 1);
2396 return -TARGET_ENOMEM;
2398 for (i = 0; i < fprog.len; i++) {
2399 filter[i].code = tswap16(tfilter[i].code);
2400 filter[i].jt = tfilter[i].jt;
2401 filter[i].jf = tfilter[i].jf;
2402 filter[i].k = tswap32(tfilter[i].k);
2404 fprog.filter = filter;
2406 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2407 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2408 g_free(filter);
2410 unlock_user_struct(tfilter, tfprog->filter, 1);
2411 unlock_user_struct(tfprog, optval_addr, 1);
2412 return ret;
2414 case TARGET_SO_BINDTODEVICE:
2416 char *dev_ifname, *addr_ifname;
2418 if (optlen > IFNAMSIZ - 1) {
2419 optlen = IFNAMSIZ - 1;
2421 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2422 if (!dev_ifname) {
2423 return -TARGET_EFAULT;
2425 optname = SO_BINDTODEVICE;
2426 addr_ifname = alloca(IFNAMSIZ);
2427 memcpy(addr_ifname, dev_ifname, optlen);
2428 addr_ifname[optlen] = 0;
2429 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2430 addr_ifname, optlen));
2431 unlock_user (dev_ifname, optval_addr, 0);
2432 return ret;
2434 case TARGET_SO_LINGER:
2436 struct linger lg;
2437 struct target_linger *tlg;
2439 if (optlen != sizeof(struct target_linger)) {
2440 return -TARGET_EINVAL;
2442 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2443 return -TARGET_EFAULT;
2445 __get_user(lg.l_onoff, &tlg->l_onoff);
2446 __get_user(lg.l_linger, &tlg->l_linger);
2447 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2448 &lg, sizeof(lg)));
2449 unlock_user_struct(tlg, optval_addr, 0);
2450 return ret;
2452 /* Options with 'int' argument. */
2453 case TARGET_SO_DEBUG:
2454 optname = SO_DEBUG;
2455 break;
2456 case TARGET_SO_REUSEADDR:
2457 optname = SO_REUSEADDR;
2458 break;
2459 #ifdef SO_REUSEPORT
2460 case TARGET_SO_REUSEPORT:
2461 optname = SO_REUSEPORT;
2462 break;
2463 #endif
2464 case TARGET_SO_TYPE:
2465 optname = SO_TYPE;
2466 break;
2467 case TARGET_SO_ERROR:
2468 optname = SO_ERROR;
2469 break;
2470 case TARGET_SO_DONTROUTE:
2471 optname = SO_DONTROUTE;
2472 break;
2473 case TARGET_SO_BROADCAST:
2474 optname = SO_BROADCAST;
2475 break;
2476 case TARGET_SO_SNDBUF:
2477 optname = SO_SNDBUF;
2478 break;
2479 case TARGET_SO_SNDBUFFORCE:
2480 optname = SO_SNDBUFFORCE;
2481 break;
2482 case TARGET_SO_RCVBUF:
2483 optname = SO_RCVBUF;
2484 break;
2485 case TARGET_SO_RCVBUFFORCE:
2486 optname = SO_RCVBUFFORCE;
2487 break;
2488 case TARGET_SO_KEEPALIVE:
2489 optname = SO_KEEPALIVE;
2490 break;
2491 case TARGET_SO_OOBINLINE:
2492 optname = SO_OOBINLINE;
2493 break;
2494 case TARGET_SO_NO_CHECK:
2495 optname = SO_NO_CHECK;
2496 break;
2497 case TARGET_SO_PRIORITY:
2498 optname = SO_PRIORITY;
2499 break;
2500 #ifdef SO_BSDCOMPAT
2501 case TARGET_SO_BSDCOMPAT:
2502 optname = SO_BSDCOMPAT;
2503 break;
2504 #endif
2505 case TARGET_SO_PASSCRED:
2506 optname = SO_PASSCRED;
2507 break;
2508 case TARGET_SO_PASSSEC:
2509 optname = SO_PASSSEC;
2510 break;
2511 case TARGET_SO_TIMESTAMP:
2512 optname = SO_TIMESTAMP;
2513 break;
2514 case TARGET_SO_RCVLOWAT:
2515 optname = SO_RCVLOWAT;
2516 break;
2517 default:
2518 goto unimplemented;
2520 if (optlen < sizeof(uint32_t))
2521 return -TARGET_EINVAL;
2523 if (get_user_u32(val, optval_addr))
2524 return -TARGET_EFAULT;
2525 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2526 break;
2527 #ifdef SOL_NETLINK
2528 case SOL_NETLINK:
2529 switch (optname) {
2530 case NETLINK_PKTINFO:
2531 case NETLINK_ADD_MEMBERSHIP:
2532 case NETLINK_DROP_MEMBERSHIP:
2533 case NETLINK_BROADCAST_ERROR:
2534 case NETLINK_NO_ENOBUFS:
2535 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2536 case NETLINK_LISTEN_ALL_NSID:
2537 case NETLINK_CAP_ACK:
2538 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2539 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2540 case NETLINK_EXT_ACK:
2541 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2542 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2543 case NETLINK_GET_STRICT_CHK:
2544 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2545 break;
2546 default:
2547 goto unimplemented;
2549 val = 0;
2550 if (optlen < sizeof(uint32_t)) {
2551 return -TARGET_EINVAL;
2553 if (get_user_u32(val, optval_addr)) {
2554 return -TARGET_EFAULT;
2556 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2557 sizeof(val)));
2558 break;
2559 #endif /* SOL_NETLINK */
2560 default:
2561 unimplemented:
2562 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2563 level, optname);
2564 ret = -TARGET_ENOPROTOOPT;
2566 return ret;
2569 /* do_getsockopt() Must return target values and target errnos. */
2570 static abi_long do_getsockopt(int sockfd, int level, int optname,
2571 abi_ulong optval_addr, abi_ulong optlen)
2573 abi_long ret;
2574 int len, val;
2575 socklen_t lv;
2577 switch(level) {
2578 case TARGET_SOL_SOCKET:
2579 level = SOL_SOCKET;
2580 switch (optname) {
2581 /* These don't just return a single integer */
2582 case TARGET_SO_PEERNAME:
2583 goto unimplemented;
2584 case TARGET_SO_RCVTIMEO: {
2585 struct timeval tv;
2586 socklen_t tvlen;
2588 optname = SO_RCVTIMEO;
2590 get_timeout:
2591 if (get_user_u32(len, optlen)) {
2592 return -TARGET_EFAULT;
2594 if (len < 0) {
2595 return -TARGET_EINVAL;
2598 tvlen = sizeof(tv);
2599 ret = get_errno(getsockopt(sockfd, level, optname,
2600 &tv, &tvlen));
2601 if (ret < 0) {
2602 return ret;
2604 if (len > sizeof(struct target_timeval)) {
2605 len = sizeof(struct target_timeval);
2607 if (copy_to_user_timeval(optval_addr, &tv)) {
2608 return -TARGET_EFAULT;
2610 if (put_user_u32(len, optlen)) {
2611 return -TARGET_EFAULT;
2613 break;
2615 case TARGET_SO_SNDTIMEO:
2616 optname = SO_SNDTIMEO;
2617 goto get_timeout;
2618 case TARGET_SO_PEERCRED: {
2619 struct ucred cr;
2620 socklen_t crlen;
2621 struct target_ucred *tcr;
2623 if (get_user_u32(len, optlen)) {
2624 return -TARGET_EFAULT;
2626 if (len < 0) {
2627 return -TARGET_EINVAL;
2630 crlen = sizeof(cr);
2631 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2632 &cr, &crlen));
2633 if (ret < 0) {
2634 return ret;
2636 if (len > crlen) {
2637 len = crlen;
2639 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2640 return -TARGET_EFAULT;
2642 __put_user(cr.pid, &tcr->pid);
2643 __put_user(cr.uid, &tcr->uid);
2644 __put_user(cr.gid, &tcr->gid);
2645 unlock_user_struct(tcr, optval_addr, 1);
2646 if (put_user_u32(len, optlen)) {
2647 return -TARGET_EFAULT;
2649 break;
2651 case TARGET_SO_PEERSEC: {
2652 char *name;
2654 if (get_user_u32(len, optlen)) {
2655 return -TARGET_EFAULT;
2657 if (len < 0) {
2658 return -TARGET_EINVAL;
2660 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2661 if (!name) {
2662 return -TARGET_EFAULT;
2664 lv = len;
2665 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2666 name, &lv));
2667 if (put_user_u32(lv, optlen)) {
2668 ret = -TARGET_EFAULT;
2670 unlock_user(name, optval_addr, lv);
2671 break;
2673 case TARGET_SO_LINGER:
2675 struct linger lg;
2676 socklen_t lglen;
2677 struct target_linger *tlg;
2679 if (get_user_u32(len, optlen)) {
2680 return -TARGET_EFAULT;
2682 if (len < 0) {
2683 return -TARGET_EINVAL;
2686 lglen = sizeof(lg);
2687 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2688 &lg, &lglen));
2689 if (ret < 0) {
2690 return ret;
2692 if (len > lglen) {
2693 len = lglen;
2695 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2696 return -TARGET_EFAULT;
2698 __put_user(lg.l_onoff, &tlg->l_onoff);
2699 __put_user(lg.l_linger, &tlg->l_linger);
2700 unlock_user_struct(tlg, optval_addr, 1);
2701 if (put_user_u32(len, optlen)) {
2702 return -TARGET_EFAULT;
2704 break;
2706 /* Options with 'int' argument. */
2707 case TARGET_SO_DEBUG:
2708 optname = SO_DEBUG;
2709 goto int_case;
2710 case TARGET_SO_REUSEADDR:
2711 optname = SO_REUSEADDR;
2712 goto int_case;
2713 #ifdef SO_REUSEPORT
2714 case TARGET_SO_REUSEPORT:
2715 optname = SO_REUSEPORT;
2716 goto int_case;
2717 #endif
2718 case TARGET_SO_TYPE:
2719 optname = SO_TYPE;
2720 goto int_case;
2721 case TARGET_SO_ERROR:
2722 optname = SO_ERROR;
2723 goto int_case;
2724 case TARGET_SO_DONTROUTE:
2725 optname = SO_DONTROUTE;
2726 goto int_case;
2727 case TARGET_SO_BROADCAST:
2728 optname = SO_BROADCAST;
2729 goto int_case;
2730 case TARGET_SO_SNDBUF:
2731 optname = SO_SNDBUF;
2732 goto int_case;
2733 case TARGET_SO_RCVBUF:
2734 optname = SO_RCVBUF;
2735 goto int_case;
2736 case TARGET_SO_KEEPALIVE:
2737 optname = SO_KEEPALIVE;
2738 goto int_case;
2739 case TARGET_SO_OOBINLINE:
2740 optname = SO_OOBINLINE;
2741 goto int_case;
2742 case TARGET_SO_NO_CHECK:
2743 optname = SO_NO_CHECK;
2744 goto int_case;
2745 case TARGET_SO_PRIORITY:
2746 optname = SO_PRIORITY;
2747 goto int_case;
2748 #ifdef SO_BSDCOMPAT
2749 case TARGET_SO_BSDCOMPAT:
2750 optname = SO_BSDCOMPAT;
2751 goto int_case;
2752 #endif
2753 case TARGET_SO_PASSCRED:
2754 optname = SO_PASSCRED;
2755 goto int_case;
2756 case TARGET_SO_TIMESTAMP:
2757 optname = SO_TIMESTAMP;
2758 goto int_case;
2759 case TARGET_SO_RCVLOWAT:
2760 optname = SO_RCVLOWAT;
2761 goto int_case;
2762 case TARGET_SO_ACCEPTCONN:
2763 optname = SO_ACCEPTCONN;
2764 goto int_case;
2765 case TARGET_SO_PROTOCOL:
2766 optname = SO_PROTOCOL;
2767 goto int_case;
2768 case TARGET_SO_DOMAIN:
2769 optname = SO_DOMAIN;
2770 goto int_case;
2771 default:
2772 goto int_case;
2774 break;
2775 case SOL_TCP:
2776 case SOL_UDP:
2777 /* TCP and UDP options all take an 'int' value. */
2778 int_case:
2779 if (get_user_u32(len, optlen))
2780 return -TARGET_EFAULT;
2781 if (len < 0)
2782 return -TARGET_EINVAL;
2783 lv = sizeof(lv);
2784 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2785 if (ret < 0)
2786 return ret;
2787 switch (optname) {
2788 case SO_TYPE:
2789 val = host_to_target_sock_type(val);
2790 break;
2791 case SO_ERROR:
2792 val = host_to_target_errno(val);
2793 break;
2795 if (len > lv)
2796 len = lv;
2797 if (len == 4) {
2798 if (put_user_u32(val, optval_addr))
2799 return -TARGET_EFAULT;
2800 } else {
2801 if (put_user_u8(val, optval_addr))
2802 return -TARGET_EFAULT;
2804 if (put_user_u32(len, optlen))
2805 return -TARGET_EFAULT;
2806 break;
2807 case SOL_IP:
2808 switch(optname) {
2809 case IP_TOS:
2810 case IP_TTL:
2811 case IP_HDRINCL:
2812 case IP_ROUTER_ALERT:
2813 case IP_RECVOPTS:
2814 case IP_RETOPTS:
2815 case IP_PKTINFO:
2816 case IP_MTU_DISCOVER:
2817 case IP_RECVERR:
2818 case IP_RECVTOS:
2819 #ifdef IP_FREEBIND
2820 case IP_FREEBIND:
2821 #endif
2822 case IP_MULTICAST_TTL:
2823 case IP_MULTICAST_LOOP:
2824 if (get_user_u32(len, optlen))
2825 return -TARGET_EFAULT;
2826 if (len < 0)
2827 return -TARGET_EINVAL;
2828 lv = sizeof(lv);
2829 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2830 if (ret < 0)
2831 return ret;
2832 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2833 len = 1;
2834 if (put_user_u32(len, optlen)
2835 || put_user_u8(val, optval_addr))
2836 return -TARGET_EFAULT;
2837 } else {
2838 if (len > sizeof(int))
2839 len = sizeof(int);
2840 if (put_user_u32(len, optlen)
2841 || put_user_u32(val, optval_addr))
2842 return -TARGET_EFAULT;
2844 break;
2845 default:
2846 ret = -TARGET_ENOPROTOOPT;
2847 break;
2849 break;
2850 case SOL_IPV6:
2851 switch (optname) {
2852 case IPV6_MTU_DISCOVER:
2853 case IPV6_MTU:
2854 case IPV6_V6ONLY:
2855 case IPV6_RECVPKTINFO:
2856 case IPV6_UNICAST_HOPS:
2857 case IPV6_MULTICAST_HOPS:
2858 case IPV6_MULTICAST_LOOP:
2859 case IPV6_RECVERR:
2860 case IPV6_RECVHOPLIMIT:
2861 case IPV6_2292HOPLIMIT:
2862 case IPV6_CHECKSUM:
2863 case IPV6_ADDRFORM:
2864 case IPV6_2292PKTINFO:
2865 case IPV6_RECVTCLASS:
2866 case IPV6_RECVRTHDR:
2867 case IPV6_2292RTHDR:
2868 case IPV6_RECVHOPOPTS:
2869 case IPV6_2292HOPOPTS:
2870 case IPV6_RECVDSTOPTS:
2871 case IPV6_2292DSTOPTS:
2872 case IPV6_TCLASS:
2873 case IPV6_ADDR_PREFERENCES:
2874 #ifdef IPV6_RECVPATHMTU
2875 case IPV6_RECVPATHMTU:
2876 #endif
2877 #ifdef IPV6_TRANSPARENT
2878 case IPV6_TRANSPARENT:
2879 #endif
2880 #ifdef IPV6_FREEBIND
2881 case IPV6_FREEBIND:
2882 #endif
2883 #ifdef IPV6_RECVORIGDSTADDR
2884 case IPV6_RECVORIGDSTADDR:
2885 #endif
2886 if (get_user_u32(len, optlen))
2887 return -TARGET_EFAULT;
2888 if (len < 0)
2889 return -TARGET_EINVAL;
2890 lv = sizeof(lv);
2891 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2892 if (ret < 0)
2893 return ret;
2894 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2895 len = 1;
2896 if (put_user_u32(len, optlen)
2897 || put_user_u8(val, optval_addr))
2898 return -TARGET_EFAULT;
2899 } else {
2900 if (len > sizeof(int))
2901 len = sizeof(int);
2902 if (put_user_u32(len, optlen)
2903 || put_user_u32(val, optval_addr))
2904 return -TARGET_EFAULT;
2906 break;
2907 default:
2908 ret = -TARGET_ENOPROTOOPT;
2909 break;
2911 break;
2912 #ifdef SOL_NETLINK
2913 case SOL_NETLINK:
2914 switch (optname) {
2915 case NETLINK_PKTINFO:
2916 case NETLINK_BROADCAST_ERROR:
2917 case NETLINK_NO_ENOBUFS:
2918 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2919 case NETLINK_LISTEN_ALL_NSID:
2920 case NETLINK_CAP_ACK:
2921 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2922 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2923 case NETLINK_EXT_ACK:
2924 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2925 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2926 case NETLINK_GET_STRICT_CHK:
2927 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2928 if (get_user_u32(len, optlen)) {
2929 return -TARGET_EFAULT;
2931 if (len != sizeof(val)) {
2932 return -TARGET_EINVAL;
2934 lv = len;
2935 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2936 if (ret < 0) {
2937 return ret;
2939 if (put_user_u32(lv, optlen)
2940 || put_user_u32(val, optval_addr)) {
2941 return -TARGET_EFAULT;
2943 break;
2944 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2945 case NETLINK_LIST_MEMBERSHIPS:
2947 uint32_t *results;
2948 int i;
2949 if (get_user_u32(len, optlen)) {
2950 return -TARGET_EFAULT;
2952 if (len < 0) {
2953 return -TARGET_EINVAL;
2955 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2956 if (!results && len > 0) {
2957 return -TARGET_EFAULT;
2959 lv = len;
2960 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2961 if (ret < 0) {
2962 unlock_user(results, optval_addr, 0);
2963 return ret;
2965 /* swap host endianess to target endianess. */
2966 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2967 results[i] = tswap32(results[i]);
2969 if (put_user_u32(lv, optlen)) {
2970 return -TARGET_EFAULT;
2972 unlock_user(results, optval_addr, 0);
2973 break;
2975 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2976 default:
2977 goto unimplemented;
2979 break;
2980 #endif /* SOL_NETLINK */
2981 default:
2982 unimplemented:
2983 qemu_log_mask(LOG_UNIMP,
2984 "getsockopt level=%d optname=%d not yet supported\n",
2985 level, optname);
2986 ret = -TARGET_EOPNOTSUPP;
2987 break;
2989 return ret;
2992 /* Convert target low/high pair representing file offset into the host
2993 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2994 * as the kernel doesn't handle them either.
2996 static void target_to_host_low_high(abi_ulong tlow,
2997 abi_ulong thigh,
2998 unsigned long *hlow,
2999 unsigned long *hhigh)
3001 uint64_t off = tlow |
3002 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3003 TARGET_LONG_BITS / 2;
3005 *hlow = off;
3006 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3009 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3010 abi_ulong count, int copy)
3012 struct target_iovec *target_vec;
3013 struct iovec *vec;
3014 abi_ulong total_len, max_len;
3015 int i;
3016 int err = 0;
3017 bool bad_address = false;
3019 if (count == 0) {
3020 errno = 0;
3021 return NULL;
3023 if (count > IOV_MAX) {
3024 errno = EINVAL;
3025 return NULL;
3028 vec = g_try_new0(struct iovec, count);
3029 if (vec == NULL) {
3030 errno = ENOMEM;
3031 return NULL;
3034 target_vec = lock_user(VERIFY_READ, target_addr,
3035 count * sizeof(struct target_iovec), 1);
3036 if (target_vec == NULL) {
3037 err = EFAULT;
3038 goto fail2;
3041 /* ??? If host page size > target page size, this will result in a
3042 value larger than what we can actually support. */
3043 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3044 total_len = 0;
3046 for (i = 0; i < count; i++) {
3047 abi_ulong base = tswapal(target_vec[i].iov_base);
3048 abi_long len = tswapal(target_vec[i].iov_len);
3050 if (len < 0) {
3051 err = EINVAL;
3052 goto fail;
3053 } else if (len == 0) {
3054 /* Zero length pointer is ignored. */
3055 vec[i].iov_base = 0;
3056 } else {
3057 vec[i].iov_base = lock_user(type, base, len, copy);
3058 /* If the first buffer pointer is bad, this is a fault. But
3059 * subsequent bad buffers will result in a partial write; this
3060 * is realized by filling the vector with null pointers and
3061 * zero lengths. */
3062 if (!vec[i].iov_base) {
3063 if (i == 0) {
3064 err = EFAULT;
3065 goto fail;
3066 } else {
3067 bad_address = true;
3070 if (bad_address) {
3071 len = 0;
3073 if (len > max_len - total_len) {
3074 len = max_len - total_len;
3077 vec[i].iov_len = len;
3078 total_len += len;
3081 unlock_user(target_vec, target_addr, 0);
3082 return vec;
3084 fail:
3085 while (--i >= 0) {
3086 if (tswapal(target_vec[i].iov_len) > 0) {
3087 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3090 unlock_user(target_vec, target_addr, 0);
3091 fail2:
3092 g_free(vec);
3093 errno = err;
3094 return NULL;
3097 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3098 abi_ulong count, int copy)
3100 struct target_iovec *target_vec;
3101 int i;
3103 target_vec = lock_user(VERIFY_READ, target_addr,
3104 count * sizeof(struct target_iovec), 1);
3105 if (target_vec) {
3106 for (i = 0; i < count; i++) {
3107 abi_ulong base = tswapal(target_vec[i].iov_base);
3108 abi_long len = tswapal(target_vec[i].iov_len);
3109 if (len < 0) {
3110 break;
3112 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3114 unlock_user(target_vec, target_addr, 0);
3117 g_free(vec);
3120 static inline int target_to_host_sock_type(int *type)
3122 int host_type = 0;
3123 int target_type = *type;
3125 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3126 case TARGET_SOCK_DGRAM:
3127 host_type = SOCK_DGRAM;
3128 break;
3129 case TARGET_SOCK_STREAM:
3130 host_type = SOCK_STREAM;
3131 break;
3132 default:
3133 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3134 break;
3136 if (target_type & TARGET_SOCK_CLOEXEC) {
3137 #if defined(SOCK_CLOEXEC)
3138 host_type |= SOCK_CLOEXEC;
3139 #else
3140 return -TARGET_EINVAL;
3141 #endif
3143 if (target_type & TARGET_SOCK_NONBLOCK) {
3144 #if defined(SOCK_NONBLOCK)
3145 host_type |= SOCK_NONBLOCK;
3146 #elif !defined(O_NONBLOCK)
3147 return -TARGET_EINVAL;
3148 #endif
3150 *type = host_type;
3151 return 0;
3154 /* Try to emulate socket type flags after socket creation. */
3155 static int sock_flags_fixup(int fd, int target_type)
3157 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3158 if (target_type & TARGET_SOCK_NONBLOCK) {
3159 int flags = fcntl(fd, F_GETFL);
3160 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3161 close(fd);
3162 return -TARGET_EINVAL;
3165 #endif
3166 return fd;
3169 /* do_socket() Must return target values and target errnos. */
3170 static abi_long do_socket(int domain, int type, int protocol)
3172 int target_type = type;
3173 int ret;
3175 ret = target_to_host_sock_type(&type);
3176 if (ret) {
3177 return ret;
3180 if (domain == PF_NETLINK && !(
3181 #ifdef CONFIG_RTNETLINK
3182 protocol == NETLINK_ROUTE ||
3183 #endif
3184 protocol == NETLINK_KOBJECT_UEVENT ||
3185 protocol == NETLINK_AUDIT)) {
3186 return -TARGET_EPROTONOSUPPORT;
3189 if (domain == AF_PACKET ||
3190 (domain == AF_INET && type == SOCK_PACKET)) {
3191 protocol = tswap16(protocol);
3194 ret = get_errno(socket(domain, type, protocol));
3195 if (ret >= 0) {
3196 ret = sock_flags_fixup(ret, target_type);
3197 if (type == SOCK_PACKET) {
3198 /* Manage an obsolete case :
3199 * if socket type is SOCK_PACKET, bind by name
3201 fd_trans_register(ret, &target_packet_trans);
3202 } else if (domain == PF_NETLINK) {
3203 switch (protocol) {
3204 #ifdef CONFIG_RTNETLINK
3205 case NETLINK_ROUTE:
3206 fd_trans_register(ret, &target_netlink_route_trans);
3207 break;
3208 #endif
3209 case NETLINK_KOBJECT_UEVENT:
3210 /* nothing to do: messages are strings */
3211 break;
3212 case NETLINK_AUDIT:
3213 fd_trans_register(ret, &target_netlink_audit_trans);
3214 break;
3215 default:
3216 g_assert_not_reached();
3220 return ret;
3223 /* do_bind() Must return target values and target errnos. */
3224 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3225 socklen_t addrlen)
3227 void *addr;
3228 abi_long ret;
3230 if ((int)addrlen < 0) {
3231 return -TARGET_EINVAL;
3234 addr = alloca(addrlen+1);
3236 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3237 if (ret)
3238 return ret;
3240 return get_errno(bind(sockfd, addr, addrlen));
3243 /* do_connect() Must return target values and target errnos. */
3244 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3245 socklen_t addrlen)
3247 void *addr;
3248 abi_long ret;
3250 if ((int)addrlen < 0) {
3251 return -TARGET_EINVAL;
3254 addr = alloca(addrlen+1);
3256 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3257 if (ret)
3258 return ret;
3260 return get_errno(safe_connect(sockfd, addr, addrlen));
3263 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3264 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3265 int flags, int send)
3267 abi_long ret, len;
3268 struct msghdr msg;
3269 abi_ulong count;
3270 struct iovec *vec;
3271 abi_ulong target_vec;
3273 if (msgp->msg_name) {
3274 msg.msg_namelen = tswap32(msgp->msg_namelen);
3275 msg.msg_name = alloca(msg.msg_namelen+1);
3276 ret = target_to_host_sockaddr(fd, msg.msg_name,
3277 tswapal(msgp->msg_name),
3278 msg.msg_namelen);
3279 if (ret == -TARGET_EFAULT) {
3280 /* For connected sockets msg_name and msg_namelen must
3281 * be ignored, so returning EFAULT immediately is wrong.
3282 * Instead, pass a bad msg_name to the host kernel, and
3283 * let it decide whether to return EFAULT or not.
3285 msg.msg_name = (void *)-1;
3286 } else if (ret) {
3287 goto out2;
3289 } else {
3290 msg.msg_name = NULL;
3291 msg.msg_namelen = 0;
3293 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3294 msg.msg_control = alloca(msg.msg_controllen);
3295 memset(msg.msg_control, 0, msg.msg_controllen);
3297 msg.msg_flags = tswap32(msgp->msg_flags);
3299 count = tswapal(msgp->msg_iovlen);
3300 target_vec = tswapal(msgp->msg_iov);
3302 if (count > IOV_MAX) {
3303 /* sendrcvmsg returns a different errno for this condition than
3304 * readv/writev, so we must catch it here before lock_iovec() does.
3306 ret = -TARGET_EMSGSIZE;
3307 goto out2;
3310 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3311 target_vec, count, send);
3312 if (vec == NULL) {
3313 ret = -host_to_target_errno(errno);
3314 /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3315 if (!send || ret) {
3316 goto out2;
3319 msg.msg_iovlen = count;
3320 msg.msg_iov = vec;
3322 if (send) {
3323 if (fd_trans_target_to_host_data(fd)) {
3324 void *host_msg;
3326 host_msg = g_malloc(msg.msg_iov->iov_len);
3327 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3328 ret = fd_trans_target_to_host_data(fd)(host_msg,
3329 msg.msg_iov->iov_len);
3330 if (ret >= 0) {
3331 msg.msg_iov->iov_base = host_msg;
3332 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3334 g_free(host_msg);
3335 } else {
3336 ret = target_to_host_cmsg(&msg, msgp);
3337 if (ret == 0) {
3338 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3341 } else {
3342 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3343 if (!is_error(ret)) {
3344 len = ret;
3345 if (fd_trans_host_to_target_data(fd)) {
3346 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3347 MIN(msg.msg_iov->iov_len, len));
3349 if (!is_error(ret)) {
3350 ret = host_to_target_cmsg(msgp, &msg);
3352 if (!is_error(ret)) {
3353 msgp->msg_namelen = tswap32(msg.msg_namelen);
3354 msgp->msg_flags = tswap32(msg.msg_flags);
3355 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3356 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3357 msg.msg_name, msg.msg_namelen);
3358 if (ret) {
3359 goto out;
3363 ret = len;
3368 out:
3369 if (vec) {
3370 unlock_iovec(vec, target_vec, count, !send);
3372 out2:
3373 return ret;
3376 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3377 int flags, int send)
3379 abi_long ret;
3380 struct target_msghdr *msgp;
3382 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3383 msgp,
3384 target_msg,
3385 send ? 1 : 0)) {
3386 return -TARGET_EFAULT;
3388 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3389 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3390 return ret;
3393 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3394 * so it might not have this *mmsg-specific flag either.
3396 #ifndef MSG_WAITFORONE
3397 #define MSG_WAITFORONE 0x10000
3398 #endif
3400 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3401 unsigned int vlen, unsigned int flags,
3402 int send)
3404 struct target_mmsghdr *mmsgp;
3405 abi_long ret = 0;
3406 int i;
3408 if (vlen > UIO_MAXIOV) {
3409 vlen = UIO_MAXIOV;
3412 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3413 if (!mmsgp) {
3414 return -TARGET_EFAULT;
3417 for (i = 0; i < vlen; i++) {
3418 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3419 if (is_error(ret)) {
3420 break;
3422 mmsgp[i].msg_len = tswap32(ret);
3423 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3424 if (flags & MSG_WAITFORONE) {
3425 flags |= MSG_DONTWAIT;
3429 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3431 /* Return number of datagrams sent if we sent any at all;
3432 * otherwise return the error.
3434 if (i) {
3435 return i;
3437 return ret;
3440 /* do_accept4() Must return target values and target errnos. */
3441 static abi_long do_accept4(int fd, abi_ulong target_addr,
3442 abi_ulong target_addrlen_addr, int flags)
3444 socklen_t addrlen, ret_addrlen;
3445 void *addr;
3446 abi_long ret;
3447 int host_flags;
3449 if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3450 return -TARGET_EINVAL;
3453 host_flags = 0;
3454 if (flags & TARGET_SOCK_NONBLOCK) {
3455 host_flags |= SOCK_NONBLOCK;
3457 if (flags & TARGET_SOCK_CLOEXEC) {
3458 host_flags |= SOCK_CLOEXEC;
3461 if (target_addr == 0) {
3462 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3465 /* linux returns EFAULT if addrlen pointer is invalid */
3466 if (get_user_u32(addrlen, target_addrlen_addr))
3467 return -TARGET_EFAULT;
3469 if ((int)addrlen < 0) {
3470 return -TARGET_EINVAL;
3473 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3474 return -TARGET_EFAULT;
3477 addr = alloca(addrlen);
3479 ret_addrlen = addrlen;
3480 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3481 if (!is_error(ret)) {
3482 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3483 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3484 ret = -TARGET_EFAULT;
3487 return ret;
3490 /* do_getpeername() Must return target values and target errnos. */
3491 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3492 abi_ulong target_addrlen_addr)
3494 socklen_t addrlen, ret_addrlen;
3495 void *addr;
3496 abi_long ret;
3498 if (get_user_u32(addrlen, target_addrlen_addr))
3499 return -TARGET_EFAULT;
3501 if ((int)addrlen < 0) {
3502 return -TARGET_EINVAL;
3505 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3506 return -TARGET_EFAULT;
3509 addr = alloca(addrlen);
3511 ret_addrlen = addrlen;
3512 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3513 if (!is_error(ret)) {
3514 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3515 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3516 ret = -TARGET_EFAULT;
3519 return ret;
3522 /* do_getsockname() Must return target values and target errnos. */
3523 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3524 abi_ulong target_addrlen_addr)
3526 socklen_t addrlen, ret_addrlen;
3527 void *addr;
3528 abi_long ret;
3530 if (get_user_u32(addrlen, target_addrlen_addr))
3531 return -TARGET_EFAULT;
3533 if ((int)addrlen < 0) {
3534 return -TARGET_EINVAL;
3537 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3538 return -TARGET_EFAULT;
3541 addr = alloca(addrlen);
3543 ret_addrlen = addrlen;
3544 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3545 if (!is_error(ret)) {
3546 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3547 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3548 ret = -TARGET_EFAULT;
3551 return ret;
3554 /* do_socketpair() Must return target values and target errnos. */
3555 static abi_long do_socketpair(int domain, int type, int protocol,
3556 abi_ulong target_tab_addr)
3558 int tab[2];
3559 abi_long ret;
3561 target_to_host_sock_type(&type);
3563 ret = get_errno(socketpair(domain, type, protocol, tab));
3564 if (!is_error(ret)) {
3565 if (put_user_s32(tab[0], target_tab_addr)
3566 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3567 ret = -TARGET_EFAULT;
3569 return ret;
3572 /* do_sendto() Must return target values and target errnos. */
3573 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3574 abi_ulong target_addr, socklen_t addrlen)
3576 void *addr;
3577 void *host_msg;
3578 void *copy_msg = NULL;
3579 abi_long ret;
3581 if ((int)addrlen < 0) {
3582 return -TARGET_EINVAL;
3585 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3586 if (!host_msg)
3587 return -TARGET_EFAULT;
3588 if (fd_trans_target_to_host_data(fd)) {
3589 copy_msg = host_msg;
3590 host_msg = g_malloc(len);
3591 memcpy(host_msg, copy_msg, len);
3592 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3593 if (ret < 0) {
3594 goto fail;
3597 if (target_addr) {
3598 addr = alloca(addrlen+1);
3599 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3600 if (ret) {
3601 goto fail;
3603 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3604 } else {
3605 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3607 fail:
3608 if (copy_msg) {
3609 g_free(host_msg);
3610 host_msg = copy_msg;
3612 unlock_user(host_msg, msg, 0);
3613 return ret;
3616 /* do_recvfrom() Must return target values and target errnos. */
3617 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3618 abi_ulong target_addr,
3619 abi_ulong target_addrlen)
3621 socklen_t addrlen, ret_addrlen;
3622 void *addr;
3623 void *host_msg;
3624 abi_long ret;
3626 if (!msg) {
3627 host_msg = NULL;
3628 } else {
3629 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3630 if (!host_msg) {
3631 return -TARGET_EFAULT;
3634 if (target_addr) {
3635 if (get_user_u32(addrlen, target_addrlen)) {
3636 ret = -TARGET_EFAULT;
3637 goto fail;
3639 if ((int)addrlen < 0) {
3640 ret = -TARGET_EINVAL;
3641 goto fail;
3643 addr = alloca(addrlen);
3644 ret_addrlen = addrlen;
3645 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3646 addr, &ret_addrlen));
3647 } else {
3648 addr = NULL; /* To keep compiler quiet. */
3649 addrlen = 0; /* To keep compiler quiet. */
3650 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3652 if (!is_error(ret)) {
3653 if (fd_trans_host_to_target_data(fd)) {
3654 abi_long trans;
3655 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3656 if (is_error(trans)) {
3657 ret = trans;
3658 goto fail;
3661 if (target_addr) {
3662 host_to_target_sockaddr(target_addr, addr,
3663 MIN(addrlen, ret_addrlen));
3664 if (put_user_u32(ret_addrlen, target_addrlen)) {
3665 ret = -TARGET_EFAULT;
3666 goto fail;
3669 unlock_user(host_msg, msg, len);
3670 } else {
3671 fail:
3672 unlock_user(host_msg, msg, 0);
3674 return ret;
3677 #ifdef TARGET_NR_socketcall
3678 /* do_socketcall() must return target values and target errnos. */
3679 static abi_long do_socketcall(int num, abi_ulong vptr)
3681 static const unsigned nargs[] = { /* number of arguments per operation */
3682 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3683 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3684 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3685 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3686 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3687 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3688 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3689 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3690 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3691 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3692 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3693 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3694 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3695 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3696 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3697 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3698 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3699 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3700 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3701 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3703 abi_long a[6]; /* max 6 args */
3704 unsigned i;
3706 /* check the range of the first argument num */
3707 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3708 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3709 return -TARGET_EINVAL;
3711 /* ensure we have space for args */
3712 if (nargs[num] > ARRAY_SIZE(a)) {
3713 return -TARGET_EINVAL;
3715 /* collect the arguments in a[] according to nargs[] */
3716 for (i = 0; i < nargs[num]; ++i) {
3717 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3718 return -TARGET_EFAULT;
3721 /* now when we have the args, invoke the appropriate underlying function */
3722 switch (num) {
3723 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3724 return do_socket(a[0], a[1], a[2]);
3725 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3726 return do_bind(a[0], a[1], a[2]);
3727 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3728 return do_connect(a[0], a[1], a[2]);
3729 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3730 return get_errno(listen(a[0], a[1]));
3731 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3732 return do_accept4(a[0], a[1], a[2], 0);
3733 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3734 return do_getsockname(a[0], a[1], a[2]);
3735 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3736 return do_getpeername(a[0], a[1], a[2]);
3737 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3738 return do_socketpair(a[0], a[1], a[2], a[3]);
3739 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3740 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3741 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3742 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3743 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3744 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3745 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3746 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3747 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3748 return get_errno(shutdown(a[0], a[1]));
3749 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3750 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3751 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3752 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3753 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3754 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3755 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3756 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3757 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3758 return do_accept4(a[0], a[1], a[2], a[3]);
3759 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3760 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3761 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3762 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3763 default:
3764 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3765 return -TARGET_EINVAL;
3768 #endif
3770 #define N_SHM_REGIONS 32
3772 static struct shm_region {
3773 abi_ulong start;
3774 abi_ulong size;
3775 bool in_use;
3776 } shm_regions[N_SHM_REGIONS];
3778 #ifndef TARGET_SEMID64_DS
3779 /* asm-generic version of this struct */
3780 struct target_semid64_ds
3782 struct target_ipc_perm sem_perm;
3783 abi_ulong sem_otime;
3784 #if TARGET_ABI_BITS == 32
3785 abi_ulong __unused1;
3786 #endif
3787 abi_ulong sem_ctime;
3788 #if TARGET_ABI_BITS == 32
3789 abi_ulong __unused2;
3790 #endif
3791 abi_ulong sem_nsems;
3792 abi_ulong __unused3;
3793 abi_ulong __unused4;
3795 #endif
3797 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3798 abi_ulong target_addr)
3800 struct target_ipc_perm *target_ip;
3801 struct target_semid64_ds *target_sd;
3803 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3804 return -TARGET_EFAULT;
3805 target_ip = &(target_sd->sem_perm);
3806 host_ip->__key = tswap32(target_ip->__key);
3807 host_ip->uid = tswap32(target_ip->uid);
3808 host_ip->gid = tswap32(target_ip->gid);
3809 host_ip->cuid = tswap32(target_ip->cuid);
3810 host_ip->cgid = tswap32(target_ip->cgid);
3811 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3812 host_ip->mode = tswap32(target_ip->mode);
3813 #else
3814 host_ip->mode = tswap16(target_ip->mode);
3815 #endif
3816 #if defined(TARGET_PPC)
3817 host_ip->__seq = tswap32(target_ip->__seq);
3818 #else
3819 host_ip->__seq = tswap16(target_ip->__seq);
3820 #endif
3821 unlock_user_struct(target_sd, target_addr, 0);
3822 return 0;
3825 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3826 struct ipc_perm *host_ip)
3828 struct target_ipc_perm *target_ip;
3829 struct target_semid64_ds *target_sd;
3831 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3832 return -TARGET_EFAULT;
3833 target_ip = &(target_sd->sem_perm);
3834 target_ip->__key = tswap32(host_ip->__key);
3835 target_ip->uid = tswap32(host_ip->uid);
3836 target_ip->gid = tswap32(host_ip->gid);
3837 target_ip->cuid = tswap32(host_ip->cuid);
3838 target_ip->cgid = tswap32(host_ip->cgid);
3839 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3840 target_ip->mode = tswap32(host_ip->mode);
3841 #else
3842 target_ip->mode = tswap16(host_ip->mode);
3843 #endif
3844 #if defined(TARGET_PPC)
3845 target_ip->__seq = tswap32(host_ip->__seq);
3846 #else
3847 target_ip->__seq = tswap16(host_ip->__seq);
3848 #endif
3849 unlock_user_struct(target_sd, target_addr, 1);
3850 return 0;
3853 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3854 abi_ulong target_addr)
3856 struct target_semid64_ds *target_sd;
3858 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3859 return -TARGET_EFAULT;
3860 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3861 return -TARGET_EFAULT;
3862 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3863 host_sd->sem_otime = tswapal(target_sd->sem_otime);
3864 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3865 unlock_user_struct(target_sd, target_addr, 0);
3866 return 0;
3869 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3870 struct semid_ds *host_sd)
3872 struct target_semid64_ds *target_sd;
3874 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3875 return -TARGET_EFAULT;
3876 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3877 return -TARGET_EFAULT;
3878 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3879 target_sd->sem_otime = tswapal(host_sd->sem_otime);
3880 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3881 unlock_user_struct(target_sd, target_addr, 1);
3882 return 0;
3885 struct target_seminfo {
3886 int semmap;
3887 int semmni;
3888 int semmns;
3889 int semmnu;
3890 int semmsl;
3891 int semopm;
3892 int semume;
3893 int semusz;
3894 int semvmx;
3895 int semaem;
3898 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3899 struct seminfo *host_seminfo)
3901 struct target_seminfo *target_seminfo;
3902 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3903 return -TARGET_EFAULT;
3904 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3905 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3906 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3907 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3908 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3909 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3910 __put_user(host_seminfo->semume, &target_seminfo->semume);
3911 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3912 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3913 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3914 unlock_user_struct(target_seminfo, target_addr, 1);
3915 return 0;
3918 union semun {
3919 int val;
3920 struct semid_ds *buf;
3921 unsigned short *array;
3922 struct seminfo *__buf;
3925 union target_semun {
3926 int val;
3927 abi_ulong buf;
3928 abi_ulong array;
3929 abi_ulong __buf;
3932 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3933 abi_ulong target_addr)
3935 int nsems;
3936 unsigned short *array;
3937 union semun semun;
3938 struct semid_ds semid_ds;
3939 int i, ret;
3941 semun.buf = &semid_ds;
3943 ret = semctl(semid, 0, IPC_STAT, semun);
3944 if (ret == -1)
3945 return get_errno(ret);
3947 nsems = semid_ds.sem_nsems;
3949 *host_array = g_try_new(unsigned short, nsems);
3950 if (!*host_array) {
3951 return -TARGET_ENOMEM;
3953 array = lock_user(VERIFY_READ, target_addr,
3954 nsems*sizeof(unsigned short), 1);
3955 if (!array) {
3956 g_free(*host_array);
3957 return -TARGET_EFAULT;
3960 for(i=0; i<nsems; i++) {
3961 __get_user((*host_array)[i], &array[i]);
3963 unlock_user(array, target_addr, 0);
3965 return 0;
3968 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3969 unsigned short **host_array)
3971 int nsems;
3972 unsigned short *array;
3973 union semun semun;
3974 struct semid_ds semid_ds;
3975 int i, ret;
3977 semun.buf = &semid_ds;
3979 ret = semctl(semid, 0, IPC_STAT, semun);
3980 if (ret == -1)
3981 return get_errno(ret);
3983 nsems = semid_ds.sem_nsems;
3985 array = lock_user(VERIFY_WRITE, target_addr,
3986 nsems*sizeof(unsigned short), 0);
3987 if (!array)
3988 return -TARGET_EFAULT;
3990 for(i=0; i<nsems; i++) {
3991 __put_user((*host_array)[i], &array[i]);
3993 g_free(*host_array);
3994 unlock_user(array, target_addr, 1);
3996 return 0;
3999 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4000 abi_ulong target_arg)
4002 union target_semun target_su = { .buf = target_arg };
4003 union semun arg;
4004 struct semid_ds dsarg;
4005 unsigned short *array = NULL;
4006 struct seminfo seminfo;
4007 abi_long ret = -TARGET_EINVAL;
4008 abi_long err;
4009 cmd &= 0xff;
4011 switch( cmd ) {
4012 case GETVAL:
4013 case SETVAL:
4014 /* In 64 bit cross-endian situations, we will erroneously pick up
4015 * the wrong half of the union for the "val" element. To rectify
4016 * this, the entire 8-byte structure is byteswapped, followed by
4017 * a swap of the 4 byte val field. In other cases, the data is
4018 * already in proper host byte order. */
4019 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4020 target_su.buf = tswapal(target_su.buf);
4021 arg.val = tswap32(target_su.val);
4022 } else {
4023 arg.val = target_su.val;
4025 ret = get_errno(semctl(semid, semnum, cmd, arg));
4026 break;
4027 case GETALL:
4028 case SETALL:
4029 err = target_to_host_semarray(semid, &array, target_su.array);
4030 if (err)
4031 return err;
4032 arg.array = array;
4033 ret = get_errno(semctl(semid, semnum, cmd, arg));
4034 err = host_to_target_semarray(semid, target_su.array, &array);
4035 if (err)
4036 return err;
4037 break;
4038 case IPC_STAT:
4039 case IPC_SET:
4040 case SEM_STAT:
4041 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4042 if (err)
4043 return err;
4044 arg.buf = &dsarg;
4045 ret = get_errno(semctl(semid, semnum, cmd, arg));
4046 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4047 if (err)
4048 return err;
4049 break;
4050 case IPC_INFO:
4051 case SEM_INFO:
4052 arg.__buf = &seminfo;
4053 ret = get_errno(semctl(semid, semnum, cmd, arg));
4054 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4055 if (err)
4056 return err;
4057 break;
4058 case IPC_RMID:
4059 case GETPID:
4060 case GETNCNT:
4061 case GETZCNT:
4062 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4063 break;
4066 return ret;
4069 struct target_sembuf {
4070 unsigned short sem_num;
4071 short sem_op;
4072 short sem_flg;
4075 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4076 abi_ulong target_addr,
4077 unsigned nsops)
4079 struct target_sembuf *target_sembuf;
4080 int i;
4082 target_sembuf = lock_user(VERIFY_READ, target_addr,
4083 nsops*sizeof(struct target_sembuf), 1);
4084 if (!target_sembuf)
4085 return -TARGET_EFAULT;
4087 for(i=0; i<nsops; i++) {
4088 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4089 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4090 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4093 unlock_user(target_sembuf, target_addr, 0);
4095 return 0;
4098 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4099 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4102 * This macro is required to handle the s390 variants, which passes the
4103 * arguments in a different order than default.
4105 #ifdef __s390x__
4106 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4107 (__nsops), (__timeout), (__sops)
4108 #else
4109 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4110 (__nsops), 0, (__sops), (__timeout)
4111 #endif
4113 static inline abi_long do_semtimedop(int semid,
4114 abi_long ptr,
4115 unsigned nsops,
4116 abi_long timeout, bool time64)
4118 struct sembuf *sops;
4119 struct timespec ts, *pts = NULL;
4120 abi_long ret;
4122 if (timeout) {
4123 pts = &ts;
4124 if (time64) {
4125 if (target_to_host_timespec64(pts, timeout)) {
4126 return -TARGET_EFAULT;
4128 } else {
4129 if (target_to_host_timespec(pts, timeout)) {
4130 return -TARGET_EFAULT;
4135 if (nsops > TARGET_SEMOPM) {
4136 return -TARGET_E2BIG;
4139 sops = g_new(struct sembuf, nsops);
4141 if (target_to_host_sembuf(sops, ptr, nsops)) {
4142 g_free(sops);
4143 return -TARGET_EFAULT;
4146 ret = -TARGET_ENOSYS;
4147 #ifdef __NR_semtimedop
4148 ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4149 #endif
4150 #ifdef __NR_ipc
4151 if (ret == -TARGET_ENOSYS) {
4152 ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4153 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4155 #endif
4156 g_free(sops);
4157 return ret;
4159 #endif
4161 struct target_msqid_ds
4163 struct target_ipc_perm msg_perm;
4164 abi_ulong msg_stime;
4165 #if TARGET_ABI_BITS == 32
4166 abi_ulong __unused1;
4167 #endif
4168 abi_ulong msg_rtime;
4169 #if TARGET_ABI_BITS == 32
4170 abi_ulong __unused2;
4171 #endif
4172 abi_ulong msg_ctime;
4173 #if TARGET_ABI_BITS == 32
4174 abi_ulong __unused3;
4175 #endif
4176 abi_ulong __msg_cbytes;
4177 abi_ulong msg_qnum;
4178 abi_ulong msg_qbytes;
4179 abi_ulong msg_lspid;
4180 abi_ulong msg_lrpid;
4181 abi_ulong __unused4;
4182 abi_ulong __unused5;
4185 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4186 abi_ulong target_addr)
4188 struct target_msqid_ds *target_md;
4190 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4191 return -TARGET_EFAULT;
4192 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4193 return -TARGET_EFAULT;
4194 host_md->msg_stime = tswapal(target_md->msg_stime);
4195 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4196 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4197 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4198 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4199 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4200 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4201 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4202 unlock_user_struct(target_md, target_addr, 0);
4203 return 0;
4206 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4207 struct msqid_ds *host_md)
4209 struct target_msqid_ds *target_md;
4211 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4212 return -TARGET_EFAULT;
4213 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4214 return -TARGET_EFAULT;
4215 target_md->msg_stime = tswapal(host_md->msg_stime);
4216 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4217 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4218 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4219 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4220 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4221 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4222 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4223 unlock_user_struct(target_md, target_addr, 1);
4224 return 0;
4227 struct target_msginfo {
4228 int msgpool;
4229 int msgmap;
4230 int msgmax;
4231 int msgmnb;
4232 int msgmni;
4233 int msgssz;
4234 int msgtql;
4235 unsigned short int msgseg;
4238 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4239 struct msginfo *host_msginfo)
4241 struct target_msginfo *target_msginfo;
4242 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4243 return -TARGET_EFAULT;
4244 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4245 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4246 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4247 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4248 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4249 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4250 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4251 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4252 unlock_user_struct(target_msginfo, target_addr, 1);
4253 return 0;
4256 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4258 struct msqid_ds dsarg;
4259 struct msginfo msginfo;
4260 abi_long ret = -TARGET_EINVAL;
4262 cmd &= 0xff;
4264 switch (cmd) {
4265 case IPC_STAT:
4266 case IPC_SET:
4267 case MSG_STAT:
4268 if (target_to_host_msqid_ds(&dsarg,ptr))
4269 return -TARGET_EFAULT;
4270 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4271 if (host_to_target_msqid_ds(ptr,&dsarg))
4272 return -TARGET_EFAULT;
4273 break;
4274 case IPC_RMID:
4275 ret = get_errno(msgctl(msgid, cmd, NULL));
4276 break;
4277 case IPC_INFO:
4278 case MSG_INFO:
4279 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4280 if (host_to_target_msginfo(ptr, &msginfo))
4281 return -TARGET_EFAULT;
4282 break;
4285 return ret;
4288 struct target_msgbuf {
4289 abi_long mtype;
4290 char mtext[1];
4293 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4294 ssize_t msgsz, int msgflg)
4296 struct target_msgbuf *target_mb;
4297 struct msgbuf *host_mb;
4298 abi_long ret = 0;
4300 if (msgsz < 0) {
4301 return -TARGET_EINVAL;
4304 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4305 return -TARGET_EFAULT;
4306 host_mb = g_try_malloc(msgsz + sizeof(long));
4307 if (!host_mb) {
4308 unlock_user_struct(target_mb, msgp, 0);
4309 return -TARGET_ENOMEM;
4311 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4312 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4313 ret = -TARGET_ENOSYS;
4314 #ifdef __NR_msgsnd
4315 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4316 #endif
4317 #ifdef __NR_ipc
4318 if (ret == -TARGET_ENOSYS) {
4319 #ifdef __s390x__
4320 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4321 host_mb));
4322 #else
4323 ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4324 host_mb, 0));
4325 #endif
4327 #endif
4328 g_free(host_mb);
4329 unlock_user_struct(target_mb, msgp, 0);
4331 return ret;
4334 #ifdef __NR_ipc
4335 #if defined(__sparc__)
4336 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4337 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4338 #elif defined(__s390x__)
4339 /* The s390 sys_ipc variant has only five parameters. */
4340 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4341 ((long int[]){(long int)__msgp, __msgtyp})
4342 #else
4343 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4344 ((long int[]){(long int)__msgp, __msgtyp}), 0
4345 #endif
4346 #endif
4348 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4349 ssize_t msgsz, abi_long msgtyp,
4350 int msgflg)
4352 struct target_msgbuf *target_mb;
4353 char *target_mtext;
4354 struct msgbuf *host_mb;
4355 abi_long ret = 0;
4357 if (msgsz < 0) {
4358 return -TARGET_EINVAL;
4361 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4362 return -TARGET_EFAULT;
4364 host_mb = g_try_malloc(msgsz + sizeof(long));
4365 if (!host_mb) {
4366 ret = -TARGET_ENOMEM;
4367 goto end;
4369 ret = -TARGET_ENOSYS;
4370 #ifdef __NR_msgrcv
4371 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4372 #endif
4373 #ifdef __NR_ipc
4374 if (ret == -TARGET_ENOSYS) {
4375 ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4376 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4378 #endif
4380 if (ret > 0) {
4381 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4382 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4383 if (!target_mtext) {
4384 ret = -TARGET_EFAULT;
4385 goto end;
4387 memcpy(target_mb->mtext, host_mb->mtext, ret);
4388 unlock_user(target_mtext, target_mtext_addr, ret);
4391 target_mb->mtype = tswapal(host_mb->mtype);
4393 end:
4394 if (target_mb)
4395 unlock_user_struct(target_mb, msgp, 1);
4396 g_free(host_mb);
4397 return ret;
4400 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4401 abi_ulong target_addr)
4403 struct target_shmid_ds *target_sd;
4405 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4406 return -TARGET_EFAULT;
4407 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4408 return -TARGET_EFAULT;
4409 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4410 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4411 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4412 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4413 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4414 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4415 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4416 unlock_user_struct(target_sd, target_addr, 0);
4417 return 0;
4420 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4421 struct shmid_ds *host_sd)
4423 struct target_shmid_ds *target_sd;
4425 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4426 return -TARGET_EFAULT;
4427 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4428 return -TARGET_EFAULT;
4429 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4430 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4431 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4432 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4433 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4434 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4435 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4436 unlock_user_struct(target_sd, target_addr, 1);
4437 return 0;
4440 struct target_shminfo {
4441 abi_ulong shmmax;
4442 abi_ulong shmmin;
4443 abi_ulong shmmni;
4444 abi_ulong shmseg;
4445 abi_ulong shmall;
4448 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4449 struct shminfo *host_shminfo)
4451 struct target_shminfo *target_shminfo;
4452 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4453 return -TARGET_EFAULT;
4454 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4455 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4456 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4457 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4458 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4459 unlock_user_struct(target_shminfo, target_addr, 1);
4460 return 0;
4463 struct target_shm_info {
4464 int used_ids;
4465 abi_ulong shm_tot;
4466 abi_ulong shm_rss;
4467 abi_ulong shm_swp;
4468 abi_ulong swap_attempts;
4469 abi_ulong swap_successes;
4472 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4473 struct shm_info *host_shm_info)
4475 struct target_shm_info *target_shm_info;
4476 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4477 return -TARGET_EFAULT;
4478 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4479 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4480 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4481 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4482 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4483 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4484 unlock_user_struct(target_shm_info, target_addr, 1);
4485 return 0;
4488 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4490 struct shmid_ds dsarg;
4491 struct shminfo shminfo;
4492 struct shm_info shm_info;
4493 abi_long ret = -TARGET_EINVAL;
4495 cmd &= 0xff;
4497 switch(cmd) {
4498 case IPC_STAT:
4499 case IPC_SET:
4500 case SHM_STAT:
4501 if (target_to_host_shmid_ds(&dsarg, buf))
4502 return -TARGET_EFAULT;
4503 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4504 if (host_to_target_shmid_ds(buf, &dsarg))
4505 return -TARGET_EFAULT;
4506 break;
4507 case IPC_INFO:
4508 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4509 if (host_to_target_shminfo(buf, &shminfo))
4510 return -TARGET_EFAULT;
4511 break;
4512 case SHM_INFO:
4513 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4514 if (host_to_target_shm_info(buf, &shm_info))
4515 return -TARGET_EFAULT;
4516 break;
4517 case IPC_RMID:
4518 case SHM_LOCK:
4519 case SHM_UNLOCK:
4520 ret = get_errno(shmctl(shmid, cmd, NULL));
4521 break;
4524 return ret;
4527 #ifndef TARGET_FORCE_SHMLBA
4528 /* For most architectures, SHMLBA is the same as the page size;
4529 * some architectures have larger values, in which case they should
4530 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4531 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4532 * and defining its own value for SHMLBA.
4534 * The kernel also permits SHMLBA to be set by the architecture to a
4535 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4536 * this means that addresses are rounded to the large size if
4537 * SHM_RND is set but addresses not aligned to that size are not rejected
4538 * as long as they are at least page-aligned. Since the only architecture
4539 * which uses this is ia64 this code doesn't provide for that oddity.
4541 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4543 return TARGET_PAGE_SIZE;
4545 #endif
4547 static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
4548 abi_ulong shmaddr, int shmflg)
4550 CPUState *cpu = env_cpu(cpu_env);
4551 abi_ulong raddr;
4552 void *host_raddr;
4553 struct shmid_ds shm_info;
4554 int i, ret;
4555 abi_ulong shmlba;
4557 /* shmat pointers are always untagged */
4559 /* find out the length of the shared memory segment */
4560 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4561 if (is_error(ret)) {
4562 /* can't get length, bail out */
4563 return ret;
4566 shmlba = target_shmlba(cpu_env);
4568 if (shmaddr & (shmlba - 1)) {
4569 if (shmflg & SHM_RND) {
4570 shmaddr &= ~(shmlba - 1);
4571 } else {
4572 return -TARGET_EINVAL;
4575 if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4576 return -TARGET_EINVAL;
4579 mmap_lock();
4582 * We're mapping shared memory, so ensure we generate code for parallel
4583 * execution and flush old translations. This will work up to the level
4584 * supported by the host -- anything that requires EXCP_ATOMIC will not
4585 * be atomic with respect to an external process.
4587 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4588 cpu->tcg_cflags |= CF_PARALLEL;
4589 tb_flush(cpu);
4592 if (shmaddr)
4593 host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4594 else {
4595 abi_ulong mmap_start;
4597 /* In order to use the host shmat, we need to honor host SHMLBA. */
4598 mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4600 if (mmap_start == -1) {
4601 errno = ENOMEM;
4602 host_raddr = (void *)-1;
4603 } else
4604 host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4605 shmflg | SHM_REMAP);
4608 if (host_raddr == (void *)-1) {
4609 mmap_unlock();
4610 return get_errno((intptr_t)host_raddr);
4612 raddr = h2g((uintptr_t)host_raddr);
4614 page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
4615 PAGE_VALID | PAGE_RESET | PAGE_READ |
4616 (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4618 for (i = 0; i < N_SHM_REGIONS; i++) {
4619 if (!shm_regions[i].in_use) {
4620 shm_regions[i].in_use = true;
4621 shm_regions[i].start = raddr;
4622 shm_regions[i].size = shm_info.shm_segsz;
4623 break;
4627 mmap_unlock();
4628 return raddr;
4631 static inline abi_long do_shmdt(abi_ulong shmaddr)
4633 int i;
4634 abi_long rv;
4636 /* shmdt pointers are always untagged */
4638 mmap_lock();
4640 for (i = 0; i < N_SHM_REGIONS; ++i) {
4641 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4642 shm_regions[i].in_use = false;
4643 page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4644 break;
4647 rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4649 mmap_unlock();
4651 return rv;
4654 #ifdef TARGET_NR_ipc
4655 /* ??? This only works with linear mappings. */
4656 /* do_ipc() must return target values and target errnos. */
4657 static abi_long do_ipc(CPUArchState *cpu_env,
4658 unsigned int call, abi_long first,
4659 abi_long second, abi_long third,
4660 abi_long ptr, abi_long fifth)
4662 int version;
4663 abi_long ret = 0;
4665 version = call >> 16;
4666 call &= 0xffff;
4668 switch (call) {
4669 case IPCOP_semop:
4670 ret = do_semtimedop(first, ptr, second, 0, false);
4671 break;
4672 case IPCOP_semtimedop:
4674 * The s390 sys_ipc variant has only five parameters instead of six
4675 * (as for default variant) and the only difference is the handling of
4676 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4677 * to a struct timespec where the generic variant uses fifth parameter.
4679 #if defined(TARGET_S390X)
4680 ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4681 #else
4682 ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4683 #endif
4684 break;
4686 case IPCOP_semget:
4687 ret = get_errno(semget(first, second, third));
4688 break;
4690 case IPCOP_semctl: {
4691 /* The semun argument to semctl is passed by value, so dereference the
4692 * ptr argument. */
4693 abi_ulong atptr;
4694 get_user_ual(atptr, ptr);
4695 ret = do_semctl(first, second, third, atptr);
4696 break;
4699 case IPCOP_msgget:
4700 ret = get_errno(msgget(first, second));
4701 break;
4703 case IPCOP_msgsnd:
4704 ret = do_msgsnd(first, ptr, second, third);
4705 break;
4707 case IPCOP_msgctl:
4708 ret = do_msgctl(first, second, ptr);
4709 break;
4711 case IPCOP_msgrcv:
4712 switch (version) {
4713 case 0:
4715 struct target_ipc_kludge {
4716 abi_long msgp;
4717 abi_long msgtyp;
4718 } *tmp;
4720 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4721 ret = -TARGET_EFAULT;
4722 break;
4725 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4727 unlock_user_struct(tmp, ptr, 0);
4728 break;
4730 default:
4731 ret = do_msgrcv(first, ptr, second, fifth, third);
4733 break;
4735 case IPCOP_shmat:
4736 switch (version) {
4737 default:
4739 abi_ulong raddr;
4740 raddr = do_shmat(cpu_env, first, ptr, second);
4741 if (is_error(raddr))
4742 return get_errno(raddr);
4743 if (put_user_ual(raddr, third))
4744 return -TARGET_EFAULT;
4745 break;
4747 case 1:
4748 ret = -TARGET_EINVAL;
4749 break;
4751 break;
4752 case IPCOP_shmdt:
4753 ret = do_shmdt(ptr);
4754 break;
4756 case IPCOP_shmget:
4757 /* IPC_* flag values are the same on all linux platforms */
4758 ret = get_errno(shmget(first, second, third));
4759 break;
4761 /* IPC_* and SHM_* command values are the same on all linux platforms */
4762 case IPCOP_shmctl:
4763 ret = do_shmctl(first, second, ptr);
4764 break;
4765 default:
4766 qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4767 call, version);
4768 ret = -TARGET_ENOSYS;
4769 break;
4771 return ret;
4773 #endif
4775 /* kernel structure types definitions */
4777 #define STRUCT(name, ...) STRUCT_ ## name,
4778 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4779 enum {
4780 #include "syscall_types.h"
4781 STRUCT_MAX
4783 #undef STRUCT
4784 #undef STRUCT_SPECIAL
4786 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4787 #define STRUCT_SPECIAL(name)
4788 #include "syscall_types.h"
4789 #undef STRUCT
4790 #undef STRUCT_SPECIAL
4792 #define MAX_STRUCT_SIZE 4096
4794 #ifdef CONFIG_FIEMAP
4795 /* So fiemap access checks don't overflow on 32 bit systems.
4796 * This is very slightly smaller than the limit imposed by
4797 * the underlying kernel.
4799 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4800 / sizeof(struct fiemap_extent))
4802 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4803 int fd, int cmd, abi_long arg)
4805 /* The parameter for this ioctl is a struct fiemap followed
4806 * by an array of struct fiemap_extent whose size is set
4807 * in fiemap->fm_extent_count. The array is filled in by the
4808 * ioctl.
4810 int target_size_in, target_size_out;
4811 struct fiemap *fm;
4812 const argtype *arg_type = ie->arg_type;
4813 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4814 void *argptr, *p;
4815 abi_long ret;
4816 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4817 uint32_t outbufsz;
4818 int free_fm = 0;
4820 assert(arg_type[0] == TYPE_PTR);
4821 assert(ie->access == IOC_RW);
4822 arg_type++;
4823 target_size_in = thunk_type_size(arg_type, 0);
4824 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4825 if (!argptr) {
4826 return -TARGET_EFAULT;
4828 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4829 unlock_user(argptr, arg, 0);
4830 fm = (struct fiemap *)buf_temp;
4831 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4832 return -TARGET_EINVAL;
4835 outbufsz = sizeof (*fm) +
4836 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4838 if (outbufsz > MAX_STRUCT_SIZE) {
4839 /* We can't fit all the extents into the fixed size buffer.
4840 * Allocate one that is large enough and use it instead.
4842 fm = g_try_malloc(outbufsz);
4843 if (!fm) {
4844 return -TARGET_ENOMEM;
4846 memcpy(fm, buf_temp, sizeof(struct fiemap));
4847 free_fm = 1;
4849 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4850 if (!is_error(ret)) {
4851 target_size_out = target_size_in;
4852 /* An extent_count of 0 means we were only counting the extents
4853 * so there are no structs to copy
4855 if (fm->fm_extent_count != 0) {
4856 target_size_out += fm->fm_mapped_extents * extent_size;
4858 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4859 if (!argptr) {
4860 ret = -TARGET_EFAULT;
4861 } else {
4862 /* Convert the struct fiemap */
4863 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4864 if (fm->fm_extent_count != 0) {
4865 p = argptr + target_size_in;
4866 /* ...and then all the struct fiemap_extents */
4867 for (i = 0; i < fm->fm_mapped_extents; i++) {
4868 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4869 THUNK_TARGET);
4870 p += extent_size;
4873 unlock_user(argptr, arg, target_size_out);
4876 if (free_fm) {
4877 g_free(fm);
4879 return ret;
4881 #endif
4883 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4884 int fd, int cmd, abi_long arg)
4886 const argtype *arg_type = ie->arg_type;
4887 int target_size;
4888 void *argptr;
4889 int ret;
4890 struct ifconf *host_ifconf;
4891 uint32_t outbufsz;
4892 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4893 const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4894 int target_ifreq_size;
4895 int nb_ifreq;
4896 int free_buf = 0;
4897 int i;
4898 int target_ifc_len;
4899 abi_long target_ifc_buf;
4900 int host_ifc_len;
4901 char *host_ifc_buf;
4903 assert(arg_type[0] == TYPE_PTR);
4904 assert(ie->access == IOC_RW);
4906 arg_type++;
4907 target_size = thunk_type_size(arg_type, 0);
4909 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4910 if (!argptr)
4911 return -TARGET_EFAULT;
4912 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4913 unlock_user(argptr, arg, 0);
4915 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4916 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4917 target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4919 if (target_ifc_buf != 0) {
4920 target_ifc_len = host_ifconf->ifc_len;
4921 nb_ifreq = target_ifc_len / target_ifreq_size;
4922 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4924 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4925 if (outbufsz > MAX_STRUCT_SIZE) {
4927 * We can't fit all the extents into the fixed size buffer.
4928 * Allocate one that is large enough and use it instead.
4930 host_ifconf = g_try_malloc(outbufsz);
4931 if (!host_ifconf) {
4932 return -TARGET_ENOMEM;
4934 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4935 free_buf = 1;
4937 host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4939 host_ifconf->ifc_len = host_ifc_len;
4940 } else {
4941 host_ifc_buf = NULL;
4943 host_ifconf->ifc_buf = host_ifc_buf;
4945 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4946 if (!is_error(ret)) {
4947 /* convert host ifc_len to target ifc_len */
4949 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4950 target_ifc_len = nb_ifreq * target_ifreq_size;
4951 host_ifconf->ifc_len = target_ifc_len;
4953 /* restore target ifc_buf */
4955 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4957 /* copy struct ifconf to target user */
4959 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4960 if (!argptr)
4961 return -TARGET_EFAULT;
4962 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4963 unlock_user(argptr, arg, target_size);
4965 if (target_ifc_buf != 0) {
4966 /* copy ifreq[] to target user */
4967 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4968 for (i = 0; i < nb_ifreq ; i++) {
4969 thunk_convert(argptr + i * target_ifreq_size,
4970 host_ifc_buf + i * sizeof(struct ifreq),
4971 ifreq_arg_type, THUNK_TARGET);
4973 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4977 if (free_buf) {
4978 g_free(host_ifconf);
4981 return ret;
4984 #if defined(CONFIG_USBFS)
4985 #if HOST_LONG_BITS > 64
4986 #error USBDEVFS thunks do not support >64 bit hosts yet.
4987 #endif
4988 struct live_urb {
4989 uint64_t target_urb_adr;
4990 uint64_t target_buf_adr;
4991 char *target_buf_ptr;
4992 struct usbdevfs_urb host_urb;
4995 static GHashTable *usbdevfs_urb_hashtable(void)
4997 static GHashTable *urb_hashtable;
4999 if (!urb_hashtable) {
5000 urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
5002 return urb_hashtable;
5005 static void urb_hashtable_insert(struct live_urb *urb)
5007 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5008 g_hash_table_insert(urb_hashtable, urb, urb);
5011 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5013 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5014 return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5017 static void urb_hashtable_remove(struct live_urb *urb)
5019 GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5020 g_hash_table_remove(urb_hashtable, urb);
5023 static abi_long
5024 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5025 int fd, int cmd, abi_long arg)
5027 const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5028 const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5029 struct live_urb *lurb;
5030 void *argptr;
5031 uint64_t hurb;
5032 int target_size;
5033 uintptr_t target_urb_adr;
5034 abi_long ret;
5036 target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5038 memset(buf_temp, 0, sizeof(uint64_t));
5039 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5040 if (is_error(ret)) {
5041 return ret;
5044 memcpy(&hurb, buf_temp, sizeof(uint64_t));
5045 lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5046 if (!lurb->target_urb_adr) {
5047 return -TARGET_EFAULT;
5049 urb_hashtable_remove(lurb);
5050 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5051 lurb->host_urb.buffer_length);
5052 lurb->target_buf_ptr = NULL;
5054 /* restore the guest buffer pointer */
5055 lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5057 /* update the guest urb struct */
5058 argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5059 if (!argptr) {
5060 g_free(lurb);
5061 return -TARGET_EFAULT;
5063 thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5064 unlock_user(argptr, lurb->target_urb_adr, target_size);
5066 target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5067 /* write back the urb handle */
5068 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5069 if (!argptr) {
5070 g_free(lurb);
5071 return -TARGET_EFAULT;
5074 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5075 target_urb_adr = lurb->target_urb_adr;
5076 thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5077 unlock_user(argptr, arg, target_size);
5079 g_free(lurb);
5080 return ret;
5083 static abi_long
5084 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5085 uint8_t *buf_temp __attribute__((unused)),
5086 int fd, int cmd, abi_long arg)
5088 struct live_urb *lurb;
5090 /* map target address back to host URB with metadata. */
5091 lurb = urb_hashtable_lookup(arg);
5092 if (!lurb) {
5093 return -TARGET_EFAULT;
5095 return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5098 static abi_long
5099 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5100 int fd, int cmd, abi_long arg)
5102 const argtype *arg_type = ie->arg_type;
5103 int target_size;
5104 abi_long ret;
5105 void *argptr;
5106 int rw_dir;
5107 struct live_urb *lurb;
5110 * each submitted URB needs to map to a unique ID for the
5111 * kernel, and that unique ID needs to be a pointer to
5112 * host memory. hence, we need to malloc for each URB.
5113 * isochronous transfers have a variable length struct.
5115 arg_type++;
5116 target_size = thunk_type_size(arg_type, THUNK_TARGET);
5118 /* construct host copy of urb and metadata */
5119 lurb = g_try_new0(struct live_urb, 1);
5120 if (!lurb) {
5121 return -TARGET_ENOMEM;
5124 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5125 if (!argptr) {
5126 g_free(lurb);
5127 return -TARGET_EFAULT;
5129 thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5130 unlock_user(argptr, arg, 0);
5132 lurb->target_urb_adr = arg;
5133 lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5135 /* buffer space used depends on endpoint type so lock the entire buffer */
5136 /* control type urbs should check the buffer contents for true direction */
5137 rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5138 lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5139 lurb->host_urb.buffer_length, 1);
5140 if (lurb->target_buf_ptr == NULL) {
5141 g_free(lurb);
5142 return -TARGET_EFAULT;
5145 /* update buffer pointer in host copy */
5146 lurb->host_urb.buffer = lurb->target_buf_ptr;
5148 ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5149 if (is_error(ret)) {
5150 unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5151 g_free(lurb);
5152 } else {
5153 urb_hashtable_insert(lurb);
5156 return ret;
5158 #endif /* CONFIG_USBFS */
5160 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5161 int cmd, abi_long arg)
5163 void *argptr;
5164 struct dm_ioctl *host_dm;
5165 abi_long guest_data;
5166 uint32_t guest_data_size;
5167 int target_size;
5168 const argtype *arg_type = ie->arg_type;
5169 abi_long ret;
5170 void *big_buf = NULL;
5171 char *host_data;
5173 arg_type++;
5174 target_size = thunk_type_size(arg_type, 0);
5175 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5176 if (!argptr) {
5177 ret = -TARGET_EFAULT;
5178 goto out;
5180 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5181 unlock_user(argptr, arg, 0);
5183 /* buf_temp is too small, so fetch things into a bigger buffer */
5184 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5185 memcpy(big_buf, buf_temp, target_size);
5186 buf_temp = big_buf;
5187 host_dm = big_buf;
5189 guest_data = arg + host_dm->data_start;
5190 if ((guest_data - arg) < 0) {
5191 ret = -TARGET_EINVAL;
5192 goto out;
5194 guest_data_size = host_dm->data_size - host_dm->data_start;
5195 host_data = (char*)host_dm + host_dm->data_start;
5197 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5198 if (!argptr) {
5199 ret = -TARGET_EFAULT;
5200 goto out;
5203 switch (ie->host_cmd) {
5204 case DM_REMOVE_ALL:
5205 case DM_LIST_DEVICES:
5206 case DM_DEV_CREATE:
5207 case DM_DEV_REMOVE:
5208 case DM_DEV_SUSPEND:
5209 case DM_DEV_STATUS:
5210 case DM_DEV_WAIT:
5211 case DM_TABLE_STATUS:
5212 case DM_TABLE_CLEAR:
5213 case DM_TABLE_DEPS:
5214 case DM_LIST_VERSIONS:
5215 /* no input data */
5216 break;
5217 case DM_DEV_RENAME:
5218 case DM_DEV_SET_GEOMETRY:
5219 /* data contains only strings */
5220 memcpy(host_data, argptr, guest_data_size);
5221 break;
5222 case DM_TARGET_MSG:
5223 memcpy(host_data, argptr, guest_data_size);
5224 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5225 break;
5226 case DM_TABLE_LOAD:
5228 void *gspec = argptr;
5229 void *cur_data = host_data;
5230 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5231 int spec_size = thunk_type_size(arg_type, 0);
5232 int i;
5234 for (i = 0; i < host_dm->target_count; i++) {
5235 struct dm_target_spec *spec = cur_data;
5236 uint32_t next;
5237 int slen;
5239 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5240 slen = strlen((char*)gspec + spec_size) + 1;
5241 next = spec->next;
5242 spec->next = sizeof(*spec) + slen;
5243 strcpy((char*)&spec[1], gspec + spec_size);
5244 gspec += next;
5245 cur_data += spec->next;
5247 break;
5249 default:
5250 ret = -TARGET_EINVAL;
5251 unlock_user(argptr, guest_data, 0);
5252 goto out;
5254 unlock_user(argptr, guest_data, 0);
5256 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5257 if (!is_error(ret)) {
5258 guest_data = arg + host_dm->data_start;
5259 guest_data_size = host_dm->data_size - host_dm->data_start;
5260 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5261 switch (ie->host_cmd) {
5262 case DM_REMOVE_ALL:
5263 case DM_DEV_CREATE:
5264 case DM_DEV_REMOVE:
5265 case DM_DEV_RENAME:
5266 case DM_DEV_SUSPEND:
5267 case DM_DEV_STATUS:
5268 case DM_TABLE_LOAD:
5269 case DM_TABLE_CLEAR:
5270 case DM_TARGET_MSG:
5271 case DM_DEV_SET_GEOMETRY:
5272 /* no return data */
5273 break;
5274 case DM_LIST_DEVICES:
5276 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5277 uint32_t remaining_data = guest_data_size;
5278 void *cur_data = argptr;
5279 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5280 int nl_size = 12; /* can't use thunk_size due to alignment */
5282 while (1) {
5283 uint32_t next = nl->next;
5284 if (next) {
5285 nl->next = nl_size + (strlen(nl->name) + 1);
5287 if (remaining_data < nl->next) {
5288 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5289 break;
5291 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5292 strcpy(cur_data + nl_size, nl->name);
5293 cur_data += nl->next;
5294 remaining_data -= nl->next;
5295 if (!next) {
5296 break;
5298 nl = (void*)nl + next;
5300 break;
5302 case DM_DEV_WAIT:
5303 case DM_TABLE_STATUS:
5305 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5306 void *cur_data = argptr;
5307 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5308 int spec_size = thunk_type_size(arg_type, 0);
5309 int i;
5311 for (i = 0; i < host_dm->target_count; i++) {
5312 uint32_t next = spec->next;
5313 int slen = strlen((char*)&spec[1]) + 1;
5314 spec->next = (cur_data - argptr) + spec_size + slen;
5315 if (guest_data_size < spec->next) {
5316 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5317 break;
5319 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5320 strcpy(cur_data + spec_size, (char*)&spec[1]);
5321 cur_data = argptr + spec->next;
5322 spec = (void*)host_dm + host_dm->data_start + next;
5324 break;
5326 case DM_TABLE_DEPS:
5328 void *hdata = (void*)host_dm + host_dm->data_start;
5329 int count = *(uint32_t*)hdata;
5330 uint64_t *hdev = hdata + 8;
5331 uint64_t *gdev = argptr + 8;
5332 int i;
5334 *(uint32_t*)argptr = tswap32(count);
5335 for (i = 0; i < count; i++) {
5336 *gdev = tswap64(*hdev);
5337 gdev++;
5338 hdev++;
5340 break;
5342 case DM_LIST_VERSIONS:
5344 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5345 uint32_t remaining_data = guest_data_size;
5346 void *cur_data = argptr;
5347 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5348 int vers_size = thunk_type_size(arg_type, 0);
5350 while (1) {
5351 uint32_t next = vers->next;
5352 if (next) {
5353 vers->next = vers_size + (strlen(vers->name) + 1);
5355 if (remaining_data < vers->next) {
5356 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5357 break;
5359 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5360 strcpy(cur_data + vers_size, vers->name);
5361 cur_data += vers->next;
5362 remaining_data -= vers->next;
5363 if (!next) {
5364 break;
5366 vers = (void*)vers + next;
5368 break;
5370 default:
5371 unlock_user(argptr, guest_data, 0);
5372 ret = -TARGET_EINVAL;
5373 goto out;
5375 unlock_user(argptr, guest_data, guest_data_size);
5377 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5378 if (!argptr) {
5379 ret = -TARGET_EFAULT;
5380 goto out;
5382 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5383 unlock_user(argptr, arg, target_size);
5385 out:
5386 g_free(big_buf);
5387 return ret;
5390 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5391 int cmd, abi_long arg)
5393 void *argptr;
5394 int target_size;
5395 const argtype *arg_type = ie->arg_type;
5396 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5397 abi_long ret;
5399 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5400 struct blkpg_partition host_part;
5402 /* Read and convert blkpg */
5403 arg_type++;
5404 target_size = thunk_type_size(arg_type, 0);
5405 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5406 if (!argptr) {
5407 ret = -TARGET_EFAULT;
5408 goto out;
5410 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5411 unlock_user(argptr, arg, 0);
5413 switch (host_blkpg->op) {
5414 case BLKPG_ADD_PARTITION:
5415 case BLKPG_DEL_PARTITION:
5416 /* payload is struct blkpg_partition */
5417 break;
5418 default:
5419 /* Unknown opcode */
5420 ret = -TARGET_EINVAL;
5421 goto out;
5424 /* Read and convert blkpg->data */
5425 arg = (abi_long)(uintptr_t)host_blkpg->data;
5426 target_size = thunk_type_size(part_arg_type, 0);
5427 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5428 if (!argptr) {
5429 ret = -TARGET_EFAULT;
5430 goto out;
5432 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5433 unlock_user(argptr, arg, 0);
5435 /* Swizzle the data pointer to our local copy and call! */
5436 host_blkpg->data = &host_part;
5437 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5439 out:
5440 return ret;
5443 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5444 int fd, int cmd, abi_long arg)
5446 const argtype *arg_type = ie->arg_type;
5447 const StructEntry *se;
5448 const argtype *field_types;
5449 const int *dst_offsets, *src_offsets;
5450 int target_size;
5451 void *argptr;
5452 abi_ulong *target_rt_dev_ptr = NULL;
5453 unsigned long *host_rt_dev_ptr = NULL;
5454 abi_long ret;
5455 int i;
5457 assert(ie->access == IOC_W);
5458 assert(*arg_type == TYPE_PTR);
5459 arg_type++;
5460 assert(*arg_type == TYPE_STRUCT);
5461 target_size = thunk_type_size(arg_type, 0);
5462 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5463 if (!argptr) {
5464 return -TARGET_EFAULT;
5466 arg_type++;
5467 assert(*arg_type == (int)STRUCT_rtentry);
5468 se = struct_entries + *arg_type++;
5469 assert(se->convert[0] == NULL);
5470 /* convert struct here to be able to catch rt_dev string */
5471 field_types = se->field_types;
5472 dst_offsets = se->field_offsets[THUNK_HOST];
5473 src_offsets = se->field_offsets[THUNK_TARGET];
5474 for (i = 0; i < se->nb_fields; i++) {
5475 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5476 assert(*field_types == TYPE_PTRVOID);
5477 target_rt_dev_ptr = argptr + src_offsets[i];
5478 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5479 if (*target_rt_dev_ptr != 0) {
5480 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5481 tswapal(*target_rt_dev_ptr));
5482 if (!*host_rt_dev_ptr) {
5483 unlock_user(argptr, arg, 0);
5484 return -TARGET_EFAULT;
5486 } else {
5487 *host_rt_dev_ptr = 0;
5489 field_types++;
5490 continue;
5492 field_types = thunk_convert(buf_temp + dst_offsets[i],
5493 argptr + src_offsets[i],
5494 field_types, THUNK_HOST);
5496 unlock_user(argptr, arg, 0);
5498 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5500 assert(host_rt_dev_ptr != NULL);
5501 assert(target_rt_dev_ptr != NULL);
5502 if (*host_rt_dev_ptr != 0) {
5503 unlock_user((void *)*host_rt_dev_ptr,
5504 *target_rt_dev_ptr, 0);
5506 return ret;
5509 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5510 int fd, int cmd, abi_long arg)
5512 int sig = target_to_host_signal(arg);
5513 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5516 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5517 int fd, int cmd, abi_long arg)
5519 struct timeval tv;
5520 abi_long ret;
5522 ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5523 if (is_error(ret)) {
5524 return ret;
5527 if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5528 if (copy_to_user_timeval(arg, &tv)) {
5529 return -TARGET_EFAULT;
5531 } else {
5532 if (copy_to_user_timeval64(arg, &tv)) {
5533 return -TARGET_EFAULT;
5537 return ret;
5540 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5541 int fd, int cmd, abi_long arg)
5543 struct timespec ts;
5544 abi_long ret;
5546 ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5547 if (is_error(ret)) {
5548 return ret;
5551 if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5552 if (host_to_target_timespec(arg, &ts)) {
5553 return -TARGET_EFAULT;
5555 } else{
5556 if (host_to_target_timespec64(arg, &ts)) {
5557 return -TARGET_EFAULT;
5561 return ret;
5564 #ifdef TIOCGPTPEER
5565 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5566 int fd, int cmd, abi_long arg)
5568 int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5569 return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5571 #endif
5573 #ifdef HAVE_DRM_H
5575 static void unlock_drm_version(struct drm_version *host_ver,
5576 struct target_drm_version *target_ver,
5577 bool copy)
5579 unlock_user(host_ver->name, target_ver->name,
5580 copy ? host_ver->name_len : 0);
5581 unlock_user(host_ver->date, target_ver->date,
5582 copy ? host_ver->date_len : 0);
5583 unlock_user(host_ver->desc, target_ver->desc,
5584 copy ? host_ver->desc_len : 0);
5587 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5588 struct target_drm_version *target_ver)
5590 memset(host_ver, 0, sizeof(*host_ver));
5592 __get_user(host_ver->name_len, &target_ver->name_len);
5593 if (host_ver->name_len) {
5594 host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5595 target_ver->name_len, 0);
5596 if (!host_ver->name) {
5597 return -EFAULT;
5601 __get_user(host_ver->date_len, &target_ver->date_len);
5602 if (host_ver->date_len) {
5603 host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5604 target_ver->date_len, 0);
5605 if (!host_ver->date) {
5606 goto err;
5610 __get_user(host_ver->desc_len, &target_ver->desc_len);
5611 if (host_ver->desc_len) {
5612 host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5613 target_ver->desc_len, 0);
5614 if (!host_ver->desc) {
5615 goto err;
5619 return 0;
5620 err:
5621 unlock_drm_version(host_ver, target_ver, false);
5622 return -EFAULT;
5625 static inline void host_to_target_drmversion(
5626 struct target_drm_version *target_ver,
5627 struct drm_version *host_ver)
5629 __put_user(host_ver->version_major, &target_ver->version_major);
5630 __put_user(host_ver->version_minor, &target_ver->version_minor);
5631 __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5632 __put_user(host_ver->name_len, &target_ver->name_len);
5633 __put_user(host_ver->date_len, &target_ver->date_len);
5634 __put_user(host_ver->desc_len, &target_ver->desc_len);
5635 unlock_drm_version(host_ver, target_ver, true);
5638 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5639 int fd, int cmd, abi_long arg)
5641 struct drm_version *ver;
5642 struct target_drm_version *target_ver;
5643 abi_long ret;
5645 switch (ie->host_cmd) {
5646 case DRM_IOCTL_VERSION:
5647 if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5648 return -TARGET_EFAULT;
5650 ver = (struct drm_version *)buf_temp;
5651 ret = target_to_host_drmversion(ver, target_ver);
5652 if (!is_error(ret)) {
5653 ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5654 if (is_error(ret)) {
5655 unlock_drm_version(ver, target_ver, false);
5656 } else {
5657 host_to_target_drmversion(target_ver, ver);
5660 unlock_user_struct(target_ver, arg, 0);
5661 return ret;
5663 return -TARGET_ENOSYS;
5666 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5667 struct drm_i915_getparam *gparam,
5668 int fd, abi_long arg)
5670 abi_long ret;
5671 int value;
5672 struct target_drm_i915_getparam *target_gparam;
5674 if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5675 return -TARGET_EFAULT;
5678 __get_user(gparam->param, &target_gparam->param);
5679 gparam->value = &value;
5680 ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5681 put_user_s32(value, target_gparam->value);
5683 unlock_user_struct(target_gparam, arg, 0);
5684 return ret;
5687 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5688 int fd, int cmd, abi_long arg)
5690 switch (ie->host_cmd) {
5691 case DRM_IOCTL_I915_GETPARAM:
5692 return do_ioctl_drm_i915_getparam(ie,
5693 (struct drm_i915_getparam *)buf_temp,
5694 fd, arg);
5695 default:
5696 return -TARGET_ENOSYS;
5700 #endif
5702 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5703 int fd, int cmd, abi_long arg)
5705 struct tun_filter *filter = (struct tun_filter *)buf_temp;
5706 struct tun_filter *target_filter;
5707 char *target_addr;
5709 assert(ie->access == IOC_W);
5711 target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5712 if (!target_filter) {
5713 return -TARGET_EFAULT;
5715 filter->flags = tswap16(target_filter->flags);
5716 filter->count = tswap16(target_filter->count);
5717 unlock_user(target_filter, arg, 0);
5719 if (filter->count) {
5720 if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5721 MAX_STRUCT_SIZE) {
5722 return -TARGET_EFAULT;
5725 target_addr = lock_user(VERIFY_READ,
5726 arg + offsetof(struct tun_filter, addr),
5727 filter->count * ETH_ALEN, 1);
5728 if (!target_addr) {
5729 return -TARGET_EFAULT;
5731 memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5732 unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5735 return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5738 IOCTLEntry ioctl_entries[] = {
5739 #define IOCTL(cmd, access, ...) \
5740 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5741 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5742 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5743 #define IOCTL_IGNORE(cmd) \
5744 { TARGET_ ## cmd, 0, #cmd },
5745 #include "ioctls.h"
5746 { 0, 0, },
5749 /* ??? Implement proper locking for ioctls. */
5750 /* do_ioctl() Must return target values and target errnos. */
5751 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5753 const IOCTLEntry *ie;
5754 const argtype *arg_type;
5755 abi_long ret;
5756 uint8_t buf_temp[MAX_STRUCT_SIZE];
5757 int target_size;
5758 void *argptr;
5760 ie = ioctl_entries;
5761 for(;;) {
5762 if (ie->target_cmd == 0) {
5763 qemu_log_mask(
5764 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5765 return -TARGET_ENOTTY;
5767 if (ie->target_cmd == cmd)
5768 break;
5769 ie++;
5771 arg_type = ie->arg_type;
5772 if (ie->do_ioctl) {
5773 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5774 } else if (!ie->host_cmd) {
5775 /* Some architectures define BSD ioctls in their headers
5776 that are not implemented in Linux. */
5777 return -TARGET_ENOTTY;
5780 switch(arg_type[0]) {
5781 case TYPE_NULL:
5782 /* no argument */
5783 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5784 break;
5785 case TYPE_PTRVOID:
5786 case TYPE_INT:
5787 case TYPE_LONG:
5788 case TYPE_ULONG:
5789 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5790 break;
5791 case TYPE_PTR:
5792 arg_type++;
5793 target_size = thunk_type_size(arg_type, 0);
5794 switch(ie->access) {
5795 case IOC_R:
5796 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5797 if (!is_error(ret)) {
5798 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5799 if (!argptr)
5800 return -TARGET_EFAULT;
5801 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5802 unlock_user(argptr, arg, target_size);
5804 break;
5805 case IOC_W:
5806 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5807 if (!argptr)
5808 return -TARGET_EFAULT;
5809 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5810 unlock_user(argptr, arg, 0);
5811 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5812 break;
5813 default:
5814 case IOC_RW:
5815 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5816 if (!argptr)
5817 return -TARGET_EFAULT;
5818 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5819 unlock_user(argptr, arg, 0);
5820 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5821 if (!is_error(ret)) {
5822 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5823 if (!argptr)
5824 return -TARGET_EFAULT;
5825 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5826 unlock_user(argptr, arg, target_size);
5828 break;
5830 break;
5831 default:
5832 qemu_log_mask(LOG_UNIMP,
5833 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5834 (long)cmd, arg_type[0]);
5835 ret = -TARGET_ENOTTY;
5836 break;
5838 return ret;
5841 static const bitmask_transtbl iflag_tbl[] = {
5842 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5843 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5844 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5845 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5846 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5847 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5848 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5849 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5850 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5851 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5852 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5853 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5854 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5855 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5856 { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5857 { 0, 0, 0, 0 }
5860 static const bitmask_transtbl oflag_tbl[] = {
5861 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5862 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5863 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5864 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5865 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5866 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5867 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5868 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5869 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5870 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5871 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5872 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5873 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5874 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5875 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5876 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5877 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5878 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5879 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5880 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5881 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5882 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5883 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5884 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5885 { 0, 0, 0, 0 }
5888 static const bitmask_transtbl cflag_tbl[] = {
5889 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5890 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5891 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5892 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5893 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5894 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5895 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5896 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5897 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5898 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5899 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5900 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5901 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5902 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5903 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5904 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5905 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5906 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5907 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5908 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5909 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5910 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5911 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5912 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5913 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5914 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5915 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5916 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5917 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5918 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5919 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5920 { 0, 0, 0, 0 }
5923 static const bitmask_transtbl lflag_tbl[] = {
5924 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5925 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5926 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5927 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5928 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5929 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5930 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5931 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5932 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5933 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5934 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5935 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5936 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5937 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5938 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5939 { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5940 { 0, 0, 0, 0 }
5943 static void target_to_host_termios (void *dst, const void *src)
5945 struct host_termios *host = dst;
5946 const struct target_termios *target = src;
5948 host->c_iflag =
5949 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5950 host->c_oflag =
5951 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5952 host->c_cflag =
5953 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5954 host->c_lflag =
5955 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5956 host->c_line = target->c_line;
5958 memset(host->c_cc, 0, sizeof(host->c_cc));
5959 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5960 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5961 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5962 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5963 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5964 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5965 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5966 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5967 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5968 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5969 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5970 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5971 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5972 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5973 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5974 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5975 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5978 static void host_to_target_termios (void *dst, const void *src)
5980 struct target_termios *target = dst;
5981 const struct host_termios *host = src;
5983 target->c_iflag =
5984 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5985 target->c_oflag =
5986 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5987 target->c_cflag =
5988 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5989 target->c_lflag =
5990 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5991 target->c_line = host->c_line;
5993 memset(target->c_cc, 0, sizeof(target->c_cc));
5994 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5995 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5996 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5997 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5998 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5999 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
6000 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
6001 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
6002 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
6003 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6004 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6005 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6006 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6007 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6008 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6009 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6010 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6013 static const StructEntry struct_termios_def = {
6014 .convert = { host_to_target_termios, target_to_host_termios },
6015 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6016 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6017 .print = print_termios,
6020 /* If the host does not provide these bits, they may be safely discarded. */
6021 #ifndef MAP_SYNC
6022 #define MAP_SYNC 0
6023 #endif
6024 #ifndef MAP_UNINITIALIZED
6025 #define MAP_UNINITIALIZED 0
6026 #endif
6028 static const bitmask_transtbl mmap_flags_tbl[] = {
6029 { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
6030 { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
6031 { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
6032 MAP_TYPE, MAP_SHARED_VALIDATE },
6033 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6034 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6035 MAP_ANONYMOUS, MAP_ANONYMOUS },
6036 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6037 MAP_GROWSDOWN, MAP_GROWSDOWN },
6038 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6039 MAP_DENYWRITE, MAP_DENYWRITE },
6040 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6041 MAP_EXECUTABLE, MAP_EXECUTABLE },
6042 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6043 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6044 MAP_NORESERVE, MAP_NORESERVE },
6045 { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6046 /* MAP_STACK had been ignored by the kernel for quite some time.
6047 Recognize it for the target insofar as we do not want to pass
6048 it through to the host. */
6049 { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6050 { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
6051 { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
6052 { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
6053 { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
6054 MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
6055 { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
6056 MAP_UNINITIALIZED, MAP_UNINITIALIZED },
6057 { 0, 0, 0, 0 }
6061 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6062 * TARGET_I386 is defined if TARGET_X86_64 is defined
6064 #if defined(TARGET_I386)
6066 /* NOTE: there is really one LDT for all the threads */
6067 static uint8_t *ldt_table;
6069 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6071 int size;
6072 void *p;
6074 if (!ldt_table)
6075 return 0;
6076 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6077 if (size > bytecount)
6078 size = bytecount;
6079 p = lock_user(VERIFY_WRITE, ptr, size, 0);
6080 if (!p)
6081 return -TARGET_EFAULT;
6082 /* ??? Should this by byteswapped? */
6083 memcpy(p, ldt_table, size);
6084 unlock_user(p, ptr, size);
6085 return size;
6088 /* XXX: add locking support */
6089 static abi_long write_ldt(CPUX86State *env,
6090 abi_ulong ptr, unsigned long bytecount, int oldmode)
6092 struct target_modify_ldt_ldt_s ldt_info;
6093 struct target_modify_ldt_ldt_s *target_ldt_info;
6094 int seg_32bit, contents, read_exec_only, limit_in_pages;
6095 int seg_not_present, useable, lm;
6096 uint32_t *lp, entry_1, entry_2;
6098 if (bytecount != sizeof(ldt_info))
6099 return -TARGET_EINVAL;
6100 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6101 return -TARGET_EFAULT;
6102 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6103 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6104 ldt_info.limit = tswap32(target_ldt_info->limit);
6105 ldt_info.flags = tswap32(target_ldt_info->flags);
6106 unlock_user_struct(target_ldt_info, ptr, 0);
6108 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6109 return -TARGET_EINVAL;
6110 seg_32bit = ldt_info.flags & 1;
6111 contents = (ldt_info.flags >> 1) & 3;
6112 read_exec_only = (ldt_info.flags >> 3) & 1;
6113 limit_in_pages = (ldt_info.flags >> 4) & 1;
6114 seg_not_present = (ldt_info.flags >> 5) & 1;
6115 useable = (ldt_info.flags >> 6) & 1;
6116 #ifdef TARGET_ABI32
6117 lm = 0;
6118 #else
6119 lm = (ldt_info.flags >> 7) & 1;
6120 #endif
6121 if (contents == 3) {
6122 if (oldmode)
6123 return -TARGET_EINVAL;
6124 if (seg_not_present == 0)
6125 return -TARGET_EINVAL;
6127 /* allocate the LDT */
6128 if (!ldt_table) {
6129 env->ldt.base = target_mmap(0,
6130 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6131 PROT_READ|PROT_WRITE,
6132 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6133 if (env->ldt.base == -1)
6134 return -TARGET_ENOMEM;
6135 memset(g2h_untagged(env->ldt.base), 0,
6136 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6137 env->ldt.limit = 0xffff;
6138 ldt_table = g2h_untagged(env->ldt.base);
6141 /* NOTE: same code as Linux kernel */
6142 /* Allow LDTs to be cleared by the user. */
6143 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6144 if (oldmode ||
6145 (contents == 0 &&
6146 read_exec_only == 1 &&
6147 seg_32bit == 0 &&
6148 limit_in_pages == 0 &&
6149 seg_not_present == 1 &&
6150 useable == 0 )) {
6151 entry_1 = 0;
6152 entry_2 = 0;
6153 goto install;
6157 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6158 (ldt_info.limit & 0x0ffff);
6159 entry_2 = (ldt_info.base_addr & 0xff000000) |
6160 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6161 (ldt_info.limit & 0xf0000) |
6162 ((read_exec_only ^ 1) << 9) |
6163 (contents << 10) |
6164 ((seg_not_present ^ 1) << 15) |
6165 (seg_32bit << 22) |
6166 (limit_in_pages << 23) |
6167 (lm << 21) |
6168 0x7000;
6169 if (!oldmode)
6170 entry_2 |= (useable << 20);
6172 /* Install the new entry ... */
6173 install:
6174 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6175 lp[0] = tswap32(entry_1);
6176 lp[1] = tswap32(entry_2);
6177 return 0;
6180 /* specific and weird i386 syscalls */
6181 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6182 unsigned long bytecount)
6184 abi_long ret;
6186 switch (func) {
6187 case 0:
6188 ret = read_ldt(ptr, bytecount);
6189 break;
6190 case 1:
6191 ret = write_ldt(env, ptr, bytecount, 1);
6192 break;
6193 case 0x11:
6194 ret = write_ldt(env, ptr, bytecount, 0);
6195 break;
6196 default:
6197 ret = -TARGET_ENOSYS;
6198 break;
6200 return ret;
6203 #if defined(TARGET_ABI32)
6204 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6206 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6207 struct target_modify_ldt_ldt_s ldt_info;
6208 struct target_modify_ldt_ldt_s *target_ldt_info;
6209 int seg_32bit, contents, read_exec_only, limit_in_pages;
6210 int seg_not_present, useable, lm;
6211 uint32_t *lp, entry_1, entry_2;
6212 int i;
6214 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6215 if (!target_ldt_info)
6216 return -TARGET_EFAULT;
6217 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6218 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6219 ldt_info.limit = tswap32(target_ldt_info->limit);
6220 ldt_info.flags = tswap32(target_ldt_info->flags);
6221 if (ldt_info.entry_number == -1) {
6222 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6223 if (gdt_table[i] == 0) {
6224 ldt_info.entry_number = i;
6225 target_ldt_info->entry_number = tswap32(i);
6226 break;
6230 unlock_user_struct(target_ldt_info, ptr, 1);
6232 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6233 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6234 return -TARGET_EINVAL;
6235 seg_32bit = ldt_info.flags & 1;
6236 contents = (ldt_info.flags >> 1) & 3;
6237 read_exec_only = (ldt_info.flags >> 3) & 1;
6238 limit_in_pages = (ldt_info.flags >> 4) & 1;
6239 seg_not_present = (ldt_info.flags >> 5) & 1;
6240 useable = (ldt_info.flags >> 6) & 1;
6241 #ifdef TARGET_ABI32
6242 lm = 0;
6243 #else
6244 lm = (ldt_info.flags >> 7) & 1;
6245 #endif
6247 if (contents == 3) {
6248 if (seg_not_present == 0)
6249 return -TARGET_EINVAL;
6252 /* NOTE: same code as Linux kernel */
6253 /* Allow LDTs to be cleared by the user. */
6254 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6255 if ((contents == 0 &&
6256 read_exec_only == 1 &&
6257 seg_32bit == 0 &&
6258 limit_in_pages == 0 &&
6259 seg_not_present == 1 &&
6260 useable == 0 )) {
6261 entry_1 = 0;
6262 entry_2 = 0;
6263 goto install;
6267 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6268 (ldt_info.limit & 0x0ffff);
6269 entry_2 = (ldt_info.base_addr & 0xff000000) |
6270 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6271 (ldt_info.limit & 0xf0000) |
6272 ((read_exec_only ^ 1) << 9) |
6273 (contents << 10) |
6274 ((seg_not_present ^ 1) << 15) |
6275 (seg_32bit << 22) |
6276 (limit_in_pages << 23) |
6277 (useable << 20) |
6278 (lm << 21) |
6279 0x7000;
6281 /* Install the new entry ... */
6282 install:
6283 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6284 lp[0] = tswap32(entry_1);
6285 lp[1] = tswap32(entry_2);
6286 return 0;
6289 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6291 struct target_modify_ldt_ldt_s *target_ldt_info;
6292 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6293 uint32_t base_addr, limit, flags;
6294 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6295 int seg_not_present, useable, lm;
6296 uint32_t *lp, entry_1, entry_2;
6298 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6299 if (!target_ldt_info)
6300 return -TARGET_EFAULT;
6301 idx = tswap32(target_ldt_info->entry_number);
6302 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6303 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6304 unlock_user_struct(target_ldt_info, ptr, 1);
6305 return -TARGET_EINVAL;
6307 lp = (uint32_t *)(gdt_table + idx);
6308 entry_1 = tswap32(lp[0]);
6309 entry_2 = tswap32(lp[1]);
6311 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6312 contents = (entry_2 >> 10) & 3;
6313 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6314 seg_32bit = (entry_2 >> 22) & 1;
6315 limit_in_pages = (entry_2 >> 23) & 1;
6316 useable = (entry_2 >> 20) & 1;
6317 #ifdef TARGET_ABI32
6318 lm = 0;
6319 #else
6320 lm = (entry_2 >> 21) & 1;
6321 #endif
6322 flags = (seg_32bit << 0) | (contents << 1) |
6323 (read_exec_only << 3) | (limit_in_pages << 4) |
6324 (seg_not_present << 5) | (useable << 6) | (lm << 7);
6325 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6326 base_addr = (entry_1 >> 16) |
6327 (entry_2 & 0xff000000) |
6328 ((entry_2 & 0xff) << 16);
6329 target_ldt_info->base_addr = tswapal(base_addr);
6330 target_ldt_info->limit = tswap32(limit);
6331 target_ldt_info->flags = tswap32(flags);
6332 unlock_user_struct(target_ldt_info, ptr, 1);
6333 return 0;
6336 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6338 return -TARGET_ENOSYS;
6340 #else
6341 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6343 abi_long ret = 0;
6344 abi_ulong val;
6345 int idx;
6347 switch(code) {
6348 case TARGET_ARCH_SET_GS:
6349 case TARGET_ARCH_SET_FS:
6350 if (code == TARGET_ARCH_SET_GS)
6351 idx = R_GS;
6352 else
6353 idx = R_FS;
6354 cpu_x86_load_seg(env, idx, 0);
6355 env->segs[idx].base = addr;
6356 break;
6357 case TARGET_ARCH_GET_GS:
6358 case TARGET_ARCH_GET_FS:
6359 if (code == TARGET_ARCH_GET_GS)
6360 idx = R_GS;
6361 else
6362 idx = R_FS;
6363 val = env->segs[idx].base;
6364 if (put_user(val, addr, abi_ulong))
6365 ret = -TARGET_EFAULT;
6366 break;
6367 default:
6368 ret = -TARGET_EINVAL;
6369 break;
6371 return ret;
6373 #endif /* defined(TARGET_ABI32 */
6374 #endif /* defined(TARGET_I386) */
6377 * These constants are generic. Supply any that are missing from the host.
6379 #ifndef PR_SET_NAME
6380 # define PR_SET_NAME 15
6381 # define PR_GET_NAME 16
6382 #endif
6383 #ifndef PR_SET_FP_MODE
6384 # define PR_SET_FP_MODE 45
6385 # define PR_GET_FP_MODE 46
6386 # define PR_FP_MODE_FR (1 << 0)
6387 # define PR_FP_MODE_FRE (1 << 1)
6388 #endif
6389 #ifndef PR_SVE_SET_VL
6390 # define PR_SVE_SET_VL 50
6391 # define PR_SVE_GET_VL 51
6392 # define PR_SVE_VL_LEN_MASK 0xffff
6393 # define PR_SVE_VL_INHERIT (1 << 17)
6394 #endif
6395 #ifndef PR_PAC_RESET_KEYS
6396 # define PR_PAC_RESET_KEYS 54
6397 # define PR_PAC_APIAKEY (1 << 0)
6398 # define PR_PAC_APIBKEY (1 << 1)
6399 # define PR_PAC_APDAKEY (1 << 2)
6400 # define PR_PAC_APDBKEY (1 << 3)
6401 # define PR_PAC_APGAKEY (1 << 4)
6402 #endif
6403 #ifndef PR_SET_TAGGED_ADDR_CTRL
6404 # define PR_SET_TAGGED_ADDR_CTRL 55
6405 # define PR_GET_TAGGED_ADDR_CTRL 56
6406 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6407 #endif
6408 #ifndef PR_MTE_TCF_SHIFT
6409 # define PR_MTE_TCF_SHIFT 1
6410 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6411 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6412 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6413 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6414 # define PR_MTE_TAG_SHIFT 3
6415 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6416 #endif
6417 #ifndef PR_SET_IO_FLUSHER
6418 # define PR_SET_IO_FLUSHER 57
6419 # define PR_GET_IO_FLUSHER 58
6420 #endif
6421 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6422 # define PR_SET_SYSCALL_USER_DISPATCH 59
6423 #endif
6424 #ifndef PR_SME_SET_VL
6425 # define PR_SME_SET_VL 63
6426 # define PR_SME_GET_VL 64
6427 # define PR_SME_VL_LEN_MASK 0xffff
6428 # define PR_SME_VL_INHERIT (1 << 17)
6429 #endif
6431 #include "target_prctl.h"
6433 static abi_long do_prctl_inval0(CPUArchState *env)
6435 return -TARGET_EINVAL;
6438 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6440 return -TARGET_EINVAL;
6443 #ifndef do_prctl_get_fp_mode
6444 #define do_prctl_get_fp_mode do_prctl_inval0
6445 #endif
6446 #ifndef do_prctl_set_fp_mode
6447 #define do_prctl_set_fp_mode do_prctl_inval1
6448 #endif
6449 #ifndef do_prctl_sve_get_vl
6450 #define do_prctl_sve_get_vl do_prctl_inval0
6451 #endif
6452 #ifndef do_prctl_sve_set_vl
6453 #define do_prctl_sve_set_vl do_prctl_inval1
6454 #endif
6455 #ifndef do_prctl_reset_keys
6456 #define do_prctl_reset_keys do_prctl_inval1
6457 #endif
6458 #ifndef do_prctl_set_tagged_addr_ctrl
6459 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6460 #endif
6461 #ifndef do_prctl_get_tagged_addr_ctrl
6462 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6463 #endif
6464 #ifndef do_prctl_get_unalign
6465 #define do_prctl_get_unalign do_prctl_inval1
6466 #endif
6467 #ifndef do_prctl_set_unalign
6468 #define do_prctl_set_unalign do_prctl_inval1
6469 #endif
6470 #ifndef do_prctl_sme_get_vl
6471 #define do_prctl_sme_get_vl do_prctl_inval0
6472 #endif
6473 #ifndef do_prctl_sme_set_vl
6474 #define do_prctl_sme_set_vl do_prctl_inval1
6475 #endif
6477 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6478 abi_long arg3, abi_long arg4, abi_long arg5)
6480 abi_long ret;
6482 switch (option) {
6483 case PR_GET_PDEATHSIG:
6485 int deathsig;
6486 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6487 arg3, arg4, arg5));
6488 if (!is_error(ret) &&
6489 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6490 return -TARGET_EFAULT;
6492 return ret;
6494 case PR_SET_PDEATHSIG:
6495 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6496 arg3, arg4, arg5));
6497 case PR_GET_NAME:
6499 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6500 if (!name) {
6501 return -TARGET_EFAULT;
6503 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6504 arg3, arg4, arg5));
6505 unlock_user(name, arg2, 16);
6506 return ret;
6508 case PR_SET_NAME:
6510 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6511 if (!name) {
6512 return -TARGET_EFAULT;
6514 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6515 arg3, arg4, arg5));
6516 unlock_user(name, arg2, 0);
6517 return ret;
6519 case PR_GET_FP_MODE:
6520 return do_prctl_get_fp_mode(env);
6521 case PR_SET_FP_MODE:
6522 return do_prctl_set_fp_mode(env, arg2);
6523 case PR_SVE_GET_VL:
6524 return do_prctl_sve_get_vl(env);
6525 case PR_SVE_SET_VL:
6526 return do_prctl_sve_set_vl(env, arg2);
6527 case PR_SME_GET_VL:
6528 return do_prctl_sme_get_vl(env);
6529 case PR_SME_SET_VL:
6530 return do_prctl_sme_set_vl(env, arg2);
6531 case PR_PAC_RESET_KEYS:
6532 if (arg3 || arg4 || arg5) {
6533 return -TARGET_EINVAL;
6535 return do_prctl_reset_keys(env, arg2);
6536 case PR_SET_TAGGED_ADDR_CTRL:
6537 if (arg3 || arg4 || arg5) {
6538 return -TARGET_EINVAL;
6540 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6541 case PR_GET_TAGGED_ADDR_CTRL:
6542 if (arg2 || arg3 || arg4 || arg5) {
6543 return -TARGET_EINVAL;
6545 return do_prctl_get_tagged_addr_ctrl(env);
6547 case PR_GET_UNALIGN:
6548 return do_prctl_get_unalign(env, arg2);
6549 case PR_SET_UNALIGN:
6550 return do_prctl_set_unalign(env, arg2);
6552 case PR_CAP_AMBIENT:
6553 case PR_CAPBSET_READ:
6554 case PR_CAPBSET_DROP:
6555 case PR_GET_DUMPABLE:
6556 case PR_SET_DUMPABLE:
6557 case PR_GET_KEEPCAPS:
6558 case PR_SET_KEEPCAPS:
6559 case PR_GET_SECUREBITS:
6560 case PR_SET_SECUREBITS:
6561 case PR_GET_TIMING:
6562 case PR_SET_TIMING:
6563 case PR_GET_TIMERSLACK:
6564 case PR_SET_TIMERSLACK:
6565 case PR_MCE_KILL:
6566 case PR_MCE_KILL_GET:
6567 case PR_GET_NO_NEW_PRIVS:
6568 case PR_SET_NO_NEW_PRIVS:
6569 case PR_GET_IO_FLUSHER:
6570 case PR_SET_IO_FLUSHER:
6571 /* Some prctl options have no pointer arguments and we can pass on. */
6572 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6574 case PR_GET_CHILD_SUBREAPER:
6575 case PR_SET_CHILD_SUBREAPER:
6576 case PR_GET_SPECULATION_CTRL:
6577 case PR_SET_SPECULATION_CTRL:
6578 case PR_GET_TID_ADDRESS:
6579 /* TODO */
6580 return -TARGET_EINVAL;
6582 case PR_GET_FPEXC:
6583 case PR_SET_FPEXC:
6584 /* Was used for SPE on PowerPC. */
6585 return -TARGET_EINVAL;
6587 case PR_GET_ENDIAN:
6588 case PR_SET_ENDIAN:
6589 case PR_GET_FPEMU:
6590 case PR_SET_FPEMU:
6591 case PR_SET_MM:
6592 case PR_GET_SECCOMP:
6593 case PR_SET_SECCOMP:
6594 case PR_SET_SYSCALL_USER_DISPATCH:
6595 case PR_GET_THP_DISABLE:
6596 case PR_SET_THP_DISABLE:
6597 case PR_GET_TSC:
6598 case PR_SET_TSC:
6599 /* Disable to prevent the target disabling stuff we need. */
6600 return -TARGET_EINVAL;
6602 default:
6603 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6604 option);
6605 return -TARGET_EINVAL;
6609 #define NEW_STACK_SIZE 0x40000
6612 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6613 typedef struct {
6614 CPUArchState *env;
6615 pthread_mutex_t mutex;
6616 pthread_cond_t cond;
6617 pthread_t thread;
6618 uint32_t tid;
6619 abi_ulong child_tidptr;
6620 abi_ulong parent_tidptr;
6621 sigset_t sigmask;
6622 } new_thread_info;
6624 static void *clone_func(void *arg)
6626 new_thread_info *info = arg;
6627 CPUArchState *env;
6628 CPUState *cpu;
6629 TaskState *ts;
6631 rcu_register_thread();
6632 tcg_register_thread();
6633 env = info->env;
6634 cpu = env_cpu(env);
6635 thread_cpu = cpu;
6636 ts = (TaskState *)cpu->opaque;
6637 info->tid = sys_gettid();
6638 task_settid(ts);
6639 if (info->child_tidptr)
6640 put_user_u32(info->tid, info->child_tidptr);
6641 if (info->parent_tidptr)
6642 put_user_u32(info->tid, info->parent_tidptr);
6643 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6644 /* Enable signals. */
6645 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6646 /* Signal to the parent that we're ready. */
6647 pthread_mutex_lock(&info->mutex);
6648 pthread_cond_broadcast(&info->cond);
6649 pthread_mutex_unlock(&info->mutex);
6650 /* Wait until the parent has finished initializing the tls state. */
6651 pthread_mutex_lock(&clone_lock);
6652 pthread_mutex_unlock(&clone_lock);
6653 cpu_loop(env);
6654 /* never exits */
6655 return NULL;
6658 /* do_fork() Must return host values and target errnos (unlike most
6659 do_*() functions). */
6660 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6661 abi_ulong parent_tidptr, target_ulong newtls,
6662 abi_ulong child_tidptr)
6664 CPUState *cpu = env_cpu(env);
6665 int ret;
6666 TaskState *ts;
6667 CPUState *new_cpu;
6668 CPUArchState *new_env;
6669 sigset_t sigmask;
6671 flags &= ~CLONE_IGNORED_FLAGS;
6673 /* Emulate vfork() with fork() */
6674 if (flags & CLONE_VFORK)
6675 flags &= ~(CLONE_VFORK | CLONE_VM);
6677 if (flags & CLONE_VM) {
6678 TaskState *parent_ts = (TaskState *)cpu->opaque;
6679 new_thread_info info;
6680 pthread_attr_t attr;
6682 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6683 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6684 return -TARGET_EINVAL;
6687 ts = g_new0(TaskState, 1);
6688 init_task_state(ts);
6690 /* Grab a mutex so that thread setup appears atomic. */
6691 pthread_mutex_lock(&clone_lock);
6694 * If this is our first additional thread, we need to ensure we
6695 * generate code for parallel execution and flush old translations.
6696 * Do this now so that the copy gets CF_PARALLEL too.
6698 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6699 cpu->tcg_cflags |= CF_PARALLEL;
6700 tb_flush(cpu);
6703 /* we create a new CPU instance. */
6704 new_env = cpu_copy(env);
6705 /* Init regs that differ from the parent. */
6706 cpu_clone_regs_child(new_env, newsp, flags);
6707 cpu_clone_regs_parent(env, flags);
6708 new_cpu = env_cpu(new_env);
6709 new_cpu->opaque = ts;
6710 ts->bprm = parent_ts->bprm;
6711 ts->info = parent_ts->info;
6712 ts->signal_mask = parent_ts->signal_mask;
6714 if (flags & CLONE_CHILD_CLEARTID) {
6715 ts->child_tidptr = child_tidptr;
6718 if (flags & CLONE_SETTLS) {
6719 cpu_set_tls (new_env, newtls);
6722 memset(&info, 0, sizeof(info));
6723 pthread_mutex_init(&info.mutex, NULL);
6724 pthread_mutex_lock(&info.mutex);
6725 pthread_cond_init(&info.cond, NULL);
6726 info.env = new_env;
6727 if (flags & CLONE_CHILD_SETTID) {
6728 info.child_tidptr = child_tidptr;
6730 if (flags & CLONE_PARENT_SETTID) {
6731 info.parent_tidptr = parent_tidptr;
6734 ret = pthread_attr_init(&attr);
6735 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6736 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6737 /* It is not safe to deliver signals until the child has finished
6738 initializing, so temporarily block all signals. */
6739 sigfillset(&sigmask);
6740 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6741 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6743 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6744 /* TODO: Free new CPU state if thread creation failed. */
6746 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6747 pthread_attr_destroy(&attr);
6748 if (ret == 0) {
6749 /* Wait for the child to initialize. */
6750 pthread_cond_wait(&info.cond, &info.mutex);
6751 ret = info.tid;
6752 } else {
6753 ret = -1;
6755 pthread_mutex_unlock(&info.mutex);
6756 pthread_cond_destroy(&info.cond);
6757 pthread_mutex_destroy(&info.mutex);
6758 pthread_mutex_unlock(&clone_lock);
6759 } else {
6760 /* if no CLONE_VM, we consider it is a fork */
6761 if (flags & CLONE_INVALID_FORK_FLAGS) {
6762 return -TARGET_EINVAL;
6765 /* We can't support custom termination signals */
6766 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6767 return -TARGET_EINVAL;
6770 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6771 if (flags & CLONE_PIDFD) {
6772 return -TARGET_EINVAL;
6774 #endif
6776 /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6777 if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6778 return -TARGET_EINVAL;
6781 if (block_signals()) {
6782 return -QEMU_ERESTARTSYS;
6785 fork_start();
6786 ret = fork();
6787 if (ret == 0) {
6788 /* Child Process. */
6789 cpu_clone_regs_child(env, newsp, flags);
6790 fork_end(1);
6791 /* There is a race condition here. The parent process could
6792 theoretically read the TID in the child process before the child
6793 tid is set. This would require using either ptrace
6794 (not implemented) or having *_tidptr to point at a shared memory
6795 mapping. We can't repeat the spinlock hack used above because
6796 the child process gets its own copy of the lock. */
6797 if (flags & CLONE_CHILD_SETTID)
6798 put_user_u32(sys_gettid(), child_tidptr);
6799 if (flags & CLONE_PARENT_SETTID)
6800 put_user_u32(sys_gettid(), parent_tidptr);
6801 ts = (TaskState *)cpu->opaque;
6802 if (flags & CLONE_SETTLS)
6803 cpu_set_tls (env, newtls);
6804 if (flags & CLONE_CHILD_CLEARTID)
6805 ts->child_tidptr = child_tidptr;
6806 } else {
6807 cpu_clone_regs_parent(env, flags);
6808 if (flags & CLONE_PIDFD) {
6809 int pid_fd = 0;
6810 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6811 int pid_child = ret;
6812 pid_fd = pidfd_open(pid_child, 0);
6813 if (pid_fd >= 0) {
6814 fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6815 | FD_CLOEXEC);
6816 } else {
6817 pid_fd = 0;
6819 #endif
6820 put_user_u32(pid_fd, parent_tidptr);
6822 fork_end(0);
6824 g_assert(!cpu_in_exclusive_context(cpu));
6826 return ret;
6829 /* warning : doesn't handle linux specific flags... */
6830 static int target_to_host_fcntl_cmd(int cmd)
6832 int ret;
6834 switch(cmd) {
6835 case TARGET_F_DUPFD:
6836 case TARGET_F_GETFD:
6837 case TARGET_F_SETFD:
6838 case TARGET_F_GETFL:
6839 case TARGET_F_SETFL:
6840 case TARGET_F_OFD_GETLK:
6841 case TARGET_F_OFD_SETLK:
6842 case TARGET_F_OFD_SETLKW:
6843 ret = cmd;
6844 break;
6845 case TARGET_F_GETLK:
6846 ret = F_GETLK64;
6847 break;
6848 case TARGET_F_SETLK:
6849 ret = F_SETLK64;
6850 break;
6851 case TARGET_F_SETLKW:
6852 ret = F_SETLKW64;
6853 break;
6854 case TARGET_F_GETOWN:
6855 ret = F_GETOWN;
6856 break;
6857 case TARGET_F_SETOWN:
6858 ret = F_SETOWN;
6859 break;
6860 case TARGET_F_GETSIG:
6861 ret = F_GETSIG;
6862 break;
6863 case TARGET_F_SETSIG:
6864 ret = F_SETSIG;
6865 break;
6866 #if TARGET_ABI_BITS == 32
6867 case TARGET_F_GETLK64:
6868 ret = F_GETLK64;
6869 break;
6870 case TARGET_F_SETLK64:
6871 ret = F_SETLK64;
6872 break;
6873 case TARGET_F_SETLKW64:
6874 ret = F_SETLKW64;
6875 break;
6876 #endif
6877 case TARGET_F_SETLEASE:
6878 ret = F_SETLEASE;
6879 break;
6880 case TARGET_F_GETLEASE:
6881 ret = F_GETLEASE;
6882 break;
6883 #ifdef F_DUPFD_CLOEXEC
6884 case TARGET_F_DUPFD_CLOEXEC:
6885 ret = F_DUPFD_CLOEXEC;
6886 break;
6887 #endif
6888 case TARGET_F_NOTIFY:
6889 ret = F_NOTIFY;
6890 break;
6891 #ifdef F_GETOWN_EX
6892 case TARGET_F_GETOWN_EX:
6893 ret = F_GETOWN_EX;
6894 break;
6895 #endif
6896 #ifdef F_SETOWN_EX
6897 case TARGET_F_SETOWN_EX:
6898 ret = F_SETOWN_EX;
6899 break;
6900 #endif
6901 #ifdef F_SETPIPE_SZ
6902 case TARGET_F_SETPIPE_SZ:
6903 ret = F_SETPIPE_SZ;
6904 break;
6905 case TARGET_F_GETPIPE_SZ:
6906 ret = F_GETPIPE_SZ;
6907 break;
6908 #endif
6909 #ifdef F_ADD_SEALS
6910 case TARGET_F_ADD_SEALS:
6911 ret = F_ADD_SEALS;
6912 break;
6913 case TARGET_F_GET_SEALS:
6914 ret = F_GET_SEALS;
6915 break;
6916 #endif
6917 default:
6918 ret = -TARGET_EINVAL;
6919 break;
6922 #if defined(__powerpc64__)
6923 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6924 * is not supported by kernel. The glibc fcntl call actually adjusts
6925 * them to 5, 6 and 7 before making the syscall(). Since we make the
6926 * syscall directly, adjust to what is supported by the kernel.
6928 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6929 ret -= F_GETLK64 - 5;
6931 #endif
6933 return ret;
6936 #define FLOCK_TRANSTBL \
6937 switch (type) { \
6938 TRANSTBL_CONVERT(F_RDLCK); \
6939 TRANSTBL_CONVERT(F_WRLCK); \
6940 TRANSTBL_CONVERT(F_UNLCK); \
6943 static int target_to_host_flock(int type)
6945 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6946 FLOCK_TRANSTBL
6947 #undef TRANSTBL_CONVERT
6948 return -TARGET_EINVAL;
6951 static int host_to_target_flock(int type)
6953 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6954 FLOCK_TRANSTBL
6955 #undef TRANSTBL_CONVERT
6956 /* if we don't know how to convert the value coming
6957 * from the host we copy to the target field as-is
6959 return type;
6962 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6963 abi_ulong target_flock_addr)
6965 struct target_flock *target_fl;
6966 int l_type;
6968 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6969 return -TARGET_EFAULT;
6972 __get_user(l_type, &target_fl->l_type);
6973 l_type = target_to_host_flock(l_type);
6974 if (l_type < 0) {
6975 return l_type;
6977 fl->l_type = l_type;
6978 __get_user(fl->l_whence, &target_fl->l_whence);
6979 __get_user(fl->l_start, &target_fl->l_start);
6980 __get_user(fl->l_len, &target_fl->l_len);
6981 __get_user(fl->l_pid, &target_fl->l_pid);
6982 unlock_user_struct(target_fl, target_flock_addr, 0);
6983 return 0;
6986 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6987 const struct flock64 *fl)
6989 struct target_flock *target_fl;
6990 short l_type;
6992 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6993 return -TARGET_EFAULT;
6996 l_type = host_to_target_flock(fl->l_type);
6997 __put_user(l_type, &target_fl->l_type);
6998 __put_user(fl->l_whence, &target_fl->l_whence);
6999 __put_user(fl->l_start, &target_fl->l_start);
7000 __put_user(fl->l_len, &target_fl->l_len);
7001 __put_user(fl->l_pid, &target_fl->l_pid);
7002 unlock_user_struct(target_fl, target_flock_addr, 1);
7003 return 0;
7006 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
7007 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
7009 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
7010 struct target_oabi_flock64 {
7011 abi_short l_type;
7012 abi_short l_whence;
7013 abi_llong l_start;
7014 abi_llong l_len;
7015 abi_int l_pid;
7016 } QEMU_PACKED;
7018 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
7019 abi_ulong target_flock_addr)
7021 struct target_oabi_flock64 *target_fl;
7022 int l_type;
7024 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7025 return -TARGET_EFAULT;
7028 __get_user(l_type, &target_fl->l_type);
7029 l_type = target_to_host_flock(l_type);
7030 if (l_type < 0) {
7031 return l_type;
7033 fl->l_type = l_type;
7034 __get_user(fl->l_whence, &target_fl->l_whence);
7035 __get_user(fl->l_start, &target_fl->l_start);
7036 __get_user(fl->l_len, &target_fl->l_len);
7037 __get_user(fl->l_pid, &target_fl->l_pid);
7038 unlock_user_struct(target_fl, target_flock_addr, 0);
7039 return 0;
7042 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7043 const struct flock64 *fl)
7045 struct target_oabi_flock64 *target_fl;
7046 short l_type;
7048 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7049 return -TARGET_EFAULT;
7052 l_type = host_to_target_flock(fl->l_type);
7053 __put_user(l_type, &target_fl->l_type);
7054 __put_user(fl->l_whence, &target_fl->l_whence);
7055 __put_user(fl->l_start, &target_fl->l_start);
7056 __put_user(fl->l_len, &target_fl->l_len);
7057 __put_user(fl->l_pid, &target_fl->l_pid);
7058 unlock_user_struct(target_fl, target_flock_addr, 1);
7059 return 0;
7061 #endif
7063 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7064 abi_ulong target_flock_addr)
7066 struct target_flock64 *target_fl;
7067 int l_type;
7069 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7070 return -TARGET_EFAULT;
7073 __get_user(l_type, &target_fl->l_type);
7074 l_type = target_to_host_flock(l_type);
7075 if (l_type < 0) {
7076 return l_type;
7078 fl->l_type = l_type;
7079 __get_user(fl->l_whence, &target_fl->l_whence);
7080 __get_user(fl->l_start, &target_fl->l_start);
7081 __get_user(fl->l_len, &target_fl->l_len);
7082 __get_user(fl->l_pid, &target_fl->l_pid);
7083 unlock_user_struct(target_fl, target_flock_addr, 0);
7084 return 0;
7087 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7088 const struct flock64 *fl)
7090 struct target_flock64 *target_fl;
7091 short l_type;
7093 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7094 return -TARGET_EFAULT;
7097 l_type = host_to_target_flock(fl->l_type);
7098 __put_user(l_type, &target_fl->l_type);
7099 __put_user(fl->l_whence, &target_fl->l_whence);
7100 __put_user(fl->l_start, &target_fl->l_start);
7101 __put_user(fl->l_len, &target_fl->l_len);
7102 __put_user(fl->l_pid, &target_fl->l_pid);
7103 unlock_user_struct(target_fl, target_flock_addr, 1);
7104 return 0;
7107 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7109 struct flock64 fl64;
7110 #ifdef F_GETOWN_EX
7111 struct f_owner_ex fox;
7112 struct target_f_owner_ex *target_fox;
7113 #endif
7114 abi_long ret;
7115 int host_cmd = target_to_host_fcntl_cmd(cmd);
7117 if (host_cmd == -TARGET_EINVAL)
7118 return host_cmd;
7120 switch(cmd) {
7121 case TARGET_F_GETLK:
7122 ret = copy_from_user_flock(&fl64, arg);
7123 if (ret) {
7124 return ret;
7126 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7127 if (ret == 0) {
7128 ret = copy_to_user_flock(arg, &fl64);
7130 break;
7132 case TARGET_F_SETLK:
7133 case TARGET_F_SETLKW:
7134 ret = copy_from_user_flock(&fl64, arg);
7135 if (ret) {
7136 return ret;
7138 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7139 break;
7141 case TARGET_F_GETLK64:
7142 case TARGET_F_OFD_GETLK:
7143 ret = copy_from_user_flock64(&fl64, arg);
7144 if (ret) {
7145 return ret;
7147 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7148 if (ret == 0) {
7149 ret = copy_to_user_flock64(arg, &fl64);
7151 break;
7152 case TARGET_F_SETLK64:
7153 case TARGET_F_SETLKW64:
7154 case TARGET_F_OFD_SETLK:
7155 case TARGET_F_OFD_SETLKW:
7156 ret = copy_from_user_flock64(&fl64, arg);
7157 if (ret) {
7158 return ret;
7160 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7161 break;
7163 case TARGET_F_GETFL:
7164 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7165 if (ret >= 0) {
7166 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7167 /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7168 if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7169 ret |= TARGET_O_LARGEFILE;
7172 break;
7174 case TARGET_F_SETFL:
7175 ret = get_errno(safe_fcntl(fd, host_cmd,
7176 target_to_host_bitmask(arg,
7177 fcntl_flags_tbl)));
7178 break;
7180 #ifdef F_GETOWN_EX
7181 case TARGET_F_GETOWN_EX:
7182 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7183 if (ret >= 0) {
7184 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7185 return -TARGET_EFAULT;
7186 target_fox->type = tswap32(fox.type);
7187 target_fox->pid = tswap32(fox.pid);
7188 unlock_user_struct(target_fox, arg, 1);
7190 break;
7191 #endif
7193 #ifdef F_SETOWN_EX
7194 case TARGET_F_SETOWN_EX:
7195 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7196 return -TARGET_EFAULT;
7197 fox.type = tswap32(target_fox->type);
7198 fox.pid = tswap32(target_fox->pid);
7199 unlock_user_struct(target_fox, arg, 0);
7200 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7201 break;
7202 #endif
7204 case TARGET_F_SETSIG:
7205 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7206 break;
7208 case TARGET_F_GETSIG:
7209 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7210 break;
7212 case TARGET_F_SETOWN:
7213 case TARGET_F_GETOWN:
7214 case TARGET_F_SETLEASE:
7215 case TARGET_F_GETLEASE:
7216 case TARGET_F_SETPIPE_SZ:
7217 case TARGET_F_GETPIPE_SZ:
7218 case TARGET_F_ADD_SEALS:
7219 case TARGET_F_GET_SEALS:
7220 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7221 break;
7223 default:
7224 ret = get_errno(safe_fcntl(fd, cmd, arg));
7225 break;
7227 return ret;
7230 #ifdef USE_UID16
7232 static inline int high2lowuid(int uid)
7234 if (uid > 65535)
7235 return 65534;
7236 else
7237 return uid;
7240 static inline int high2lowgid(int gid)
7242 if (gid > 65535)
7243 return 65534;
7244 else
7245 return gid;
7248 static inline int low2highuid(int uid)
7250 if ((int16_t)uid == -1)
7251 return -1;
7252 else
7253 return uid;
7256 static inline int low2highgid(int gid)
7258 if ((int16_t)gid == -1)
7259 return -1;
7260 else
7261 return gid;
7263 static inline int tswapid(int id)
7265 return tswap16(id);
7268 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7270 #else /* !USE_UID16 */
7271 static inline int high2lowuid(int uid)
7273 return uid;
7275 static inline int high2lowgid(int gid)
7277 return gid;
7279 static inline int low2highuid(int uid)
7281 return uid;
7283 static inline int low2highgid(int gid)
7285 return gid;
7287 static inline int tswapid(int id)
7289 return tswap32(id);
7292 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7294 #endif /* USE_UID16 */
7296 /* We must do direct syscalls for setting UID/GID, because we want to
7297 * implement the Linux system call semantics of "change only for this thread",
7298 * not the libc/POSIX semantics of "change for all threads in process".
7299 * (See http://ewontfix.com/17/ for more details.)
7300 * We use the 32-bit version of the syscalls if present; if it is not
7301 * then either the host architecture supports 32-bit UIDs natively with
7302 * the standard syscall, or the 16-bit UID is the best we can do.
7304 #ifdef __NR_setuid32
7305 #define __NR_sys_setuid __NR_setuid32
7306 #else
7307 #define __NR_sys_setuid __NR_setuid
7308 #endif
7309 #ifdef __NR_setgid32
7310 #define __NR_sys_setgid __NR_setgid32
7311 #else
7312 #define __NR_sys_setgid __NR_setgid
7313 #endif
7314 #ifdef __NR_setresuid32
7315 #define __NR_sys_setresuid __NR_setresuid32
7316 #else
7317 #define __NR_sys_setresuid __NR_setresuid
7318 #endif
7319 #ifdef __NR_setresgid32
7320 #define __NR_sys_setresgid __NR_setresgid32
7321 #else
7322 #define __NR_sys_setresgid __NR_setresgid
7323 #endif
7325 _syscall1(int, sys_setuid, uid_t, uid)
7326 _syscall1(int, sys_setgid, gid_t, gid)
7327 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7328 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7330 void syscall_init(void)
7332 IOCTLEntry *ie;
7333 const argtype *arg_type;
7334 int size;
7336 thunk_init(STRUCT_MAX);
7338 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7339 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7340 #include "syscall_types.h"
7341 #undef STRUCT
7342 #undef STRUCT_SPECIAL
7344 /* we patch the ioctl size if necessary. We rely on the fact that
7345 no ioctl has all the bits at '1' in the size field */
7346 ie = ioctl_entries;
7347 while (ie->target_cmd != 0) {
7348 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7349 TARGET_IOC_SIZEMASK) {
7350 arg_type = ie->arg_type;
7351 if (arg_type[0] != TYPE_PTR) {
7352 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7353 ie->target_cmd);
7354 exit(1);
7356 arg_type++;
7357 size = thunk_type_size(arg_type, 0);
7358 ie->target_cmd = (ie->target_cmd &
7359 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7360 (size << TARGET_IOC_SIZESHIFT);
7363 /* automatic consistency check if same arch */
7364 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7365 (defined(__x86_64__) && defined(TARGET_X86_64))
7366 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7367 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7368 ie->name, ie->target_cmd, ie->host_cmd);
7370 #endif
7371 ie++;
7375 #ifdef TARGET_NR_truncate64
7376 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7377 abi_long arg2,
7378 abi_long arg3,
7379 abi_long arg4)
7381 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7382 arg2 = arg3;
7383 arg3 = arg4;
7385 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7387 #endif
7389 #ifdef TARGET_NR_ftruncate64
7390 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7391 abi_long arg2,
7392 abi_long arg3,
7393 abi_long arg4)
7395 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7396 arg2 = arg3;
7397 arg3 = arg4;
7399 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7401 #endif
7403 #if defined(TARGET_NR_timer_settime) || \
7404 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7405 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7406 abi_ulong target_addr)
7408 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7409 offsetof(struct target_itimerspec,
7410 it_interval)) ||
7411 target_to_host_timespec(&host_its->it_value, target_addr +
7412 offsetof(struct target_itimerspec,
7413 it_value))) {
7414 return -TARGET_EFAULT;
7417 return 0;
7419 #endif
7421 #if defined(TARGET_NR_timer_settime64) || \
7422 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7423 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7424 abi_ulong target_addr)
7426 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7427 offsetof(struct target__kernel_itimerspec,
7428 it_interval)) ||
7429 target_to_host_timespec64(&host_its->it_value, target_addr +
7430 offsetof(struct target__kernel_itimerspec,
7431 it_value))) {
7432 return -TARGET_EFAULT;
7435 return 0;
7437 #endif
7439 #if ((defined(TARGET_NR_timerfd_gettime) || \
7440 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7441 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7442 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7443 struct itimerspec *host_its)
7445 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7446 it_interval),
7447 &host_its->it_interval) ||
7448 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7449 it_value),
7450 &host_its->it_value)) {
7451 return -TARGET_EFAULT;
7453 return 0;
7455 #endif
7457 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7458 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7459 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7460 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7461 struct itimerspec *host_its)
7463 if (host_to_target_timespec64(target_addr +
7464 offsetof(struct target__kernel_itimerspec,
7465 it_interval),
7466 &host_its->it_interval) ||
7467 host_to_target_timespec64(target_addr +
7468 offsetof(struct target__kernel_itimerspec,
7469 it_value),
7470 &host_its->it_value)) {
7471 return -TARGET_EFAULT;
7473 return 0;
7475 #endif
7477 #if defined(TARGET_NR_adjtimex) || \
7478 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7479 static inline abi_long target_to_host_timex(struct timex *host_tx,
7480 abi_long target_addr)
7482 struct target_timex *target_tx;
7484 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7485 return -TARGET_EFAULT;
7488 __get_user(host_tx->modes, &target_tx->modes);
7489 __get_user(host_tx->offset, &target_tx->offset);
7490 __get_user(host_tx->freq, &target_tx->freq);
7491 __get_user(host_tx->maxerror, &target_tx->maxerror);
7492 __get_user(host_tx->esterror, &target_tx->esterror);
7493 __get_user(host_tx->status, &target_tx->status);
7494 __get_user(host_tx->constant, &target_tx->constant);
7495 __get_user(host_tx->precision, &target_tx->precision);
7496 __get_user(host_tx->tolerance, &target_tx->tolerance);
7497 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7498 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7499 __get_user(host_tx->tick, &target_tx->tick);
7500 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7501 __get_user(host_tx->jitter, &target_tx->jitter);
7502 __get_user(host_tx->shift, &target_tx->shift);
7503 __get_user(host_tx->stabil, &target_tx->stabil);
7504 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7505 __get_user(host_tx->calcnt, &target_tx->calcnt);
7506 __get_user(host_tx->errcnt, &target_tx->errcnt);
7507 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7508 __get_user(host_tx->tai, &target_tx->tai);
7510 unlock_user_struct(target_tx, target_addr, 0);
7511 return 0;
7514 static inline abi_long host_to_target_timex(abi_long target_addr,
7515 struct timex *host_tx)
7517 struct target_timex *target_tx;
7519 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7520 return -TARGET_EFAULT;
7523 __put_user(host_tx->modes, &target_tx->modes);
7524 __put_user(host_tx->offset, &target_tx->offset);
7525 __put_user(host_tx->freq, &target_tx->freq);
7526 __put_user(host_tx->maxerror, &target_tx->maxerror);
7527 __put_user(host_tx->esterror, &target_tx->esterror);
7528 __put_user(host_tx->status, &target_tx->status);
7529 __put_user(host_tx->constant, &target_tx->constant);
7530 __put_user(host_tx->precision, &target_tx->precision);
7531 __put_user(host_tx->tolerance, &target_tx->tolerance);
7532 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7533 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7534 __put_user(host_tx->tick, &target_tx->tick);
7535 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7536 __put_user(host_tx->jitter, &target_tx->jitter);
7537 __put_user(host_tx->shift, &target_tx->shift);
7538 __put_user(host_tx->stabil, &target_tx->stabil);
7539 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7540 __put_user(host_tx->calcnt, &target_tx->calcnt);
7541 __put_user(host_tx->errcnt, &target_tx->errcnt);
7542 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7543 __put_user(host_tx->tai, &target_tx->tai);
7545 unlock_user_struct(target_tx, target_addr, 1);
7546 return 0;
7548 #endif
7551 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7552 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7553 abi_long target_addr)
7555 struct target__kernel_timex *target_tx;
7557 if (copy_from_user_timeval64(&host_tx->time, target_addr +
7558 offsetof(struct target__kernel_timex,
7559 time))) {
7560 return -TARGET_EFAULT;
7563 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7564 return -TARGET_EFAULT;
7567 __get_user(host_tx->modes, &target_tx->modes);
7568 __get_user(host_tx->offset, &target_tx->offset);
7569 __get_user(host_tx->freq, &target_tx->freq);
7570 __get_user(host_tx->maxerror, &target_tx->maxerror);
7571 __get_user(host_tx->esterror, &target_tx->esterror);
7572 __get_user(host_tx->status, &target_tx->status);
7573 __get_user(host_tx->constant, &target_tx->constant);
7574 __get_user(host_tx->precision, &target_tx->precision);
7575 __get_user(host_tx->tolerance, &target_tx->tolerance);
7576 __get_user(host_tx->tick, &target_tx->tick);
7577 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7578 __get_user(host_tx->jitter, &target_tx->jitter);
7579 __get_user(host_tx->shift, &target_tx->shift);
7580 __get_user(host_tx->stabil, &target_tx->stabil);
7581 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7582 __get_user(host_tx->calcnt, &target_tx->calcnt);
7583 __get_user(host_tx->errcnt, &target_tx->errcnt);
7584 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7585 __get_user(host_tx->tai, &target_tx->tai);
7587 unlock_user_struct(target_tx, target_addr, 0);
7588 return 0;
7591 static inline abi_long host_to_target_timex64(abi_long target_addr,
7592 struct timex *host_tx)
7594 struct target__kernel_timex *target_tx;
7596 if (copy_to_user_timeval64(target_addr +
7597 offsetof(struct target__kernel_timex, time),
7598 &host_tx->time)) {
7599 return -TARGET_EFAULT;
7602 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7603 return -TARGET_EFAULT;
7606 __put_user(host_tx->modes, &target_tx->modes);
7607 __put_user(host_tx->offset, &target_tx->offset);
7608 __put_user(host_tx->freq, &target_tx->freq);
7609 __put_user(host_tx->maxerror, &target_tx->maxerror);
7610 __put_user(host_tx->esterror, &target_tx->esterror);
7611 __put_user(host_tx->status, &target_tx->status);
7612 __put_user(host_tx->constant, &target_tx->constant);
7613 __put_user(host_tx->precision, &target_tx->precision);
7614 __put_user(host_tx->tolerance, &target_tx->tolerance);
7615 __put_user(host_tx->tick, &target_tx->tick);
7616 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7617 __put_user(host_tx->jitter, &target_tx->jitter);
7618 __put_user(host_tx->shift, &target_tx->shift);
7619 __put_user(host_tx->stabil, &target_tx->stabil);
7620 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7621 __put_user(host_tx->calcnt, &target_tx->calcnt);
7622 __put_user(host_tx->errcnt, &target_tx->errcnt);
7623 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7624 __put_user(host_tx->tai, &target_tx->tai);
7626 unlock_user_struct(target_tx, target_addr, 1);
7627 return 0;
7629 #endif
7631 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7632 #define sigev_notify_thread_id _sigev_un._tid
7633 #endif
7635 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7636 abi_ulong target_addr)
7638 struct target_sigevent *target_sevp;
7640 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7641 return -TARGET_EFAULT;
7644 /* This union is awkward on 64 bit systems because it has a 32 bit
7645 * integer and a pointer in it; we follow the conversion approach
7646 * used for handling sigval types in signal.c so the guest should get
7647 * the correct value back even if we did a 64 bit byteswap and it's
7648 * using the 32 bit integer.
7650 host_sevp->sigev_value.sival_ptr =
7651 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7652 host_sevp->sigev_signo =
7653 target_to_host_signal(tswap32(target_sevp->sigev_signo));
7654 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7655 host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7657 unlock_user_struct(target_sevp, target_addr, 1);
7658 return 0;
7661 #if defined(TARGET_NR_mlockall)
7662 static inline int target_to_host_mlockall_arg(int arg)
7664 int result = 0;
7666 if (arg & TARGET_MCL_CURRENT) {
7667 result |= MCL_CURRENT;
7669 if (arg & TARGET_MCL_FUTURE) {
7670 result |= MCL_FUTURE;
7672 #ifdef MCL_ONFAULT
7673 if (arg & TARGET_MCL_ONFAULT) {
7674 result |= MCL_ONFAULT;
7676 #endif
7678 return result;
7680 #endif
7682 static inline int target_to_host_msync_arg(abi_long arg)
7684 return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7685 ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7686 ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7687 (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7690 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7691 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7692 defined(TARGET_NR_newfstatat))
7693 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7694 abi_ulong target_addr,
7695 struct stat *host_st)
7697 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7698 if (cpu_env->eabi) {
7699 struct target_eabi_stat64 *target_st;
7701 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7702 return -TARGET_EFAULT;
7703 memset(target_st, 0, sizeof(struct target_eabi_stat64));
7704 __put_user(host_st->st_dev, &target_st->st_dev);
7705 __put_user(host_st->st_ino, &target_st->st_ino);
7706 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7707 __put_user(host_st->st_ino, &target_st->__st_ino);
7708 #endif
7709 __put_user(host_st->st_mode, &target_st->st_mode);
7710 __put_user(host_st->st_nlink, &target_st->st_nlink);
7711 __put_user(host_st->st_uid, &target_st->st_uid);
7712 __put_user(host_st->st_gid, &target_st->st_gid);
7713 __put_user(host_st->st_rdev, &target_st->st_rdev);
7714 __put_user(host_st->st_size, &target_st->st_size);
7715 __put_user(host_st->st_blksize, &target_st->st_blksize);
7716 __put_user(host_st->st_blocks, &target_st->st_blocks);
7717 __put_user(host_st->st_atime, &target_st->target_st_atime);
7718 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7719 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7720 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7721 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7722 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7723 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7724 #endif
7725 unlock_user_struct(target_st, target_addr, 1);
7726 } else
7727 #endif
7729 #if defined(TARGET_HAS_STRUCT_STAT64)
7730 struct target_stat64 *target_st;
7731 #else
7732 struct target_stat *target_st;
7733 #endif
7735 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7736 return -TARGET_EFAULT;
7737 memset(target_st, 0, sizeof(*target_st));
7738 __put_user(host_st->st_dev, &target_st->st_dev);
7739 __put_user(host_st->st_ino, &target_st->st_ino);
7740 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7741 __put_user(host_st->st_ino, &target_st->__st_ino);
7742 #endif
7743 __put_user(host_st->st_mode, &target_st->st_mode);
7744 __put_user(host_st->st_nlink, &target_st->st_nlink);
7745 __put_user(host_st->st_uid, &target_st->st_uid);
7746 __put_user(host_st->st_gid, &target_st->st_gid);
7747 __put_user(host_st->st_rdev, &target_st->st_rdev);
7748 /* XXX: better use of kernel struct */
7749 __put_user(host_st->st_size, &target_st->st_size);
7750 __put_user(host_st->st_blksize, &target_st->st_blksize);
7751 __put_user(host_st->st_blocks, &target_st->st_blocks);
7752 __put_user(host_st->st_atime, &target_st->target_st_atime);
7753 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7754 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7755 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7756 __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7757 __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7758 __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7759 #endif
7760 unlock_user_struct(target_st, target_addr, 1);
7763 return 0;
7765 #endif
7767 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7768 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7769 abi_ulong target_addr)
7771 struct target_statx *target_stx;
7773 if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7774 return -TARGET_EFAULT;
7776 memset(target_stx, 0, sizeof(*target_stx));
7778 __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7779 __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7780 __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7781 __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7782 __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7783 __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7784 __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7785 __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7786 __put_user(host_stx->stx_size, &target_stx->stx_size);
7787 __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7788 __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7789 __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7790 __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7791 __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7792 __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7793 __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7794 __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7795 __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7796 __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7797 __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7798 __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7799 __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7800 __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7802 unlock_user_struct(target_stx, target_addr, 1);
7804 return 0;
7806 #endif
7808 static int do_sys_futex(int *uaddr, int op, int val,
7809 const struct timespec *timeout, int *uaddr2,
7810 int val3)
7812 #if HOST_LONG_BITS == 64
7813 #if defined(__NR_futex)
7814 /* always a 64-bit time_t, it doesn't define _time64 version */
7815 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7817 #endif
7818 #else /* HOST_LONG_BITS == 64 */
7819 #if defined(__NR_futex_time64)
7820 if (sizeof(timeout->tv_sec) == 8) {
7821 /* _time64 function on 32bit arch */
7822 return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7824 #endif
7825 #if defined(__NR_futex)
7826 /* old function on 32bit arch */
7827 return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7828 #endif
7829 #endif /* HOST_LONG_BITS == 64 */
7830 g_assert_not_reached();
7833 static int do_safe_futex(int *uaddr, int op, int val,
7834 const struct timespec *timeout, int *uaddr2,
7835 int val3)
7837 #if HOST_LONG_BITS == 64
7838 #if defined(__NR_futex)
7839 /* always a 64-bit time_t, it doesn't define _time64 version */
7840 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7841 #endif
7842 #else /* HOST_LONG_BITS == 64 */
7843 #if defined(__NR_futex_time64)
7844 if (sizeof(timeout->tv_sec) == 8) {
7845 /* _time64 function on 32bit arch */
7846 return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7847 val3));
7849 #endif
7850 #if defined(__NR_futex)
7851 /* old function on 32bit arch */
7852 return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7853 #endif
7854 #endif /* HOST_LONG_BITS == 64 */
7855 return -TARGET_ENOSYS;
7858 /* ??? Using host futex calls even when target atomic operations
7859 are not really atomic probably breaks things. However implementing
7860 futexes locally would make futexes shared between multiple processes
7861 tricky. However they're probably useless because guest atomic
7862 operations won't work either. */
7863 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7864 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7865 int op, int val, target_ulong timeout,
7866 target_ulong uaddr2, int val3)
7868 struct timespec ts, *pts = NULL;
7869 void *haddr2 = NULL;
7870 int base_op;
7872 /* We assume FUTEX_* constants are the same on both host and target. */
7873 #ifdef FUTEX_CMD_MASK
7874 base_op = op & FUTEX_CMD_MASK;
7875 #else
7876 base_op = op;
7877 #endif
7878 switch (base_op) {
7879 case FUTEX_WAIT:
7880 case FUTEX_WAIT_BITSET:
7881 val = tswap32(val);
7882 break;
7883 case FUTEX_WAIT_REQUEUE_PI:
7884 val = tswap32(val);
7885 haddr2 = g2h(cpu, uaddr2);
7886 break;
7887 case FUTEX_LOCK_PI:
7888 case FUTEX_LOCK_PI2:
7889 break;
7890 case FUTEX_WAKE:
7891 case FUTEX_WAKE_BITSET:
7892 case FUTEX_TRYLOCK_PI:
7893 case FUTEX_UNLOCK_PI:
7894 timeout = 0;
7895 break;
7896 case FUTEX_FD:
7897 val = target_to_host_signal(val);
7898 timeout = 0;
7899 break;
7900 case FUTEX_CMP_REQUEUE:
7901 case FUTEX_CMP_REQUEUE_PI:
7902 val3 = tswap32(val3);
7903 /* fall through */
7904 case FUTEX_REQUEUE:
7905 case FUTEX_WAKE_OP:
7907 * For these, the 4th argument is not TIMEOUT, but VAL2.
7908 * But the prototype of do_safe_futex takes a pointer, so
7909 * insert casts to satisfy the compiler. We do not need
7910 * to tswap VAL2 since it's not compared to guest memory.
7912 pts = (struct timespec *)(uintptr_t)timeout;
7913 timeout = 0;
7914 haddr2 = g2h(cpu, uaddr2);
7915 break;
7916 default:
7917 return -TARGET_ENOSYS;
7919 if (timeout) {
7920 pts = &ts;
7921 if (time64
7922 ? target_to_host_timespec64(pts, timeout)
7923 : target_to_host_timespec(pts, timeout)) {
7924 return -TARGET_EFAULT;
7927 return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7929 #endif
7931 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7932 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7933 abi_long handle, abi_long mount_id,
7934 abi_long flags)
7936 struct file_handle *target_fh;
7937 struct file_handle *fh;
7938 int mid = 0;
7939 abi_long ret;
7940 char *name;
7941 unsigned int size, total_size;
7943 if (get_user_s32(size, handle)) {
7944 return -TARGET_EFAULT;
7947 name = lock_user_string(pathname);
7948 if (!name) {
7949 return -TARGET_EFAULT;
7952 total_size = sizeof(struct file_handle) + size;
7953 target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7954 if (!target_fh) {
7955 unlock_user(name, pathname, 0);
7956 return -TARGET_EFAULT;
7959 fh = g_malloc0(total_size);
7960 fh->handle_bytes = size;
7962 ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7963 unlock_user(name, pathname, 0);
7965 /* man name_to_handle_at(2):
7966 * Other than the use of the handle_bytes field, the caller should treat
7967 * the file_handle structure as an opaque data type
7970 memcpy(target_fh, fh, total_size);
7971 target_fh->handle_bytes = tswap32(fh->handle_bytes);
7972 target_fh->handle_type = tswap32(fh->handle_type);
7973 g_free(fh);
7974 unlock_user(target_fh, handle, total_size);
7976 if (put_user_s32(mid, mount_id)) {
7977 return -TARGET_EFAULT;
7980 return ret;
7983 #endif
7985 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7986 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7987 abi_long flags)
7989 struct file_handle *target_fh;
7990 struct file_handle *fh;
7991 unsigned int size, total_size;
7992 abi_long ret;
7994 if (get_user_s32(size, handle)) {
7995 return -TARGET_EFAULT;
7998 total_size = sizeof(struct file_handle) + size;
7999 target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
8000 if (!target_fh) {
8001 return -TARGET_EFAULT;
8004 fh = g_memdup(target_fh, total_size);
8005 fh->handle_bytes = size;
8006 fh->handle_type = tswap32(target_fh->handle_type);
8008 ret = get_errno(open_by_handle_at(mount_fd, fh,
8009 target_to_host_bitmask(flags, fcntl_flags_tbl)));
8011 g_free(fh);
8013 unlock_user(target_fh, handle, total_size);
8015 return ret;
8017 #endif
8019 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
8021 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
8023 int host_flags;
8024 target_sigset_t *target_mask;
8025 sigset_t host_mask;
8026 abi_long ret;
8028 if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
8029 return -TARGET_EINVAL;
8031 if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
8032 return -TARGET_EFAULT;
8035 target_to_host_sigset(&host_mask, target_mask);
8037 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8039 ret = get_errno(signalfd(fd, &host_mask, host_flags));
8040 if (ret >= 0) {
8041 fd_trans_register(ret, &target_signalfd_trans);
8044 unlock_user_struct(target_mask, mask, 0);
8046 return ret;
8048 #endif
8050 /* Map host to target signal numbers for the wait family of syscalls.
8051 Assume all other status bits are the same. */
8052 int host_to_target_waitstatus(int status)
8054 if (WIFSIGNALED(status)) {
8055 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8057 if (WIFSTOPPED(status)) {
8058 return (host_to_target_signal(WSTOPSIG(status)) << 8)
8059 | (status & 0xff);
8061 return status;
8064 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8066 CPUState *cpu = env_cpu(cpu_env);
8067 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8068 int i;
8070 for (i = 0; i < bprm->argc; i++) {
8071 size_t len = strlen(bprm->argv[i]) + 1;
8073 if (write(fd, bprm->argv[i], len) != len) {
8074 return -1;
8078 return 0;
8081 static void show_smaps(int fd, unsigned long size)
8083 unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8084 unsigned long size_kb = size >> 10;
8086 dprintf(fd, "Size: %lu kB\n"
8087 "KernelPageSize: %lu kB\n"
8088 "MMUPageSize: %lu kB\n"
8089 "Rss: 0 kB\n"
8090 "Pss: 0 kB\n"
8091 "Pss_Dirty: 0 kB\n"
8092 "Shared_Clean: 0 kB\n"
8093 "Shared_Dirty: 0 kB\n"
8094 "Private_Clean: 0 kB\n"
8095 "Private_Dirty: 0 kB\n"
8096 "Referenced: 0 kB\n"
8097 "Anonymous: 0 kB\n"
8098 "LazyFree: 0 kB\n"
8099 "AnonHugePages: 0 kB\n"
8100 "ShmemPmdMapped: 0 kB\n"
8101 "FilePmdMapped: 0 kB\n"
8102 "Shared_Hugetlb: 0 kB\n"
8103 "Private_Hugetlb: 0 kB\n"
8104 "Swap: 0 kB\n"
8105 "SwapPss: 0 kB\n"
8106 "Locked: 0 kB\n"
8107 "THPeligible: 0\n", size_kb, page_size_kb, page_size_kb);
8110 static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
8112 CPUState *cpu = env_cpu(cpu_env);
8113 TaskState *ts = cpu->opaque;
8114 GSList *map_info = read_self_maps();
8115 GSList *s;
8116 int count;
8118 for (s = map_info; s; s = g_slist_next(s)) {
8119 MapInfo *e = (MapInfo *) s->data;
8121 if (h2g_valid(e->start)) {
8122 unsigned long min = e->start;
8123 unsigned long max = e->end;
8124 int flags = page_get_flags(h2g(min));
8125 const char *path;
8127 max = h2g_valid(max - 1) ?
8128 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8130 if (!page_check_range(h2g(min), max - min, flags)) {
8131 continue;
8134 #ifdef TARGET_HPPA
8135 if (h2g(max) == ts->info->stack_limit) {
8136 #else
8137 if (h2g(min) == ts->info->stack_limit) {
8138 #endif
8139 path = "[stack]";
8140 } else {
8141 path = e->path;
8144 count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8145 " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8146 h2g(min), h2g(max - 1) + 1,
8147 (flags & PAGE_READ) ? 'r' : '-',
8148 (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8149 (flags & PAGE_EXEC) ? 'x' : '-',
8150 e->is_priv ? 'p' : 's',
8151 (uint64_t) e->offset, e->dev, e->inode);
8152 if (path) {
8153 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8154 } else {
8155 dprintf(fd, "\n");
8157 if (smaps) {
8158 show_smaps(fd, max - min);
8159 dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
8160 (flags & PAGE_READ) ? " rd" : "",
8161 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8162 (flags & PAGE_EXEC) ? " ex" : "",
8163 e->is_priv ? "" : " sh",
8164 (flags & PAGE_READ) ? " mr" : "",
8165 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8166 (flags & PAGE_EXEC) ? " me" : "",
8167 e->is_priv ? "" : " ms");
8172 free_self_maps(map_info);
8174 #ifdef TARGET_VSYSCALL_PAGE
8176 * We only support execution from the vsyscall page.
8177 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8179 count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8180 " --xp 00000000 00:00 0",
8181 TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8182 dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8183 if (smaps) {
8184 show_smaps(fd, TARGET_PAGE_SIZE);
8185 dprintf(fd, "VmFlags: ex\n");
8187 #endif
8189 return 0;
8192 static int open_self_maps(CPUArchState *cpu_env, int fd)
8194 return open_self_maps_1(cpu_env, fd, false);
8197 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8199 return open_self_maps_1(cpu_env, fd, true);
8202 static int open_self_stat(CPUArchState *cpu_env, int fd)
8204 CPUState *cpu = env_cpu(cpu_env);
8205 TaskState *ts = cpu->opaque;
8206 g_autoptr(GString) buf = g_string_new(NULL);
8207 int i;
8209 for (i = 0; i < 44; i++) {
8210 if (i == 0) {
8211 /* pid */
8212 g_string_printf(buf, FMT_pid " ", getpid());
8213 } else if (i == 1) {
8214 /* app name */
8215 gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8216 bin = bin ? bin + 1 : ts->bprm->argv[0];
8217 g_string_printf(buf, "(%.15s) ", bin);
8218 } else if (i == 2) {
8219 /* task state */
8220 g_string_assign(buf, "R "); /* we are running right now */
8221 } else if (i == 3) {
8222 /* ppid */
8223 g_string_printf(buf, FMT_pid " ", getppid());
8224 } else if (i == 21) {
8225 /* starttime */
8226 g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8227 } else if (i == 27) {
8228 /* stack bottom */
8229 g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8230 } else {
8231 /* for the rest, there is MasterCard */
8232 g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8235 if (write(fd, buf->str, buf->len) != buf->len) {
8236 return -1;
8240 return 0;
8243 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8245 CPUState *cpu = env_cpu(cpu_env);
8246 TaskState *ts = cpu->opaque;
8247 abi_ulong auxv = ts->info->saved_auxv;
8248 abi_ulong len = ts->info->auxv_len;
8249 char *ptr;
8252 * Auxiliary vector is stored in target process stack.
8253 * read in whole auxv vector and copy it to file
8255 ptr = lock_user(VERIFY_READ, auxv, len, 0);
8256 if (ptr != NULL) {
8257 while (len > 0) {
8258 ssize_t r;
8259 r = write(fd, ptr, len);
8260 if (r <= 0) {
8261 break;
8263 len -= r;
8264 ptr += r;
8266 lseek(fd, 0, SEEK_SET);
8267 unlock_user(ptr, auxv, len);
8270 return 0;
8273 static int is_proc_myself(const char *filename, const char *entry)
8275 if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8276 filename += strlen("/proc/");
8277 if (!strncmp(filename, "self/", strlen("self/"))) {
8278 filename += strlen("self/");
8279 } else if (*filename >= '1' && *filename <= '9') {
8280 char myself[80];
8281 snprintf(myself, sizeof(myself), "%d/", getpid());
8282 if (!strncmp(filename, myself, strlen(myself))) {
8283 filename += strlen(myself);
8284 } else {
8285 return 0;
8287 } else {
8288 return 0;
8290 if (!strcmp(filename, entry)) {
8291 return 1;
8294 return 0;
8297 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8298 const char *fmt, int code)
8300 if (logfile) {
8301 CPUState *cs = env_cpu(env);
8303 fprintf(logfile, fmt, code);
8304 fprintf(logfile, "Failing executable: %s\n", exec_path);
8305 cpu_dump_state(cs, logfile, 0);
8306 open_self_maps(env, fileno(logfile));
8310 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8312 /* dump to console */
8313 excp_dump_file(stderr, env, fmt, code);
8315 /* dump to log file */
8316 if (qemu_log_separate()) {
8317 FILE *logfile = qemu_log_trylock();
8319 excp_dump_file(logfile, env, fmt, code);
8320 qemu_log_unlock(logfile);
8324 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8325 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8326 defined(TARGET_RISCV) || defined(TARGET_S390X)
8327 static int is_proc(const char *filename, const char *entry)
8329 return strcmp(filename, entry) == 0;
8331 #endif
8333 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8334 static int open_net_route(CPUArchState *cpu_env, int fd)
8336 FILE *fp;
8337 char *line = NULL;
8338 size_t len = 0;
8339 ssize_t read;
8341 fp = fopen("/proc/net/route", "r");
8342 if (fp == NULL) {
8343 return -1;
8346 /* read header */
8348 read = getline(&line, &len, fp);
8349 dprintf(fd, "%s", line);
8351 /* read routes */
8353 while ((read = getline(&line, &len, fp)) != -1) {
8354 char iface[16];
8355 uint32_t dest, gw, mask;
8356 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8357 int fields;
8359 fields = sscanf(line,
8360 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8361 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8362 &mask, &mtu, &window, &irtt);
8363 if (fields != 11) {
8364 continue;
8366 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8367 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8368 metric, tswap32(mask), mtu, window, irtt);
8371 free(line);
8372 fclose(fp);
8374 return 0;
8376 #endif
8378 #if defined(TARGET_SPARC)
8379 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8381 dprintf(fd, "type\t\t: sun4u\n");
8382 return 0;
8384 #endif
8386 #if defined(TARGET_HPPA)
8387 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8389 int i, num_cpus;
8391 num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8392 for (i = 0; i < num_cpus; i++) {
8393 dprintf(fd, "processor\t: %d\n", i);
8394 dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8395 dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8396 dprintf(fd, "capabilities\t: os32\n");
8397 dprintf(fd, "model\t\t: 9000/778/B160L - "
8398 "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8400 return 0;
8402 #endif
8404 #if defined(TARGET_RISCV)
8405 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8407 int i;
8408 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8409 RISCVCPU *cpu = env_archcpu(cpu_env);
8410 const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
8411 char *isa_string = riscv_isa_string(cpu);
8412 const char *mmu;
8414 if (cfg->mmu) {
8415 mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48";
8416 } else {
8417 mmu = "none";
8420 for (i = 0; i < num_cpus; i++) {
8421 dprintf(fd, "processor\t: %d\n", i);
8422 dprintf(fd, "hart\t\t: %d\n", i);
8423 dprintf(fd, "isa\t\t: %s\n", isa_string);
8424 dprintf(fd, "mmu\t\t: %s\n", mmu);
8425 dprintf(fd, "uarch\t\t: qemu\n\n");
8428 g_free(isa_string);
8429 return 0;
8431 #endif
8433 #if defined(TARGET_S390X)
8435 * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8436 * show in /proc/cpuinfo.
8438 * Skip the following in order to match the missing support in op_ecag():
8439 * - show_cacheinfo().
8440 * - show_cpu_topology().
8441 * - show_cpu_mhz().
8443 * Use fixed values for certain fields:
8444 * - bogomips per cpu - from a qemu-system-s390x run.
8445 * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8447 * Keep the code structure close to arch/s390/kernel/processor.c.
8450 static void show_facilities(int fd)
8452 size_t sizeof_stfl_bytes = 2048;
8453 g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
8454 unsigned int bit;
8456 dprintf(fd, "facilities :");
8457 s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
8458 for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
8459 if (test_be_bit(bit, stfl_bytes)) {
8460 dprintf(fd, " %d", bit);
8463 dprintf(fd, "\n");
8466 static int cpu_ident(unsigned long n)
8468 return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
8472 static void show_cpu_summary(CPUArchState *cpu_env, int fd)
8474 S390CPUModel *model = env_archcpu(cpu_env)->model;
8475 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8476 uint32_t elf_hwcap = get_elf_hwcap();
8477 const char *hwcap_str;
8478 int i;
8480 dprintf(fd, "vendor_id : IBM/S390\n"
8481 "# processors : %i\n"
8482 "bogomips per cpu: 13370.00\n",
8483 num_cpus);
8484 dprintf(fd, "max thread id : 0\n");
8485 dprintf(fd, "features\t: ");
8486 for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
8487 if (!(elf_hwcap & (1 << i))) {
8488 continue;
8490 hwcap_str = elf_hwcap_str(i);
8491 if (hwcap_str) {
8492 dprintf(fd, "%s ", hwcap_str);
8495 dprintf(fd, "\n");
8496 show_facilities(fd);
8497 for (i = 0; i < num_cpus; i++) {
8498 dprintf(fd, "processor %d: "
8499 "version = %02X, "
8500 "identification = %06X, "
8501 "machine = %04X\n",
8502 i, model->cpu_ver, cpu_ident(i), model->def->type);
8506 static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
8508 S390CPUModel *model = env_archcpu(cpu_env)->model;
8510 dprintf(fd, "version : %02X\n", model->cpu_ver);
8511 dprintf(fd, "identification : %06X\n", cpu_ident(n));
8512 dprintf(fd, "machine : %04X\n", model->def->type);
8515 static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
8517 dprintf(fd, "\ncpu number : %ld\n", n);
8518 show_cpu_ids(cpu_env, fd, n);
8521 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8523 int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8524 int i;
8526 show_cpu_summary(cpu_env, fd);
8527 for (i = 0; i < num_cpus; i++) {
8528 show_cpuinfo(cpu_env, fd, i);
8530 return 0;
8532 #endif
8534 #if defined(TARGET_M68K)
8535 static int open_hardware(CPUArchState *cpu_env, int fd)
8537 dprintf(fd, "Model:\t\tqemu-m68k\n");
8538 return 0;
8540 #endif
8542 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8543 int flags, mode_t mode, bool safe)
8545 struct fake_open {
8546 const char *filename;
8547 int (*fill)(CPUArchState *cpu_env, int fd);
8548 int (*cmp)(const char *s1, const char *s2);
8550 const struct fake_open *fake_open;
8551 static const struct fake_open fakes[] = {
8552 { "maps", open_self_maps, is_proc_myself },
8553 { "smaps", open_self_smaps, is_proc_myself },
8554 { "stat", open_self_stat, is_proc_myself },
8555 { "auxv", open_self_auxv, is_proc_myself },
8556 { "cmdline", open_self_cmdline, is_proc_myself },
8557 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8558 { "/proc/net/route", open_net_route, is_proc },
8559 #endif
8560 #if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
8561 defined(TARGET_RISCV) || defined(TARGET_S390X)
8562 { "/proc/cpuinfo", open_cpuinfo, is_proc },
8563 #endif
8564 #if defined(TARGET_M68K)
8565 { "/proc/hardware", open_hardware, is_proc },
8566 #endif
8567 { NULL, NULL, NULL }
8570 if (is_proc_myself(pathname, "exe")) {
8571 if (safe) {
8572 return safe_openat(dirfd, exec_path, flags, mode);
8573 } else {
8574 return openat(dirfd, exec_path, flags, mode);
8578 for (fake_open = fakes; fake_open->filename; fake_open++) {
8579 if (fake_open->cmp(pathname, fake_open->filename)) {
8580 break;
8584 if (fake_open->filename) {
8585 const char *tmpdir;
8586 char filename[PATH_MAX];
8587 int fd, r;
8589 fd = memfd_create("qemu-open", 0);
8590 if (fd < 0) {
8591 if (errno != ENOSYS) {
8592 return fd;
8594 /* create temporary file to map stat to */
8595 tmpdir = getenv("TMPDIR");
8596 if (!tmpdir)
8597 tmpdir = "/tmp";
8598 snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8599 fd = mkstemp(filename);
8600 if (fd < 0) {
8601 return fd;
8603 unlink(filename);
8606 if ((r = fake_open->fill(cpu_env, fd))) {
8607 int e = errno;
8608 close(fd);
8609 errno = e;
8610 return r;
8612 lseek(fd, 0, SEEK_SET);
8614 return fd;
8617 if (safe) {
8618 return safe_openat(dirfd, path(pathname), flags, mode);
8619 } else {
8620 return openat(dirfd, path(pathname), flags, mode);
8624 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8626 ssize_t ret;
8628 if (!pathname || !buf) {
8629 errno = EFAULT;
8630 return -1;
8633 if (!bufsiz) {
8634 /* Short circuit this for the magic exe check. */
8635 errno = EINVAL;
8636 return -1;
8639 if (is_proc_myself((const char *)pathname, "exe")) {
8641 * Don't worry about sign mismatch as earlier mapping
8642 * logic would have thrown a bad address error.
8644 ret = MIN(strlen(exec_path), bufsiz);
8645 /* We cannot NUL terminate the string. */
8646 memcpy(buf, exec_path, ret);
8647 } else {
8648 ret = readlink(path(pathname), buf, bufsiz);
8651 return ret;
8654 static int do_execv(CPUArchState *cpu_env, int dirfd,
8655 abi_long pathname, abi_long guest_argp,
8656 abi_long guest_envp, int flags, bool is_execveat)
8658 int ret;
8659 char **argp, **envp;
8660 int argc, envc;
8661 abi_ulong gp;
8662 abi_ulong addr;
8663 char **q;
8664 void *p;
8666 argc = 0;
8668 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8669 if (get_user_ual(addr, gp)) {
8670 return -TARGET_EFAULT;
8672 if (!addr) {
8673 break;
8675 argc++;
8677 envc = 0;
8678 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8679 if (get_user_ual(addr, gp)) {
8680 return -TARGET_EFAULT;
8682 if (!addr) {
8683 break;
8685 envc++;
8688 argp = g_new0(char *, argc + 1);
8689 envp = g_new0(char *, envc + 1);
8691 for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8692 if (get_user_ual(addr, gp)) {
8693 goto execve_efault;
8695 if (!addr) {
8696 break;
8698 *q = lock_user_string(addr);
8699 if (!*q) {
8700 goto execve_efault;
8703 *q = NULL;
8705 for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8706 if (get_user_ual(addr, gp)) {
8707 goto execve_efault;
8709 if (!addr) {
8710 break;
8712 *q = lock_user_string(addr);
8713 if (!*q) {
8714 goto execve_efault;
8717 *q = NULL;
8720 * Although execve() is not an interruptible syscall it is
8721 * a special case where we must use the safe_syscall wrapper:
8722 * if we allow a signal to happen before we make the host
8723 * syscall then we will 'lose' it, because at the point of
8724 * execve the process leaves QEMU's control. So we use the
8725 * safe syscall wrapper to ensure that we either take the
8726 * signal as a guest signal, or else it does not happen
8727 * before the execve completes and makes it the other
8728 * program's problem.
8730 p = lock_user_string(pathname);
8731 if (!p) {
8732 goto execve_efault;
8735 const char *exe = p;
8736 if (is_proc_myself(p, "exe")) {
8737 exe = exec_path;
8739 ret = is_execveat
8740 ? safe_execveat(dirfd, exe, argp, envp, flags)
8741 : safe_execve(exe, argp, envp);
8742 ret = get_errno(ret);
8744 unlock_user(p, pathname, 0);
8746 goto execve_end;
8748 execve_efault:
8749 ret = -TARGET_EFAULT;
8751 execve_end:
8752 for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8753 if (get_user_ual(addr, gp) || !addr) {
8754 break;
8756 unlock_user(*q, addr, 0);
8758 for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8759 if (get_user_ual(addr, gp) || !addr) {
8760 break;
8762 unlock_user(*q, addr, 0);
8765 g_free(argp);
8766 g_free(envp);
8767 return ret;
8770 #define TIMER_MAGIC 0x0caf0000
8771 #define TIMER_MAGIC_MASK 0xffff0000
8773 /* Convert QEMU provided timer ID back to internal 16bit index format */
8774 static target_timer_t get_timer_id(abi_long arg)
8776 target_timer_t timerid = arg;
8778 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8779 return -TARGET_EINVAL;
8782 timerid &= 0xffff;
8784 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8785 return -TARGET_EINVAL;
8788 return timerid;
8791 static int target_to_host_cpu_mask(unsigned long *host_mask,
8792 size_t host_size,
8793 abi_ulong target_addr,
8794 size_t target_size)
8796 unsigned target_bits = sizeof(abi_ulong) * 8;
8797 unsigned host_bits = sizeof(*host_mask) * 8;
8798 abi_ulong *target_mask;
8799 unsigned i, j;
8801 assert(host_size >= target_size);
8803 target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8804 if (!target_mask) {
8805 return -TARGET_EFAULT;
8807 memset(host_mask, 0, host_size);
8809 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8810 unsigned bit = i * target_bits;
8811 abi_ulong val;
8813 __get_user(val, &target_mask[i]);
8814 for (j = 0; j < target_bits; j++, bit++) {
8815 if (val & (1UL << j)) {
8816 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8821 unlock_user(target_mask, target_addr, 0);
8822 return 0;
8825 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8826 size_t host_size,
8827 abi_ulong target_addr,
8828 size_t target_size)
8830 unsigned target_bits = sizeof(abi_ulong) * 8;
8831 unsigned host_bits = sizeof(*host_mask) * 8;
8832 abi_ulong *target_mask;
8833 unsigned i, j;
8835 assert(host_size >= target_size);
8837 target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8838 if (!target_mask) {
8839 return -TARGET_EFAULT;
8842 for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8843 unsigned bit = i * target_bits;
8844 abi_ulong val = 0;
8846 for (j = 0; j < target_bits; j++, bit++) {
8847 if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8848 val |= 1UL << j;
8851 __put_user(val, &target_mask[i]);
8854 unlock_user(target_mask, target_addr, target_size);
8855 return 0;
8858 #ifdef TARGET_NR_getdents
8859 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8861 g_autofree void *hdirp = NULL;
8862 void *tdirp;
8863 int hlen, hoff, toff;
8864 int hreclen, treclen;
8865 off64_t prev_diroff = 0;
8867 hdirp = g_try_malloc(count);
8868 if (!hdirp) {
8869 return -TARGET_ENOMEM;
8872 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8873 hlen = sys_getdents(dirfd, hdirp, count);
8874 #else
8875 hlen = sys_getdents64(dirfd, hdirp, count);
8876 #endif
8878 hlen = get_errno(hlen);
8879 if (is_error(hlen)) {
8880 return hlen;
8883 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8884 if (!tdirp) {
8885 return -TARGET_EFAULT;
8888 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8889 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8890 struct linux_dirent *hde = hdirp + hoff;
8891 #else
8892 struct linux_dirent64 *hde = hdirp + hoff;
8893 #endif
8894 struct target_dirent *tde = tdirp + toff;
8895 int namelen;
8896 uint8_t type;
8898 namelen = strlen(hde->d_name);
8899 hreclen = hde->d_reclen;
8900 treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8901 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8903 if (toff + treclen > count) {
8905 * If the host struct is smaller than the target struct, or
8906 * requires less alignment and thus packs into less space,
8907 * then the host can return more entries than we can pass
8908 * on to the guest.
8910 if (toff == 0) {
8911 toff = -TARGET_EINVAL; /* result buffer is too small */
8912 break;
8915 * Return what we have, resetting the file pointer to the
8916 * location of the first record not returned.
8918 lseek64(dirfd, prev_diroff, SEEK_SET);
8919 break;
8922 prev_diroff = hde->d_off;
8923 tde->d_ino = tswapal(hde->d_ino);
8924 tde->d_off = tswapal(hde->d_off);
8925 tde->d_reclen = tswap16(treclen);
8926 memcpy(tde->d_name, hde->d_name, namelen + 1);
8929 * The getdents type is in what was formerly a padding byte at the
8930 * end of the structure.
8932 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8933 type = *((uint8_t *)hde + hreclen - 1);
8934 #else
8935 type = hde->d_type;
8936 #endif
8937 *((uint8_t *)tde + treclen - 1) = type;
8940 unlock_user(tdirp, arg2, toff);
8941 return toff;
8943 #endif /* TARGET_NR_getdents */
8945 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8946 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8948 g_autofree void *hdirp = NULL;
8949 void *tdirp;
8950 int hlen, hoff, toff;
8951 int hreclen, treclen;
8952 off64_t prev_diroff = 0;
8954 hdirp = g_try_malloc(count);
8955 if (!hdirp) {
8956 return -TARGET_ENOMEM;
8959 hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8960 if (is_error(hlen)) {
8961 return hlen;
8964 tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8965 if (!tdirp) {
8966 return -TARGET_EFAULT;
8969 for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8970 struct linux_dirent64 *hde = hdirp + hoff;
8971 struct target_dirent64 *tde = tdirp + toff;
8972 int namelen;
8974 namelen = strlen(hde->d_name) + 1;
8975 hreclen = hde->d_reclen;
8976 treclen = offsetof(struct target_dirent64, d_name) + namelen;
8977 treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8979 if (toff + treclen > count) {
8981 * If the host struct is smaller than the target struct, or
8982 * requires less alignment and thus packs into less space,
8983 * then the host can return more entries than we can pass
8984 * on to the guest.
8986 if (toff == 0) {
8987 toff = -TARGET_EINVAL; /* result buffer is too small */
8988 break;
8991 * Return what we have, resetting the file pointer to the
8992 * location of the first record not returned.
8994 lseek64(dirfd, prev_diroff, SEEK_SET);
8995 break;
8998 prev_diroff = hde->d_off;
8999 tde->d_ino = tswap64(hde->d_ino);
9000 tde->d_off = tswap64(hde->d_off);
9001 tde->d_reclen = tswap16(treclen);
9002 tde->d_type = hde->d_type;
9003 memcpy(tde->d_name, hde->d_name, namelen);
9006 unlock_user(tdirp, arg2, toff);
9007 return toff;
9009 #endif /* TARGET_NR_getdents64 */
9011 #if defined(TARGET_NR_riscv_hwprobe)
9013 #define RISCV_HWPROBE_KEY_MVENDORID 0
9014 #define RISCV_HWPROBE_KEY_MARCHID 1
9015 #define RISCV_HWPROBE_KEY_MIMPID 2
9017 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
9018 #define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
9020 #define RISCV_HWPROBE_KEY_IMA_EXT_0 4
9021 #define RISCV_HWPROBE_IMA_FD (1 << 0)
9022 #define RISCV_HWPROBE_IMA_C (1 << 1)
9024 #define RISCV_HWPROBE_KEY_CPUPERF_0 5
9025 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
9026 #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
9027 #define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
9028 #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
9029 #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9030 #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
9032 struct riscv_hwprobe {
9033 abi_llong key;
9034 abi_ullong value;
9037 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9038 struct riscv_hwprobe *pair,
9039 size_t pair_count)
9041 const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9043 for (; pair_count > 0; pair_count--, pair++) {
9044 abi_llong key;
9045 abi_ullong value;
9046 __put_user(0, &pair->value);
9047 __get_user(key, &pair->key);
9048 switch (key) {
9049 case RISCV_HWPROBE_KEY_MVENDORID:
9050 __put_user(cfg->mvendorid, &pair->value);
9051 break;
9052 case RISCV_HWPROBE_KEY_MARCHID:
9053 __put_user(cfg->marchid, &pair->value);
9054 break;
9055 case RISCV_HWPROBE_KEY_MIMPID:
9056 __put_user(cfg->mimpid, &pair->value);
9057 break;
9058 case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9059 value = riscv_has_ext(env, RVI) &&
9060 riscv_has_ext(env, RVM) &&
9061 riscv_has_ext(env, RVA) ?
9062 RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9063 __put_user(value, &pair->value);
9064 break;
9065 case RISCV_HWPROBE_KEY_IMA_EXT_0:
9066 value = riscv_has_ext(env, RVF) &&
9067 riscv_has_ext(env, RVD) ?
9068 RISCV_HWPROBE_IMA_FD : 0;
9069 value |= riscv_has_ext(env, RVC) ?
9070 RISCV_HWPROBE_IMA_C : pair->value;
9071 __put_user(value, &pair->value);
9072 break;
9073 case RISCV_HWPROBE_KEY_CPUPERF_0:
9074 __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9075 break;
9076 default:
9077 __put_user(-1, &pair->key);
9078 break;
9083 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9085 int ret, i, tmp;
9086 size_t host_mask_size, target_mask_size;
9087 unsigned long *host_mask;
9090 * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9091 * arg3 contains the cpu count.
9093 tmp = (8 * sizeof(abi_ulong));
9094 target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9095 host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9096 ~(sizeof(*host_mask) - 1);
9098 host_mask = alloca(host_mask_size);
9100 ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9101 arg4, target_mask_size);
9102 if (ret != 0) {
9103 return ret;
9106 for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9107 if (host_mask[i] != 0) {
9108 return 0;
9111 return -TARGET_EINVAL;
9114 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9115 abi_long arg2, abi_long arg3,
9116 abi_long arg4, abi_long arg5)
9118 int ret;
9119 struct riscv_hwprobe *host_pairs;
9121 /* flags must be 0 */
9122 if (arg5 != 0) {
9123 return -TARGET_EINVAL;
9126 /* check cpu_set */
9127 if (arg3 != 0) {
9128 ret = cpu_set_valid(arg3, arg4);
9129 if (ret != 0) {
9130 return ret;
9132 } else if (arg4 != 0) {
9133 return -TARGET_EINVAL;
9136 /* no pairs */
9137 if (arg2 == 0) {
9138 return 0;
9141 host_pairs = lock_user(VERIFY_WRITE, arg1,
9142 sizeof(*host_pairs) * (size_t)arg2, 0);
9143 if (host_pairs == NULL) {
9144 return -TARGET_EFAULT;
9146 risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9147 unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9148 return 0;
9150 #endif /* TARGET_NR_riscv_hwprobe */
9152 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9153 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9154 #endif
9156 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9157 #define __NR_sys_open_tree __NR_open_tree
9158 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9159 unsigned int, __flags)
9160 #endif
9162 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9163 #define __NR_sys_move_mount __NR_move_mount
9164 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9165 int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9166 #endif
9168 /* This is an internal helper for do_syscall so that it is easier
9169 * to have a single return point, so that actions, such as logging
9170 * of syscall results, can be performed.
9171 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9173 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9174 abi_long arg2, abi_long arg3, abi_long arg4,
9175 abi_long arg5, abi_long arg6, abi_long arg7,
9176 abi_long arg8)
9178 CPUState *cpu = env_cpu(cpu_env);
9179 abi_long ret;
9180 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9181 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9182 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9183 || defined(TARGET_NR_statx)
9184 struct stat st;
9185 #endif
9186 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9187 || defined(TARGET_NR_fstatfs)
9188 struct statfs stfs;
9189 #endif
9190 void *p;
9192 switch(num) {
9193 case TARGET_NR_exit:
9194 /* In old applications this may be used to implement _exit(2).
9195 However in threaded applications it is used for thread termination,
9196 and _exit_group is used for application termination.
9197 Do thread termination if we have more then one thread. */
9199 if (block_signals()) {
9200 return -QEMU_ERESTARTSYS;
9203 pthread_mutex_lock(&clone_lock);
9205 if (CPU_NEXT(first_cpu)) {
9206 TaskState *ts = cpu->opaque;
9208 if (ts->child_tidptr) {
9209 put_user_u32(0, ts->child_tidptr);
9210 do_sys_futex(g2h(cpu, ts->child_tidptr),
9211 FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9214 object_unparent(OBJECT(cpu));
9215 object_unref(OBJECT(cpu));
9217 * At this point the CPU should be unrealized and removed
9218 * from cpu lists. We can clean-up the rest of the thread
9219 * data without the lock held.
9222 pthread_mutex_unlock(&clone_lock);
9224 thread_cpu = NULL;
9225 g_free(ts);
9226 rcu_unregister_thread();
9227 pthread_exit(NULL);
9230 pthread_mutex_unlock(&clone_lock);
9231 preexit_cleanup(cpu_env, arg1);
9232 _exit(arg1);
9233 return 0; /* avoid warning */
9234 case TARGET_NR_read:
9235 if (arg2 == 0 && arg3 == 0) {
9236 return get_errno(safe_read(arg1, 0, 0));
9237 } else {
9238 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9239 return -TARGET_EFAULT;
9240 ret = get_errno(safe_read(arg1, p, arg3));
9241 if (ret >= 0 &&
9242 fd_trans_host_to_target_data(arg1)) {
9243 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9245 unlock_user(p, arg2, ret);
9247 return ret;
9248 case TARGET_NR_write:
9249 if (arg2 == 0 && arg3 == 0) {
9250 return get_errno(safe_write(arg1, 0, 0));
9252 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9253 return -TARGET_EFAULT;
9254 if (fd_trans_target_to_host_data(arg1)) {
9255 void *copy = g_malloc(arg3);
9256 memcpy(copy, p, arg3);
9257 ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9258 if (ret >= 0) {
9259 ret = get_errno(safe_write(arg1, copy, ret));
9261 g_free(copy);
9262 } else {
9263 ret = get_errno(safe_write(arg1, p, arg3));
9265 unlock_user(p, arg2, 0);
9266 return ret;
9268 #ifdef TARGET_NR_open
9269 case TARGET_NR_open:
9270 if (!(p = lock_user_string(arg1)))
9271 return -TARGET_EFAULT;
9272 ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9273 target_to_host_bitmask(arg2, fcntl_flags_tbl),
9274 arg3, true));
9275 fd_trans_unregister(ret);
9276 unlock_user(p, arg1, 0);
9277 return ret;
9278 #endif
9279 case TARGET_NR_openat:
9280 if (!(p = lock_user_string(arg2)))
9281 return -TARGET_EFAULT;
9282 ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9283 target_to_host_bitmask(arg3, fcntl_flags_tbl),
9284 arg4, true));
9285 fd_trans_unregister(ret);
9286 unlock_user(p, arg2, 0);
9287 return ret;
9288 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9289 case TARGET_NR_name_to_handle_at:
9290 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9291 return ret;
9292 #endif
9293 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9294 case TARGET_NR_open_by_handle_at:
9295 ret = do_open_by_handle_at(arg1, arg2, arg3);
9296 fd_trans_unregister(ret);
9297 return ret;
9298 #endif
9299 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9300 case TARGET_NR_pidfd_open:
9301 return get_errno(pidfd_open(arg1, arg2));
9302 #endif
9303 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9304 case TARGET_NR_pidfd_send_signal:
9306 siginfo_t uinfo, *puinfo;
9308 if (arg3) {
9309 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9310 if (!p) {
9311 return -TARGET_EFAULT;
9313 target_to_host_siginfo(&uinfo, p);
9314 unlock_user(p, arg3, 0);
9315 puinfo = &uinfo;
9316 } else {
9317 puinfo = NULL;
9319 ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9320 puinfo, arg4));
9322 return ret;
9323 #endif
9324 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9325 case TARGET_NR_pidfd_getfd:
9326 return get_errno(pidfd_getfd(arg1, arg2, arg3));
9327 #endif
9328 case TARGET_NR_close:
9329 fd_trans_unregister(arg1);
9330 return get_errno(close(arg1));
9331 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9332 case TARGET_NR_close_range:
9333 ret = get_errno(sys_close_range(arg1, arg2, arg3));
9334 if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9335 abi_long fd, maxfd;
9336 maxfd = MIN(arg2, target_fd_max);
9337 for (fd = arg1; fd < maxfd; fd++) {
9338 fd_trans_unregister(fd);
9341 return ret;
9342 #endif
9344 case TARGET_NR_brk:
9345 return do_brk(arg1);
9346 #ifdef TARGET_NR_fork
9347 case TARGET_NR_fork:
9348 return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9349 #endif
9350 #ifdef TARGET_NR_waitpid
9351 case TARGET_NR_waitpid:
9353 int status;
9354 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9355 if (!is_error(ret) && arg2 && ret
9356 && put_user_s32(host_to_target_waitstatus(status), arg2))
9357 return -TARGET_EFAULT;
9359 return ret;
9360 #endif
9361 #ifdef TARGET_NR_waitid
9362 case TARGET_NR_waitid:
9364 siginfo_t info;
9365 info.si_pid = 0;
9366 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9367 if (!is_error(ret) && arg3 && info.si_pid != 0) {
9368 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9369 return -TARGET_EFAULT;
9370 host_to_target_siginfo(p, &info);
9371 unlock_user(p, arg3, sizeof(target_siginfo_t));
9374 return ret;
9375 #endif
9376 #ifdef TARGET_NR_creat /* not on alpha */
9377 case TARGET_NR_creat:
9378 if (!(p = lock_user_string(arg1)))
9379 return -TARGET_EFAULT;
9380 ret = get_errno(creat(p, arg2));
9381 fd_trans_unregister(ret);
9382 unlock_user(p, arg1, 0);
9383 return ret;
9384 #endif
9385 #ifdef TARGET_NR_link
9386 case TARGET_NR_link:
9388 void * p2;
9389 p = lock_user_string(arg1);
9390 p2 = lock_user_string(arg2);
9391 if (!p || !p2)
9392 ret = -TARGET_EFAULT;
9393 else
9394 ret = get_errno(link(p, p2));
9395 unlock_user(p2, arg2, 0);
9396 unlock_user(p, arg1, 0);
9398 return ret;
9399 #endif
9400 #if defined(TARGET_NR_linkat)
9401 case TARGET_NR_linkat:
9403 void * p2 = NULL;
9404 if (!arg2 || !arg4)
9405 return -TARGET_EFAULT;
9406 p = lock_user_string(arg2);
9407 p2 = lock_user_string(arg4);
9408 if (!p || !p2)
9409 ret = -TARGET_EFAULT;
9410 else
9411 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9412 unlock_user(p, arg2, 0);
9413 unlock_user(p2, arg4, 0);
9415 return ret;
9416 #endif
9417 #ifdef TARGET_NR_unlink
9418 case TARGET_NR_unlink:
9419 if (!(p = lock_user_string(arg1)))
9420 return -TARGET_EFAULT;
9421 ret = get_errno(unlink(p));
9422 unlock_user(p, arg1, 0);
9423 return ret;
9424 #endif
9425 #if defined(TARGET_NR_unlinkat)
9426 case TARGET_NR_unlinkat:
9427 if (!(p = lock_user_string(arg2)))
9428 return -TARGET_EFAULT;
9429 ret = get_errno(unlinkat(arg1, p, arg3));
9430 unlock_user(p, arg2, 0);
9431 return ret;
9432 #endif
9433 case TARGET_NR_execveat:
9434 return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9435 case TARGET_NR_execve:
9436 return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9437 case TARGET_NR_chdir:
9438 if (!(p = lock_user_string(arg1)))
9439 return -TARGET_EFAULT;
9440 ret = get_errno(chdir(p));
9441 unlock_user(p, arg1, 0);
9442 return ret;
9443 #ifdef TARGET_NR_time
9444 case TARGET_NR_time:
9446 time_t host_time;
9447 ret = get_errno(time(&host_time));
9448 if (!is_error(ret)
9449 && arg1
9450 && put_user_sal(host_time, arg1))
9451 return -TARGET_EFAULT;
9453 return ret;
9454 #endif
9455 #ifdef TARGET_NR_mknod
9456 case TARGET_NR_mknod:
9457 if (!(p = lock_user_string(arg1)))
9458 return -TARGET_EFAULT;
9459 ret = get_errno(mknod(p, arg2, arg3));
9460 unlock_user(p, arg1, 0);
9461 return ret;
9462 #endif
9463 #if defined(TARGET_NR_mknodat)
9464 case TARGET_NR_mknodat:
9465 if (!(p = lock_user_string(arg2)))
9466 return -TARGET_EFAULT;
9467 ret = get_errno(mknodat(arg1, p, arg3, arg4));
9468 unlock_user(p, arg2, 0);
9469 return ret;
9470 #endif
9471 #ifdef TARGET_NR_chmod
9472 case TARGET_NR_chmod:
9473 if (!(p = lock_user_string(arg1)))
9474 return -TARGET_EFAULT;
9475 ret = get_errno(chmod(p, arg2));
9476 unlock_user(p, arg1, 0);
9477 return ret;
9478 #endif
9479 #ifdef TARGET_NR_lseek
9480 case TARGET_NR_lseek:
9481 return get_errno(lseek(arg1, arg2, arg3));
9482 #endif
9483 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9484 /* Alpha specific */
9485 case TARGET_NR_getxpid:
9486 cpu_env->ir[IR_A4] = getppid();
9487 return get_errno(getpid());
9488 #endif
9489 #ifdef TARGET_NR_getpid
9490 case TARGET_NR_getpid:
9491 return get_errno(getpid());
9492 #endif
9493 case TARGET_NR_mount:
9495 /* need to look at the data field */
9496 void *p2, *p3;
9498 if (arg1) {
9499 p = lock_user_string(arg1);
9500 if (!p) {
9501 return -TARGET_EFAULT;
9503 } else {
9504 p = NULL;
9507 p2 = lock_user_string(arg2);
9508 if (!p2) {
9509 if (arg1) {
9510 unlock_user(p, arg1, 0);
9512 return -TARGET_EFAULT;
9515 if (arg3) {
9516 p3 = lock_user_string(arg3);
9517 if (!p3) {
9518 if (arg1) {
9519 unlock_user(p, arg1, 0);
9521 unlock_user(p2, arg2, 0);
9522 return -TARGET_EFAULT;
9524 } else {
9525 p3 = NULL;
9528 /* FIXME - arg5 should be locked, but it isn't clear how to
9529 * do that since it's not guaranteed to be a NULL-terminated
9530 * string.
9532 if (!arg5) {
9533 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9534 } else {
9535 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9537 ret = get_errno(ret);
9539 if (arg1) {
9540 unlock_user(p, arg1, 0);
9542 unlock_user(p2, arg2, 0);
9543 if (arg3) {
9544 unlock_user(p3, arg3, 0);
9547 return ret;
9548 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9549 #if defined(TARGET_NR_umount)
9550 case TARGET_NR_umount:
9551 #endif
9552 #if defined(TARGET_NR_oldumount)
9553 case TARGET_NR_oldumount:
9554 #endif
9555 if (!(p = lock_user_string(arg1)))
9556 return -TARGET_EFAULT;
9557 ret = get_errno(umount(p));
9558 unlock_user(p, arg1, 0);
9559 return ret;
9560 #endif
9561 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9562 case TARGET_NR_move_mount:
9564 void *p2, *p4;
9566 if (!arg2 || !arg4) {
9567 return -TARGET_EFAULT;
9570 p2 = lock_user_string(arg2);
9571 if (!p2) {
9572 return -TARGET_EFAULT;
9575 p4 = lock_user_string(arg4);
9576 if (!p4) {
9577 unlock_user(p2, arg2, 0);
9578 return -TARGET_EFAULT;
9580 ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9582 unlock_user(p2, arg2, 0);
9583 unlock_user(p4, arg4, 0);
9585 return ret;
9587 #endif
9588 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9589 case TARGET_NR_open_tree:
9591 void *p2;
9592 int host_flags;
9594 if (!arg2) {
9595 return -TARGET_EFAULT;
9598 p2 = lock_user_string(arg2);
9599 if (!p2) {
9600 return -TARGET_EFAULT;
9603 host_flags = arg3 & ~TARGET_O_CLOEXEC;
9604 if (arg3 & TARGET_O_CLOEXEC) {
9605 host_flags |= O_CLOEXEC;
9608 ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9610 unlock_user(p2, arg2, 0);
9612 return ret;
9614 #endif
9615 #ifdef TARGET_NR_stime /* not on alpha */
9616 case TARGET_NR_stime:
9618 struct timespec ts;
9619 ts.tv_nsec = 0;
9620 if (get_user_sal(ts.tv_sec, arg1)) {
9621 return -TARGET_EFAULT;
9623 return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9625 #endif
9626 #ifdef TARGET_NR_alarm /* not on alpha */
9627 case TARGET_NR_alarm:
9628 return alarm(arg1);
9629 #endif
9630 #ifdef TARGET_NR_pause /* not on alpha */
9631 case TARGET_NR_pause:
9632 if (!block_signals()) {
9633 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9635 return -TARGET_EINTR;
9636 #endif
9637 #ifdef TARGET_NR_utime
9638 case TARGET_NR_utime:
9640 struct utimbuf tbuf, *host_tbuf;
9641 struct target_utimbuf *target_tbuf;
9642 if (arg2) {
9643 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9644 return -TARGET_EFAULT;
9645 tbuf.actime = tswapal(target_tbuf->actime);
9646 tbuf.modtime = tswapal(target_tbuf->modtime);
9647 unlock_user_struct(target_tbuf, arg2, 0);
9648 host_tbuf = &tbuf;
9649 } else {
9650 host_tbuf = NULL;
9652 if (!(p = lock_user_string(arg1)))
9653 return -TARGET_EFAULT;
9654 ret = get_errno(utime(p, host_tbuf));
9655 unlock_user(p, arg1, 0);
9657 return ret;
9658 #endif
9659 #ifdef TARGET_NR_utimes
9660 case TARGET_NR_utimes:
9662 struct timeval *tvp, tv[2];
9663 if (arg2) {
9664 if (copy_from_user_timeval(&tv[0], arg2)
9665 || copy_from_user_timeval(&tv[1],
9666 arg2 + sizeof(struct target_timeval)))
9667 return -TARGET_EFAULT;
9668 tvp = tv;
9669 } else {
9670 tvp = NULL;
9672 if (!(p = lock_user_string(arg1)))
9673 return -TARGET_EFAULT;
9674 ret = get_errno(utimes(p, tvp));
9675 unlock_user(p, arg1, 0);
9677 return ret;
9678 #endif
9679 #if defined(TARGET_NR_futimesat)
9680 case TARGET_NR_futimesat:
9682 struct timeval *tvp, tv[2];
9683 if (arg3) {
9684 if (copy_from_user_timeval(&tv[0], arg3)
9685 || copy_from_user_timeval(&tv[1],
9686 arg3 + sizeof(struct target_timeval)))
9687 return -TARGET_EFAULT;
9688 tvp = tv;
9689 } else {
9690 tvp = NULL;
9692 if (!(p = lock_user_string(arg2))) {
9693 return -TARGET_EFAULT;
9695 ret = get_errno(futimesat(arg1, path(p), tvp));
9696 unlock_user(p, arg2, 0);
9698 return ret;
9699 #endif
9700 #ifdef TARGET_NR_access
9701 case TARGET_NR_access:
9702 if (!(p = lock_user_string(arg1))) {
9703 return -TARGET_EFAULT;
9705 ret = get_errno(access(path(p), arg2));
9706 unlock_user(p, arg1, 0);
9707 return ret;
9708 #endif
9709 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9710 case TARGET_NR_faccessat:
9711 if (!(p = lock_user_string(arg2))) {
9712 return -TARGET_EFAULT;
9714 ret = get_errno(faccessat(arg1, p, arg3, 0));
9715 unlock_user(p, arg2, 0);
9716 return ret;
9717 #endif
9718 #if defined(TARGET_NR_faccessat2)
9719 case TARGET_NR_faccessat2:
9720 if (!(p = lock_user_string(arg2))) {
9721 return -TARGET_EFAULT;
9723 ret = get_errno(faccessat(arg1, p, arg3, arg4));
9724 unlock_user(p, arg2, 0);
9725 return ret;
9726 #endif
9727 #ifdef TARGET_NR_nice /* not on alpha */
9728 case TARGET_NR_nice:
9729 return get_errno(nice(arg1));
9730 #endif
9731 case TARGET_NR_sync:
9732 sync();
9733 return 0;
9734 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9735 case TARGET_NR_syncfs:
9736 return get_errno(syncfs(arg1));
9737 #endif
9738 case TARGET_NR_kill:
9739 return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9740 #ifdef TARGET_NR_rename
9741 case TARGET_NR_rename:
9743 void *p2;
9744 p = lock_user_string(arg1);
9745 p2 = lock_user_string(arg2);
9746 if (!p || !p2)
9747 ret = -TARGET_EFAULT;
9748 else
9749 ret = get_errno(rename(p, p2));
9750 unlock_user(p2, arg2, 0);
9751 unlock_user(p, arg1, 0);
9753 return ret;
9754 #endif
9755 #if defined(TARGET_NR_renameat)
9756 case TARGET_NR_renameat:
9758 void *p2;
9759 p = lock_user_string(arg2);
9760 p2 = lock_user_string(arg4);
9761 if (!p || !p2)
9762 ret = -TARGET_EFAULT;
9763 else
9764 ret = get_errno(renameat(arg1, p, arg3, p2));
9765 unlock_user(p2, arg4, 0);
9766 unlock_user(p, arg2, 0);
9768 return ret;
9769 #endif
9770 #if defined(TARGET_NR_renameat2)
9771 case TARGET_NR_renameat2:
9773 void *p2;
9774 p = lock_user_string(arg2);
9775 p2 = lock_user_string(arg4);
9776 if (!p || !p2) {
9777 ret = -TARGET_EFAULT;
9778 } else {
9779 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9781 unlock_user(p2, arg4, 0);
9782 unlock_user(p, arg2, 0);
9784 return ret;
9785 #endif
9786 #ifdef TARGET_NR_mkdir
9787 case TARGET_NR_mkdir:
9788 if (!(p = lock_user_string(arg1)))
9789 return -TARGET_EFAULT;
9790 ret = get_errno(mkdir(p, arg2));
9791 unlock_user(p, arg1, 0);
9792 return ret;
9793 #endif
9794 #if defined(TARGET_NR_mkdirat)
9795 case TARGET_NR_mkdirat:
9796 if (!(p = lock_user_string(arg2)))
9797 return -TARGET_EFAULT;
9798 ret = get_errno(mkdirat(arg1, p, arg3));
9799 unlock_user(p, arg2, 0);
9800 return ret;
9801 #endif
9802 #ifdef TARGET_NR_rmdir
9803 case TARGET_NR_rmdir:
9804 if (!(p = lock_user_string(arg1)))
9805 return -TARGET_EFAULT;
9806 ret = get_errno(rmdir(p));
9807 unlock_user(p, arg1, 0);
9808 return ret;
9809 #endif
9810 case TARGET_NR_dup:
9811 ret = get_errno(dup(arg1));
9812 if (ret >= 0) {
9813 fd_trans_dup(arg1, ret);
9815 return ret;
9816 #ifdef TARGET_NR_pipe
9817 case TARGET_NR_pipe:
9818 return do_pipe(cpu_env, arg1, 0, 0);
9819 #endif
9820 #ifdef TARGET_NR_pipe2
9821 case TARGET_NR_pipe2:
9822 return do_pipe(cpu_env, arg1,
9823 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9824 #endif
9825 case TARGET_NR_times:
9827 struct target_tms *tmsp;
9828 struct tms tms;
9829 ret = get_errno(times(&tms));
9830 if (arg1) {
9831 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9832 if (!tmsp)
9833 return -TARGET_EFAULT;
9834 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9835 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9836 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9837 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9839 if (!is_error(ret))
9840 ret = host_to_target_clock_t(ret);
9842 return ret;
9843 case TARGET_NR_acct:
9844 if (arg1 == 0) {
9845 ret = get_errno(acct(NULL));
9846 } else {
9847 if (!(p = lock_user_string(arg1))) {
9848 return -TARGET_EFAULT;
9850 ret = get_errno(acct(path(p)));
9851 unlock_user(p, arg1, 0);
9853 return ret;
9854 #ifdef TARGET_NR_umount2
9855 case TARGET_NR_umount2:
9856 if (!(p = lock_user_string(arg1)))
9857 return -TARGET_EFAULT;
9858 ret = get_errno(umount2(p, arg2));
9859 unlock_user(p, arg1, 0);
9860 return ret;
9861 #endif
9862 case TARGET_NR_ioctl:
9863 return do_ioctl(arg1, arg2, arg3);
9864 #ifdef TARGET_NR_fcntl
9865 case TARGET_NR_fcntl:
9866 return do_fcntl(arg1, arg2, arg3);
9867 #endif
9868 case TARGET_NR_setpgid:
9869 return get_errno(setpgid(arg1, arg2));
9870 case TARGET_NR_umask:
9871 return get_errno(umask(arg1));
9872 case TARGET_NR_chroot:
9873 if (!(p = lock_user_string(arg1)))
9874 return -TARGET_EFAULT;
9875 ret = get_errno(chroot(p));
9876 unlock_user(p, arg1, 0);
9877 return ret;
9878 #ifdef TARGET_NR_dup2
9879 case TARGET_NR_dup2:
9880 ret = get_errno(dup2(arg1, arg2));
9881 if (ret >= 0) {
9882 fd_trans_dup(arg1, arg2);
9884 return ret;
9885 #endif
9886 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9887 case TARGET_NR_dup3:
9889 int host_flags;
9891 if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9892 return -EINVAL;
9894 host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9895 ret = get_errno(dup3(arg1, arg2, host_flags));
9896 if (ret >= 0) {
9897 fd_trans_dup(arg1, arg2);
9899 return ret;
9901 #endif
9902 #ifdef TARGET_NR_getppid /* not on alpha */
9903 case TARGET_NR_getppid:
9904 return get_errno(getppid());
9905 #endif
9906 #ifdef TARGET_NR_getpgrp
9907 case TARGET_NR_getpgrp:
9908 return get_errno(getpgrp());
9909 #endif
9910 case TARGET_NR_setsid:
9911 return get_errno(setsid());
9912 #ifdef TARGET_NR_sigaction
9913 case TARGET_NR_sigaction:
9915 #if defined(TARGET_MIPS)
9916 struct target_sigaction act, oact, *pact, *old_act;
9918 if (arg2) {
9919 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9920 return -TARGET_EFAULT;
9921 act._sa_handler = old_act->_sa_handler;
9922 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9923 act.sa_flags = old_act->sa_flags;
9924 unlock_user_struct(old_act, arg2, 0);
9925 pact = &act;
9926 } else {
9927 pact = NULL;
9930 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9932 if (!is_error(ret) && arg3) {
9933 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9934 return -TARGET_EFAULT;
9935 old_act->_sa_handler = oact._sa_handler;
9936 old_act->sa_flags = oact.sa_flags;
9937 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9938 old_act->sa_mask.sig[1] = 0;
9939 old_act->sa_mask.sig[2] = 0;
9940 old_act->sa_mask.sig[3] = 0;
9941 unlock_user_struct(old_act, arg3, 1);
9943 #else
9944 struct target_old_sigaction *old_act;
9945 struct target_sigaction act, oact, *pact;
9946 if (arg2) {
9947 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9948 return -TARGET_EFAULT;
9949 act._sa_handler = old_act->_sa_handler;
9950 target_siginitset(&act.sa_mask, old_act->sa_mask);
9951 act.sa_flags = old_act->sa_flags;
9952 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9953 act.sa_restorer = old_act->sa_restorer;
9954 #endif
9955 unlock_user_struct(old_act, arg2, 0);
9956 pact = &act;
9957 } else {
9958 pact = NULL;
9960 ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9961 if (!is_error(ret) && arg3) {
9962 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9963 return -TARGET_EFAULT;
9964 old_act->_sa_handler = oact._sa_handler;
9965 old_act->sa_mask = oact.sa_mask.sig[0];
9966 old_act->sa_flags = oact.sa_flags;
9967 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9968 old_act->sa_restorer = oact.sa_restorer;
9969 #endif
9970 unlock_user_struct(old_act, arg3, 1);
9972 #endif
9974 return ret;
9975 #endif
9976 case TARGET_NR_rt_sigaction:
9979 * For Alpha and SPARC this is a 5 argument syscall, with
9980 * a 'restorer' parameter which must be copied into the
9981 * sa_restorer field of the sigaction struct.
9982 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9983 * and arg5 is the sigsetsize.
9985 #if defined(TARGET_ALPHA)
9986 target_ulong sigsetsize = arg4;
9987 target_ulong restorer = arg5;
9988 #elif defined(TARGET_SPARC)
9989 target_ulong restorer = arg4;
9990 target_ulong sigsetsize = arg5;
9991 #else
9992 target_ulong sigsetsize = arg4;
9993 target_ulong restorer = 0;
9994 #endif
9995 struct target_sigaction *act = NULL;
9996 struct target_sigaction *oact = NULL;
9998 if (sigsetsize != sizeof(target_sigset_t)) {
9999 return -TARGET_EINVAL;
10001 if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10002 return -TARGET_EFAULT;
10004 if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10005 ret = -TARGET_EFAULT;
10006 } else {
10007 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10008 if (oact) {
10009 unlock_user_struct(oact, arg3, 1);
10012 if (act) {
10013 unlock_user_struct(act, arg2, 0);
10016 return ret;
10017 #ifdef TARGET_NR_sgetmask /* not on alpha */
10018 case TARGET_NR_sgetmask:
10020 sigset_t cur_set;
10021 abi_ulong target_set;
10022 ret = do_sigprocmask(0, NULL, &cur_set);
10023 if (!ret) {
10024 host_to_target_old_sigset(&target_set, &cur_set);
10025 ret = target_set;
10028 return ret;
10029 #endif
10030 #ifdef TARGET_NR_ssetmask /* not on alpha */
10031 case TARGET_NR_ssetmask:
10033 sigset_t set, oset;
10034 abi_ulong target_set = arg1;
10035 target_to_host_old_sigset(&set, &target_set);
10036 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10037 if (!ret) {
10038 host_to_target_old_sigset(&target_set, &oset);
10039 ret = target_set;
10042 return ret;
10043 #endif
10044 #ifdef TARGET_NR_sigprocmask
10045 case TARGET_NR_sigprocmask:
10047 #if defined(TARGET_ALPHA)
10048 sigset_t set, oldset;
10049 abi_ulong mask;
10050 int how;
10052 switch (arg1) {
10053 case TARGET_SIG_BLOCK:
10054 how = SIG_BLOCK;
10055 break;
10056 case TARGET_SIG_UNBLOCK:
10057 how = SIG_UNBLOCK;
10058 break;
10059 case TARGET_SIG_SETMASK:
10060 how = SIG_SETMASK;
10061 break;
10062 default:
10063 return -TARGET_EINVAL;
10065 mask = arg2;
10066 target_to_host_old_sigset(&set, &mask);
10068 ret = do_sigprocmask(how, &set, &oldset);
10069 if (!is_error(ret)) {
10070 host_to_target_old_sigset(&mask, &oldset);
10071 ret = mask;
10072 cpu_env->ir[IR_V0] = 0; /* force no error */
10074 #else
10075 sigset_t set, oldset, *set_ptr;
10076 int how;
10078 if (arg2) {
10079 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10080 if (!p) {
10081 return -TARGET_EFAULT;
10083 target_to_host_old_sigset(&set, p);
10084 unlock_user(p, arg2, 0);
10085 set_ptr = &set;
10086 switch (arg1) {
10087 case TARGET_SIG_BLOCK:
10088 how = SIG_BLOCK;
10089 break;
10090 case TARGET_SIG_UNBLOCK:
10091 how = SIG_UNBLOCK;
10092 break;
10093 case TARGET_SIG_SETMASK:
10094 how = SIG_SETMASK;
10095 break;
10096 default:
10097 return -TARGET_EINVAL;
10099 } else {
10100 how = 0;
10101 set_ptr = NULL;
10103 ret = do_sigprocmask(how, set_ptr, &oldset);
10104 if (!is_error(ret) && arg3) {
10105 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10106 return -TARGET_EFAULT;
10107 host_to_target_old_sigset(p, &oldset);
10108 unlock_user(p, arg3, sizeof(target_sigset_t));
10110 #endif
10112 return ret;
10113 #endif
10114 case TARGET_NR_rt_sigprocmask:
10116 int how = arg1;
10117 sigset_t set, oldset, *set_ptr;
10119 if (arg4 != sizeof(target_sigset_t)) {
10120 return -TARGET_EINVAL;
10123 if (arg2) {
10124 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10125 if (!p) {
10126 return -TARGET_EFAULT;
10128 target_to_host_sigset(&set, p);
10129 unlock_user(p, arg2, 0);
10130 set_ptr = &set;
10131 switch(how) {
10132 case TARGET_SIG_BLOCK:
10133 how = SIG_BLOCK;
10134 break;
10135 case TARGET_SIG_UNBLOCK:
10136 how = SIG_UNBLOCK;
10137 break;
10138 case TARGET_SIG_SETMASK:
10139 how = SIG_SETMASK;
10140 break;
10141 default:
10142 return -TARGET_EINVAL;
10144 } else {
10145 how = 0;
10146 set_ptr = NULL;
10148 ret = do_sigprocmask(how, set_ptr, &oldset);
10149 if (!is_error(ret) && arg3) {
10150 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10151 return -TARGET_EFAULT;
10152 host_to_target_sigset(p, &oldset);
10153 unlock_user(p, arg3, sizeof(target_sigset_t));
10156 return ret;
10157 #ifdef TARGET_NR_sigpending
10158 case TARGET_NR_sigpending:
10160 sigset_t set;
10161 ret = get_errno(sigpending(&set));
10162 if (!is_error(ret)) {
10163 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10164 return -TARGET_EFAULT;
10165 host_to_target_old_sigset(p, &set);
10166 unlock_user(p, arg1, sizeof(target_sigset_t));
10169 return ret;
10170 #endif
10171 case TARGET_NR_rt_sigpending:
10173 sigset_t set;
10175 /* Yes, this check is >, not != like most. We follow the kernel's
10176 * logic and it does it like this because it implements
10177 * NR_sigpending through the same code path, and in that case
10178 * the old_sigset_t is smaller in size.
10180 if (arg2 > sizeof(target_sigset_t)) {
10181 return -TARGET_EINVAL;
10184 ret = get_errno(sigpending(&set));
10185 if (!is_error(ret)) {
10186 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10187 return -TARGET_EFAULT;
10188 host_to_target_sigset(p, &set);
10189 unlock_user(p, arg1, sizeof(target_sigset_t));
10192 return ret;
10193 #ifdef TARGET_NR_sigsuspend
10194 case TARGET_NR_sigsuspend:
10196 sigset_t *set;
10198 #if defined(TARGET_ALPHA)
10199 TaskState *ts = cpu->opaque;
10200 /* target_to_host_old_sigset will bswap back */
10201 abi_ulong mask = tswapal(arg1);
10202 set = &ts->sigsuspend_mask;
10203 target_to_host_old_sigset(set, &mask);
10204 #else
10205 ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10206 if (ret != 0) {
10207 return ret;
10209 #endif
10210 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10211 finish_sigsuspend_mask(ret);
10213 return ret;
10214 #endif
10215 case TARGET_NR_rt_sigsuspend:
10217 sigset_t *set;
10219 ret = process_sigsuspend_mask(&set, arg1, arg2);
10220 if (ret != 0) {
10221 return ret;
10223 ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10224 finish_sigsuspend_mask(ret);
10226 return ret;
10227 #ifdef TARGET_NR_rt_sigtimedwait
10228 case TARGET_NR_rt_sigtimedwait:
10230 sigset_t set;
10231 struct timespec uts, *puts;
10232 siginfo_t uinfo;
10234 if (arg4 != sizeof(target_sigset_t)) {
10235 return -TARGET_EINVAL;
10238 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10239 return -TARGET_EFAULT;
10240 target_to_host_sigset(&set, p);
10241 unlock_user(p, arg1, 0);
10242 if (arg3) {
10243 puts = &uts;
10244 if (target_to_host_timespec(puts, arg3)) {
10245 return -TARGET_EFAULT;
10247 } else {
10248 puts = NULL;
10250 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10251 SIGSET_T_SIZE));
10252 if (!is_error(ret)) {
10253 if (arg2) {
10254 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10256 if (!p) {
10257 return -TARGET_EFAULT;
10259 host_to_target_siginfo(p, &uinfo);
10260 unlock_user(p, arg2, sizeof(target_siginfo_t));
10262 ret = host_to_target_signal(ret);
10265 return ret;
10266 #endif
10267 #ifdef TARGET_NR_rt_sigtimedwait_time64
10268 case TARGET_NR_rt_sigtimedwait_time64:
10270 sigset_t set;
10271 struct timespec uts, *puts;
10272 siginfo_t uinfo;
10274 if (arg4 != sizeof(target_sigset_t)) {
10275 return -TARGET_EINVAL;
10278 p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10279 if (!p) {
10280 return -TARGET_EFAULT;
10282 target_to_host_sigset(&set, p);
10283 unlock_user(p, arg1, 0);
10284 if (arg3) {
10285 puts = &uts;
10286 if (target_to_host_timespec64(puts, arg3)) {
10287 return -TARGET_EFAULT;
10289 } else {
10290 puts = NULL;
10292 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10293 SIGSET_T_SIZE));
10294 if (!is_error(ret)) {
10295 if (arg2) {
10296 p = lock_user(VERIFY_WRITE, arg2,
10297 sizeof(target_siginfo_t), 0);
10298 if (!p) {
10299 return -TARGET_EFAULT;
10301 host_to_target_siginfo(p, &uinfo);
10302 unlock_user(p, arg2, sizeof(target_siginfo_t));
10304 ret = host_to_target_signal(ret);
10307 return ret;
10308 #endif
10309 case TARGET_NR_rt_sigqueueinfo:
10311 siginfo_t uinfo;
10313 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10314 if (!p) {
10315 return -TARGET_EFAULT;
10317 target_to_host_siginfo(&uinfo, p);
10318 unlock_user(p, arg3, 0);
10319 ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10321 return ret;
10322 case TARGET_NR_rt_tgsigqueueinfo:
10324 siginfo_t uinfo;
10326 p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10327 if (!p) {
10328 return -TARGET_EFAULT;
10330 target_to_host_siginfo(&uinfo, p);
10331 unlock_user(p, arg4, 0);
10332 ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10334 return ret;
10335 #ifdef TARGET_NR_sigreturn
10336 case TARGET_NR_sigreturn:
10337 if (block_signals()) {
10338 return -QEMU_ERESTARTSYS;
10340 return do_sigreturn(cpu_env);
10341 #endif
10342 case TARGET_NR_rt_sigreturn:
10343 if (block_signals()) {
10344 return -QEMU_ERESTARTSYS;
10346 return do_rt_sigreturn(cpu_env);
10347 case TARGET_NR_sethostname:
10348 if (!(p = lock_user_string(arg1)))
10349 return -TARGET_EFAULT;
10350 ret = get_errno(sethostname(p, arg2));
10351 unlock_user(p, arg1, 0);
10352 return ret;
10353 #ifdef TARGET_NR_setrlimit
10354 case TARGET_NR_setrlimit:
10356 int resource = target_to_host_resource(arg1);
10357 struct target_rlimit *target_rlim;
10358 struct rlimit rlim;
10359 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10360 return -TARGET_EFAULT;
10361 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10362 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10363 unlock_user_struct(target_rlim, arg2, 0);
10365 * If we just passed through resource limit settings for memory then
10366 * they would also apply to QEMU's own allocations, and QEMU will
10367 * crash or hang or die if its allocations fail. Ideally we would
10368 * track the guest allocations in QEMU and apply the limits ourselves.
10369 * For now, just tell the guest the call succeeded but don't actually
10370 * limit anything.
10372 if (resource != RLIMIT_AS &&
10373 resource != RLIMIT_DATA &&
10374 resource != RLIMIT_STACK) {
10375 return get_errno(setrlimit(resource, &rlim));
10376 } else {
10377 return 0;
10380 #endif
10381 #ifdef TARGET_NR_getrlimit
10382 case TARGET_NR_getrlimit:
10384 int resource = target_to_host_resource(arg1);
10385 struct target_rlimit *target_rlim;
10386 struct rlimit rlim;
10388 ret = get_errno(getrlimit(resource, &rlim));
10389 if (!is_error(ret)) {
10390 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10391 return -TARGET_EFAULT;
10392 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10393 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10394 unlock_user_struct(target_rlim, arg2, 1);
10397 return ret;
10398 #endif
10399 case TARGET_NR_getrusage:
10401 struct rusage rusage;
10402 ret = get_errno(getrusage(arg1, &rusage));
10403 if (!is_error(ret)) {
10404 ret = host_to_target_rusage(arg2, &rusage);
10407 return ret;
10408 #if defined(TARGET_NR_gettimeofday)
10409 case TARGET_NR_gettimeofday:
10411 struct timeval tv;
10412 struct timezone tz;
10414 ret = get_errno(gettimeofday(&tv, &tz));
10415 if (!is_error(ret)) {
10416 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10417 return -TARGET_EFAULT;
10419 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10420 return -TARGET_EFAULT;
10424 return ret;
10425 #endif
10426 #if defined(TARGET_NR_settimeofday)
10427 case TARGET_NR_settimeofday:
10429 struct timeval tv, *ptv = NULL;
10430 struct timezone tz, *ptz = NULL;
10432 if (arg1) {
10433 if (copy_from_user_timeval(&tv, arg1)) {
10434 return -TARGET_EFAULT;
10436 ptv = &tv;
10439 if (arg2) {
10440 if (copy_from_user_timezone(&tz, arg2)) {
10441 return -TARGET_EFAULT;
10443 ptz = &tz;
10446 return get_errno(settimeofday(ptv, ptz));
10448 #endif
10449 #if defined(TARGET_NR_select)
10450 case TARGET_NR_select:
10451 #if defined(TARGET_WANT_NI_OLD_SELECT)
10452 /* some architectures used to have old_select here
10453 * but now ENOSYS it.
10455 ret = -TARGET_ENOSYS;
10456 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10457 ret = do_old_select(arg1);
10458 #else
10459 ret = do_select(arg1, arg2, arg3, arg4, arg5);
10460 #endif
10461 return ret;
10462 #endif
10463 #ifdef TARGET_NR_pselect6
10464 case TARGET_NR_pselect6:
10465 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10466 #endif
10467 #ifdef TARGET_NR_pselect6_time64
10468 case TARGET_NR_pselect6_time64:
10469 return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10470 #endif
10471 #ifdef TARGET_NR_symlink
10472 case TARGET_NR_symlink:
10474 void *p2;
10475 p = lock_user_string(arg1);
10476 p2 = lock_user_string(arg2);
10477 if (!p || !p2)
10478 ret = -TARGET_EFAULT;
10479 else
10480 ret = get_errno(symlink(p, p2));
10481 unlock_user(p2, arg2, 0);
10482 unlock_user(p, arg1, 0);
10484 return ret;
10485 #endif
10486 #if defined(TARGET_NR_symlinkat)
10487 case TARGET_NR_symlinkat:
10489 void *p2;
10490 p = lock_user_string(arg1);
10491 p2 = lock_user_string(arg3);
10492 if (!p || !p2)
10493 ret = -TARGET_EFAULT;
10494 else
10495 ret = get_errno(symlinkat(p, arg2, p2));
10496 unlock_user(p2, arg3, 0);
10497 unlock_user(p, arg1, 0);
10499 return ret;
10500 #endif
10501 #ifdef TARGET_NR_readlink
10502 case TARGET_NR_readlink:
10504 void *p2;
10505 p = lock_user_string(arg1);
10506 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10507 ret = get_errno(do_guest_readlink(p, p2, arg3));
10508 unlock_user(p2, arg2, ret);
10509 unlock_user(p, arg1, 0);
10511 return ret;
10512 #endif
10513 #if defined(TARGET_NR_readlinkat)
10514 case TARGET_NR_readlinkat:
10516 void *p2;
10517 p = lock_user_string(arg2);
10518 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10519 if (!p || !p2) {
10520 ret = -TARGET_EFAULT;
10521 } else if (!arg4) {
10522 /* Short circuit this for the magic exe check. */
10523 ret = -TARGET_EINVAL;
10524 } else if (is_proc_myself((const char *)p, "exe")) {
10526 * Don't worry about sign mismatch as earlier mapping
10527 * logic would have thrown a bad address error.
10529 ret = MIN(strlen(exec_path), arg4);
10530 /* We cannot NUL terminate the string. */
10531 memcpy(p2, exec_path, ret);
10532 } else {
10533 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10535 unlock_user(p2, arg3, ret);
10536 unlock_user(p, arg2, 0);
10538 return ret;
10539 #endif
10540 #ifdef TARGET_NR_swapon
10541 case TARGET_NR_swapon:
10542 if (!(p = lock_user_string(arg1)))
10543 return -TARGET_EFAULT;
10544 ret = get_errno(swapon(p, arg2));
10545 unlock_user(p, arg1, 0);
10546 return ret;
10547 #endif
10548 case TARGET_NR_reboot:
10549 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10550 /* arg4 must be ignored in all other cases */
10551 p = lock_user_string(arg4);
10552 if (!p) {
10553 return -TARGET_EFAULT;
10555 ret = get_errno(reboot(arg1, arg2, arg3, p));
10556 unlock_user(p, arg4, 0);
10557 } else {
10558 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10560 return ret;
10561 #ifdef TARGET_NR_mmap
10562 case TARGET_NR_mmap:
10563 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10564 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10565 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10566 || defined(TARGET_S390X)
10568 abi_ulong *v;
10569 abi_ulong v1, v2, v3, v4, v5, v6;
10570 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10571 return -TARGET_EFAULT;
10572 v1 = tswapal(v[0]);
10573 v2 = tswapal(v[1]);
10574 v3 = tswapal(v[2]);
10575 v4 = tswapal(v[3]);
10576 v5 = tswapal(v[4]);
10577 v6 = tswapal(v[5]);
10578 unlock_user(v, arg1, 0);
10579 ret = get_errno(target_mmap(v1, v2, v3,
10580 target_to_host_bitmask(v4, mmap_flags_tbl),
10581 v5, v6));
10583 #else
10584 /* mmap pointers are always untagged */
10585 ret = get_errno(target_mmap(arg1, arg2, arg3,
10586 target_to_host_bitmask(arg4, mmap_flags_tbl),
10587 arg5,
10588 arg6));
10589 #endif
10590 return ret;
10591 #endif
10592 #ifdef TARGET_NR_mmap2
10593 case TARGET_NR_mmap2:
10594 #ifndef MMAP_SHIFT
10595 #define MMAP_SHIFT 12
10596 #endif
10597 ret = target_mmap(arg1, arg2, arg3,
10598 target_to_host_bitmask(arg4, mmap_flags_tbl),
10599 arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10600 return get_errno(ret);
10601 #endif
10602 case TARGET_NR_munmap:
10603 arg1 = cpu_untagged_addr(cpu, arg1);
10604 return get_errno(target_munmap(arg1, arg2));
10605 case TARGET_NR_mprotect:
10606 arg1 = cpu_untagged_addr(cpu, arg1);
10608 TaskState *ts = cpu->opaque;
10609 /* Special hack to detect libc making the stack executable. */
10610 if ((arg3 & PROT_GROWSDOWN)
10611 && arg1 >= ts->info->stack_limit
10612 && arg1 <= ts->info->start_stack) {
10613 arg3 &= ~PROT_GROWSDOWN;
10614 arg2 = arg2 + arg1 - ts->info->stack_limit;
10615 arg1 = ts->info->stack_limit;
10618 return get_errno(target_mprotect(arg1, arg2, arg3));
10619 #ifdef TARGET_NR_mremap
10620 case TARGET_NR_mremap:
10621 arg1 = cpu_untagged_addr(cpu, arg1);
10622 /* mremap new_addr (arg5) is always untagged */
10623 return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10624 #endif
10625 /* ??? msync/mlock/munlock are broken for softmmu. */
10626 #ifdef TARGET_NR_msync
10627 case TARGET_NR_msync:
10628 return get_errno(msync(g2h(cpu, arg1), arg2,
10629 target_to_host_msync_arg(arg3)));
10630 #endif
10631 #ifdef TARGET_NR_mlock
10632 case TARGET_NR_mlock:
10633 return get_errno(mlock(g2h(cpu, arg1), arg2));
10634 #endif
10635 #ifdef TARGET_NR_munlock
10636 case TARGET_NR_munlock:
10637 return get_errno(munlock(g2h(cpu, arg1), arg2));
10638 #endif
10639 #ifdef TARGET_NR_mlockall
10640 case TARGET_NR_mlockall:
10641 return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10642 #endif
10643 #ifdef TARGET_NR_munlockall
10644 case TARGET_NR_munlockall:
10645 return get_errno(munlockall());
10646 #endif
10647 #ifdef TARGET_NR_truncate
10648 case TARGET_NR_truncate:
10649 if (!(p = lock_user_string(arg1)))
10650 return -TARGET_EFAULT;
10651 ret = get_errno(truncate(p, arg2));
10652 unlock_user(p, arg1, 0);
10653 return ret;
10654 #endif
10655 #ifdef TARGET_NR_ftruncate
10656 case TARGET_NR_ftruncate:
10657 return get_errno(ftruncate(arg1, arg2));
10658 #endif
10659 case TARGET_NR_fchmod:
10660 return get_errno(fchmod(arg1, arg2));
10661 #if defined(TARGET_NR_fchmodat)
10662 case TARGET_NR_fchmodat:
10663 if (!(p = lock_user_string(arg2)))
10664 return -TARGET_EFAULT;
10665 ret = get_errno(fchmodat(arg1, p, arg3, 0));
10666 unlock_user(p, arg2, 0);
10667 return ret;
10668 #endif
10669 case TARGET_NR_getpriority:
10670 /* Note that negative values are valid for getpriority, so we must
10671 differentiate based on errno settings. */
10672 errno = 0;
10673 ret = getpriority(arg1, arg2);
10674 if (ret == -1 && errno != 0) {
10675 return -host_to_target_errno(errno);
10677 #ifdef TARGET_ALPHA
10678 /* Return value is the unbiased priority. Signal no error. */
10679 cpu_env->ir[IR_V0] = 0;
10680 #else
10681 /* Return value is a biased priority to avoid negative numbers. */
10682 ret = 20 - ret;
10683 #endif
10684 return ret;
10685 case TARGET_NR_setpriority:
10686 return get_errno(setpriority(arg1, arg2, arg3));
10687 #ifdef TARGET_NR_statfs
10688 case TARGET_NR_statfs:
10689 if (!(p = lock_user_string(arg1))) {
10690 return -TARGET_EFAULT;
10692 ret = get_errno(statfs(path(p), &stfs));
10693 unlock_user(p, arg1, 0);
10694 convert_statfs:
10695 if (!is_error(ret)) {
10696 struct target_statfs *target_stfs;
10698 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10699 return -TARGET_EFAULT;
10700 __put_user(stfs.f_type, &target_stfs->f_type);
10701 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10702 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10703 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10704 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10705 __put_user(stfs.f_files, &target_stfs->f_files);
10706 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10707 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10708 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10709 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10710 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10711 #ifdef _STATFS_F_FLAGS
10712 __put_user(stfs.f_flags, &target_stfs->f_flags);
10713 #else
10714 __put_user(0, &target_stfs->f_flags);
10715 #endif
10716 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10717 unlock_user_struct(target_stfs, arg2, 1);
10719 return ret;
10720 #endif
10721 #ifdef TARGET_NR_fstatfs
10722 case TARGET_NR_fstatfs:
10723 ret = get_errno(fstatfs(arg1, &stfs));
10724 goto convert_statfs;
10725 #endif
10726 #ifdef TARGET_NR_statfs64
10727 case TARGET_NR_statfs64:
10728 if (!(p = lock_user_string(arg1))) {
10729 return -TARGET_EFAULT;
10731 ret = get_errno(statfs(path(p), &stfs));
10732 unlock_user(p, arg1, 0);
10733 convert_statfs64:
10734 if (!is_error(ret)) {
10735 struct target_statfs64 *target_stfs;
10737 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10738 return -TARGET_EFAULT;
10739 __put_user(stfs.f_type, &target_stfs->f_type);
10740 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10741 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10742 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10743 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10744 __put_user(stfs.f_files, &target_stfs->f_files);
10745 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10746 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10747 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10748 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10749 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10750 #ifdef _STATFS_F_FLAGS
10751 __put_user(stfs.f_flags, &target_stfs->f_flags);
10752 #else
10753 __put_user(0, &target_stfs->f_flags);
10754 #endif
10755 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10756 unlock_user_struct(target_stfs, arg3, 1);
10758 return ret;
10759 case TARGET_NR_fstatfs64:
10760 ret = get_errno(fstatfs(arg1, &stfs));
10761 goto convert_statfs64;
10762 #endif
10763 #ifdef TARGET_NR_socketcall
10764 case TARGET_NR_socketcall:
10765 return do_socketcall(arg1, arg2);
10766 #endif
10767 #ifdef TARGET_NR_accept
10768 case TARGET_NR_accept:
10769 return do_accept4(arg1, arg2, arg3, 0);
10770 #endif
10771 #ifdef TARGET_NR_accept4
10772 case TARGET_NR_accept4:
10773 return do_accept4(arg1, arg2, arg3, arg4);
10774 #endif
10775 #ifdef TARGET_NR_bind
10776 case TARGET_NR_bind:
10777 return do_bind(arg1, arg2, arg3);
10778 #endif
10779 #ifdef TARGET_NR_connect
10780 case TARGET_NR_connect:
10781 return do_connect(arg1, arg2, arg3);
10782 #endif
10783 #ifdef TARGET_NR_getpeername
10784 case TARGET_NR_getpeername:
10785 return do_getpeername(arg1, arg2, arg3);
10786 #endif
10787 #ifdef TARGET_NR_getsockname
10788 case TARGET_NR_getsockname:
10789 return do_getsockname(arg1, arg2, arg3);
10790 #endif
10791 #ifdef TARGET_NR_getsockopt
10792 case TARGET_NR_getsockopt:
10793 return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10794 #endif
10795 #ifdef TARGET_NR_listen
10796 case TARGET_NR_listen:
10797 return get_errno(listen(arg1, arg2));
10798 #endif
10799 #ifdef TARGET_NR_recv
10800 case TARGET_NR_recv:
10801 return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10802 #endif
10803 #ifdef TARGET_NR_recvfrom
10804 case TARGET_NR_recvfrom:
10805 return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10806 #endif
10807 #ifdef TARGET_NR_recvmsg
10808 case TARGET_NR_recvmsg:
10809 return do_sendrecvmsg(arg1, arg2, arg3, 0);
10810 #endif
10811 #ifdef TARGET_NR_send
10812 case TARGET_NR_send:
10813 return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10814 #endif
10815 #ifdef TARGET_NR_sendmsg
10816 case TARGET_NR_sendmsg:
10817 return do_sendrecvmsg(arg1, arg2, arg3, 1);
10818 #endif
10819 #ifdef TARGET_NR_sendmmsg
10820 case TARGET_NR_sendmmsg:
10821 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10822 #endif
10823 #ifdef TARGET_NR_recvmmsg
10824 case TARGET_NR_recvmmsg:
10825 return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10826 #endif
10827 #ifdef TARGET_NR_sendto
10828 case TARGET_NR_sendto:
10829 return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10830 #endif
10831 #ifdef TARGET_NR_shutdown
10832 case TARGET_NR_shutdown:
10833 return get_errno(shutdown(arg1, arg2));
10834 #endif
10835 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10836 case TARGET_NR_getrandom:
10837 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10838 if (!p) {
10839 return -TARGET_EFAULT;
10841 ret = get_errno(getrandom(p, arg2, arg3));
10842 unlock_user(p, arg1, ret);
10843 return ret;
10844 #endif
10845 #ifdef TARGET_NR_socket
10846 case TARGET_NR_socket:
10847 return do_socket(arg1, arg2, arg3);
10848 #endif
10849 #ifdef TARGET_NR_socketpair
10850 case TARGET_NR_socketpair:
10851 return do_socketpair(arg1, arg2, arg3, arg4);
10852 #endif
10853 #ifdef TARGET_NR_setsockopt
10854 case TARGET_NR_setsockopt:
10855 return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10856 #endif
10857 #if defined(TARGET_NR_syslog)
10858 case TARGET_NR_syslog:
10860 int len = arg2;
10862 switch (arg1) {
10863 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
10864 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
10865 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
10866 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
10867 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
10868 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10869 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
10870 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
10871 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10872 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
10873 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
10874 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
10876 if (len < 0) {
10877 return -TARGET_EINVAL;
10879 if (len == 0) {
10880 return 0;
10882 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10883 if (!p) {
10884 return -TARGET_EFAULT;
10886 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10887 unlock_user(p, arg2, arg3);
10889 return ret;
10890 default:
10891 return -TARGET_EINVAL;
10894 break;
10895 #endif
10896 case TARGET_NR_setitimer:
10898 struct itimerval value, ovalue, *pvalue;
10900 if (arg2) {
10901 pvalue = &value;
10902 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10903 || copy_from_user_timeval(&pvalue->it_value,
10904 arg2 + sizeof(struct target_timeval)))
10905 return -TARGET_EFAULT;
10906 } else {
10907 pvalue = NULL;
10909 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10910 if (!is_error(ret) && arg3) {
10911 if (copy_to_user_timeval(arg3,
10912 &ovalue.it_interval)
10913 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10914 &ovalue.it_value))
10915 return -TARGET_EFAULT;
10918 return ret;
10919 case TARGET_NR_getitimer:
10921 struct itimerval value;
10923 ret = get_errno(getitimer(arg1, &value));
10924 if (!is_error(ret) && arg2) {
10925 if (copy_to_user_timeval(arg2,
10926 &value.it_interval)
10927 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10928 &value.it_value))
10929 return -TARGET_EFAULT;
10932 return ret;
10933 #ifdef TARGET_NR_stat
10934 case TARGET_NR_stat:
10935 if (!(p = lock_user_string(arg1))) {
10936 return -TARGET_EFAULT;
10938 ret = get_errno(stat(path(p), &st));
10939 unlock_user(p, arg1, 0);
10940 goto do_stat;
10941 #endif
10942 #ifdef TARGET_NR_lstat
10943 case TARGET_NR_lstat:
10944 if (!(p = lock_user_string(arg1))) {
10945 return -TARGET_EFAULT;
10947 ret = get_errno(lstat(path(p), &st));
10948 unlock_user(p, arg1, 0);
10949 goto do_stat;
10950 #endif
10951 #ifdef TARGET_NR_fstat
10952 case TARGET_NR_fstat:
10954 ret = get_errno(fstat(arg1, &st));
10955 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10956 do_stat:
10957 #endif
10958 if (!is_error(ret)) {
10959 struct target_stat *target_st;
10961 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10962 return -TARGET_EFAULT;
10963 memset(target_st, 0, sizeof(*target_st));
10964 __put_user(st.st_dev, &target_st->st_dev);
10965 __put_user(st.st_ino, &target_st->st_ino);
10966 __put_user(st.st_mode, &target_st->st_mode);
10967 __put_user(st.st_uid, &target_st->st_uid);
10968 __put_user(st.st_gid, &target_st->st_gid);
10969 __put_user(st.st_nlink, &target_st->st_nlink);
10970 __put_user(st.st_rdev, &target_st->st_rdev);
10971 __put_user(st.st_size, &target_st->st_size);
10972 __put_user(st.st_blksize, &target_st->st_blksize);
10973 __put_user(st.st_blocks, &target_st->st_blocks);
10974 __put_user(st.st_atime, &target_st->target_st_atime);
10975 __put_user(st.st_mtime, &target_st->target_st_mtime);
10976 __put_user(st.st_ctime, &target_st->target_st_ctime);
10977 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10978 __put_user(st.st_atim.tv_nsec,
10979 &target_st->target_st_atime_nsec);
10980 __put_user(st.st_mtim.tv_nsec,
10981 &target_st->target_st_mtime_nsec);
10982 __put_user(st.st_ctim.tv_nsec,
10983 &target_st->target_st_ctime_nsec);
10984 #endif
10985 unlock_user_struct(target_st, arg2, 1);
10988 return ret;
10989 #endif
10990 case TARGET_NR_vhangup:
10991 return get_errno(vhangup());
10992 #ifdef TARGET_NR_syscall
10993 case TARGET_NR_syscall:
10994 return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10995 arg6, arg7, arg8, 0);
10996 #endif
10997 #if defined(TARGET_NR_wait4)
10998 case TARGET_NR_wait4:
11000 int status;
11001 abi_long status_ptr = arg2;
11002 struct rusage rusage, *rusage_ptr;
11003 abi_ulong target_rusage = arg4;
11004 abi_long rusage_err;
11005 if (target_rusage)
11006 rusage_ptr = &rusage;
11007 else
11008 rusage_ptr = NULL;
11009 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11010 if (!is_error(ret)) {
11011 if (status_ptr && ret) {
11012 status = host_to_target_waitstatus(status);
11013 if (put_user_s32(status, status_ptr))
11014 return -TARGET_EFAULT;
11016 if (target_rusage) {
11017 rusage_err = host_to_target_rusage(target_rusage, &rusage);
11018 if (rusage_err) {
11019 ret = rusage_err;
11024 return ret;
11025 #endif
11026 #ifdef TARGET_NR_swapoff
11027 case TARGET_NR_swapoff:
11028 if (!(p = lock_user_string(arg1)))
11029 return -TARGET_EFAULT;
11030 ret = get_errno(swapoff(p));
11031 unlock_user(p, arg1, 0);
11032 return ret;
11033 #endif
11034 case TARGET_NR_sysinfo:
11036 struct target_sysinfo *target_value;
11037 struct sysinfo value;
11038 ret = get_errno(sysinfo(&value));
11039 if (!is_error(ret) && arg1)
11041 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11042 return -TARGET_EFAULT;
11043 __put_user(value.uptime, &target_value->uptime);
11044 __put_user(value.loads[0], &target_value->loads[0]);
11045 __put_user(value.loads[1], &target_value->loads[1]);
11046 __put_user(value.loads[2], &target_value->loads[2]);
11047 __put_user(value.totalram, &target_value->totalram);
11048 __put_user(value.freeram, &target_value->freeram);
11049 __put_user(value.sharedram, &target_value->sharedram);
11050 __put_user(value.bufferram, &target_value->bufferram);
11051 __put_user(value.totalswap, &target_value->totalswap);
11052 __put_user(value.freeswap, &target_value->freeswap);
11053 __put_user(value.procs, &target_value->procs);
11054 __put_user(value.totalhigh, &target_value->totalhigh);
11055 __put_user(value.freehigh, &target_value->freehigh);
11056 __put_user(value.mem_unit, &target_value->mem_unit);
11057 unlock_user_struct(target_value, arg1, 1);
11060 return ret;
11061 #ifdef TARGET_NR_ipc
11062 case TARGET_NR_ipc:
11063 return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11064 #endif
11065 #ifdef TARGET_NR_semget
11066 case TARGET_NR_semget:
11067 return get_errno(semget(arg1, arg2, arg3));
11068 #endif
11069 #ifdef TARGET_NR_semop
11070 case TARGET_NR_semop:
11071 return do_semtimedop(arg1, arg2, arg3, 0, false);
11072 #endif
11073 #ifdef TARGET_NR_semtimedop
11074 case TARGET_NR_semtimedop:
11075 return do_semtimedop(arg1, arg2, arg3, arg4, false);
11076 #endif
11077 #ifdef TARGET_NR_semtimedop_time64
11078 case TARGET_NR_semtimedop_time64:
11079 return do_semtimedop(arg1, arg2, arg3, arg4, true);
11080 #endif
11081 #ifdef TARGET_NR_semctl
11082 case TARGET_NR_semctl:
11083 return do_semctl(arg1, arg2, arg3, arg4);
11084 #endif
11085 #ifdef TARGET_NR_msgctl
11086 case TARGET_NR_msgctl:
11087 return do_msgctl(arg1, arg2, arg3);
11088 #endif
11089 #ifdef TARGET_NR_msgget
11090 case TARGET_NR_msgget:
11091 return get_errno(msgget(arg1, arg2));
11092 #endif
11093 #ifdef TARGET_NR_msgrcv
11094 case TARGET_NR_msgrcv:
11095 return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11096 #endif
11097 #ifdef TARGET_NR_msgsnd
11098 case TARGET_NR_msgsnd:
11099 return do_msgsnd(arg1, arg2, arg3, arg4);
11100 #endif
11101 #ifdef TARGET_NR_shmget
11102 case TARGET_NR_shmget:
11103 return get_errno(shmget(arg1, arg2, arg3));
11104 #endif
11105 #ifdef TARGET_NR_shmctl
11106 case TARGET_NR_shmctl:
11107 return do_shmctl(arg1, arg2, arg3);
11108 #endif
11109 #ifdef TARGET_NR_shmat
11110 case TARGET_NR_shmat:
11111 return do_shmat(cpu_env, arg1, arg2, arg3);
11112 #endif
11113 #ifdef TARGET_NR_shmdt
11114 case TARGET_NR_shmdt:
11115 return do_shmdt(arg1);
11116 #endif
11117 case TARGET_NR_fsync:
11118 return get_errno(fsync(arg1));
11119 case TARGET_NR_clone:
11120 /* Linux manages to have three different orderings for its
11121 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11122 * match the kernel's CONFIG_CLONE_* settings.
11123 * Microblaze is further special in that it uses a sixth
11124 * implicit argument to clone for the TLS pointer.
11126 #if defined(TARGET_MICROBLAZE)
11127 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11128 #elif defined(TARGET_CLONE_BACKWARDS)
11129 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11130 #elif defined(TARGET_CLONE_BACKWARDS2)
11131 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11132 #else
11133 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11134 #endif
11135 return ret;
11136 #ifdef __NR_exit_group
11137 /* new thread calls */
11138 case TARGET_NR_exit_group:
11139 preexit_cleanup(cpu_env, arg1);
11140 return get_errno(exit_group(arg1));
11141 #endif
11142 case TARGET_NR_setdomainname:
11143 if (!(p = lock_user_string(arg1)))
11144 return -TARGET_EFAULT;
11145 ret = get_errno(setdomainname(p, arg2));
11146 unlock_user(p, arg1, 0);
11147 return ret;
11148 case TARGET_NR_uname:
11149 /* no need to transcode because we use the linux syscall */
11151 struct new_utsname * buf;
11153 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11154 return -TARGET_EFAULT;
11155 ret = get_errno(sys_uname(buf));
11156 if (!is_error(ret)) {
11157 /* Overwrite the native machine name with whatever is being
11158 emulated. */
11159 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11160 sizeof(buf->machine));
11161 /* Allow the user to override the reported release. */
11162 if (qemu_uname_release && *qemu_uname_release) {
11163 g_strlcpy(buf->release, qemu_uname_release,
11164 sizeof(buf->release));
11167 unlock_user_struct(buf, arg1, 1);
11169 return ret;
11170 #ifdef TARGET_I386
11171 case TARGET_NR_modify_ldt:
11172 return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11173 #if !defined(TARGET_X86_64)
11174 case TARGET_NR_vm86:
11175 return do_vm86(cpu_env, arg1, arg2);
11176 #endif
11177 #endif
11178 #if defined(TARGET_NR_adjtimex)
11179 case TARGET_NR_adjtimex:
11181 struct timex host_buf;
11183 if (target_to_host_timex(&host_buf, arg1) != 0) {
11184 return -TARGET_EFAULT;
11186 ret = get_errno(adjtimex(&host_buf));
11187 if (!is_error(ret)) {
11188 if (host_to_target_timex(arg1, &host_buf) != 0) {
11189 return -TARGET_EFAULT;
11193 return ret;
11194 #endif
11195 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11196 case TARGET_NR_clock_adjtime:
11198 struct timex htx;
11200 if (target_to_host_timex(&htx, arg2) != 0) {
11201 return -TARGET_EFAULT;
11203 ret = get_errno(clock_adjtime(arg1, &htx));
11204 if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11205 return -TARGET_EFAULT;
11208 return ret;
11209 #endif
11210 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11211 case TARGET_NR_clock_adjtime64:
11213 struct timex htx;
11215 if (target_to_host_timex64(&htx, arg2) != 0) {
11216 return -TARGET_EFAULT;
11218 ret = get_errno(clock_adjtime(arg1, &htx));
11219 if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11220 return -TARGET_EFAULT;
11223 return ret;
11224 #endif
11225 case TARGET_NR_getpgid:
11226 return get_errno(getpgid(arg1));
11227 case TARGET_NR_fchdir:
11228 return get_errno(fchdir(arg1));
11229 case TARGET_NR_personality:
11230 return get_errno(personality(arg1));
11231 #ifdef TARGET_NR__llseek /* Not on alpha */
11232 case TARGET_NR__llseek:
11234 int64_t res;
11235 #if !defined(__NR_llseek)
11236 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11237 if (res == -1) {
11238 ret = get_errno(res);
11239 } else {
11240 ret = 0;
11242 #else
11243 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11244 #endif
11245 if ((ret == 0) && put_user_s64(res, arg4)) {
11246 return -TARGET_EFAULT;
11249 return ret;
11250 #endif
11251 #ifdef TARGET_NR_getdents
11252 case TARGET_NR_getdents:
11253 return do_getdents(arg1, arg2, arg3);
11254 #endif /* TARGET_NR_getdents */
11255 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11256 case TARGET_NR_getdents64:
11257 return do_getdents64(arg1, arg2, arg3);
11258 #endif /* TARGET_NR_getdents64 */
11259 #if defined(TARGET_NR__newselect)
11260 case TARGET_NR__newselect:
11261 return do_select(arg1, arg2, arg3, arg4, arg5);
11262 #endif
11263 #ifdef TARGET_NR_poll
11264 case TARGET_NR_poll:
11265 return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11266 #endif
11267 #ifdef TARGET_NR_ppoll
11268 case TARGET_NR_ppoll:
11269 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11270 #endif
11271 #ifdef TARGET_NR_ppoll_time64
11272 case TARGET_NR_ppoll_time64:
11273 return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11274 #endif
11275 case TARGET_NR_flock:
11276 /* NOTE: the flock constant seems to be the same for every
11277 Linux platform */
11278 return get_errno(safe_flock(arg1, arg2));
11279 case TARGET_NR_readv:
11281 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11282 if (vec != NULL) {
11283 ret = get_errno(safe_readv(arg1, vec, arg3));
11284 unlock_iovec(vec, arg2, arg3, 1);
11285 } else {
11286 ret = -host_to_target_errno(errno);
11289 return ret;
11290 case TARGET_NR_writev:
11292 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11293 if (vec != NULL) {
11294 ret = get_errno(safe_writev(arg1, vec, arg3));
11295 unlock_iovec(vec, arg2, arg3, 0);
11296 } else {
11297 ret = -host_to_target_errno(errno);
11300 return ret;
11301 #if defined(TARGET_NR_preadv)
11302 case TARGET_NR_preadv:
11304 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11305 if (vec != NULL) {
11306 unsigned long low, high;
11308 target_to_host_low_high(arg4, arg5, &low, &high);
11309 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11310 unlock_iovec(vec, arg2, arg3, 1);
11311 } else {
11312 ret = -host_to_target_errno(errno);
11315 return ret;
11316 #endif
11317 #if defined(TARGET_NR_pwritev)
11318 case TARGET_NR_pwritev:
11320 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11321 if (vec != NULL) {
11322 unsigned long low, high;
11324 target_to_host_low_high(arg4, arg5, &low, &high);
11325 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11326 unlock_iovec(vec, arg2, arg3, 0);
11327 } else {
11328 ret = -host_to_target_errno(errno);
11331 return ret;
11332 #endif
11333 case TARGET_NR_getsid:
11334 return get_errno(getsid(arg1));
11335 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11336 case TARGET_NR_fdatasync:
11337 return get_errno(fdatasync(arg1));
11338 #endif
11339 case TARGET_NR_sched_getaffinity:
11341 unsigned int mask_size;
11342 unsigned long *mask;
11345 * sched_getaffinity needs multiples of ulong, so need to take
11346 * care of mismatches between target ulong and host ulong sizes.
11348 if (arg2 & (sizeof(abi_ulong) - 1)) {
11349 return -TARGET_EINVAL;
11351 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11353 mask = alloca(mask_size);
11354 memset(mask, 0, mask_size);
11355 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11357 if (!is_error(ret)) {
11358 if (ret > arg2) {
11359 /* More data returned than the caller's buffer will fit.
11360 * This only happens if sizeof(abi_long) < sizeof(long)
11361 * and the caller passed us a buffer holding an odd number
11362 * of abi_longs. If the host kernel is actually using the
11363 * extra 4 bytes then fail EINVAL; otherwise we can just
11364 * ignore them and only copy the interesting part.
11366 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11367 if (numcpus > arg2 * 8) {
11368 return -TARGET_EINVAL;
11370 ret = arg2;
11373 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11374 return -TARGET_EFAULT;
11378 return ret;
11379 case TARGET_NR_sched_setaffinity:
11381 unsigned int mask_size;
11382 unsigned long *mask;
11385 * sched_setaffinity needs multiples of ulong, so need to take
11386 * care of mismatches between target ulong and host ulong sizes.
11388 if (arg2 & (sizeof(abi_ulong) - 1)) {
11389 return -TARGET_EINVAL;
11391 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11392 mask = alloca(mask_size);
11394 ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11395 if (ret) {
11396 return ret;
11399 return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11401 case TARGET_NR_getcpu:
11403 unsigned cpu, node;
11404 ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
11405 arg2 ? &node : NULL,
11406 NULL));
11407 if (is_error(ret)) {
11408 return ret;
11410 if (arg1 && put_user_u32(cpu, arg1)) {
11411 return -TARGET_EFAULT;
11413 if (arg2 && put_user_u32(node, arg2)) {
11414 return -TARGET_EFAULT;
11417 return ret;
11418 case TARGET_NR_sched_setparam:
11420 struct target_sched_param *target_schp;
11421 struct sched_param schp;
11423 if (arg2 == 0) {
11424 return -TARGET_EINVAL;
11426 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11427 return -TARGET_EFAULT;
11429 schp.sched_priority = tswap32(target_schp->sched_priority);
11430 unlock_user_struct(target_schp, arg2, 0);
11431 return get_errno(sys_sched_setparam(arg1, &schp));
11433 case TARGET_NR_sched_getparam:
11435 struct target_sched_param *target_schp;
11436 struct sched_param schp;
11438 if (arg2 == 0) {
11439 return -TARGET_EINVAL;
11441 ret = get_errno(sys_sched_getparam(arg1, &schp));
11442 if (!is_error(ret)) {
11443 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11444 return -TARGET_EFAULT;
11446 target_schp->sched_priority = tswap32(schp.sched_priority);
11447 unlock_user_struct(target_schp, arg2, 1);
11450 return ret;
11451 case TARGET_NR_sched_setscheduler:
11453 struct target_sched_param *target_schp;
11454 struct sched_param schp;
11455 if (arg3 == 0) {
11456 return -TARGET_EINVAL;
11458 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11459 return -TARGET_EFAULT;
11461 schp.sched_priority = tswap32(target_schp->sched_priority);
11462 unlock_user_struct(target_schp, arg3, 0);
11463 return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11465 case TARGET_NR_sched_getscheduler:
11466 return get_errno(sys_sched_getscheduler(arg1));
11467 case TARGET_NR_sched_getattr:
11469 struct target_sched_attr *target_scha;
11470 struct sched_attr scha;
11471 if (arg2 == 0) {
11472 return -TARGET_EINVAL;
11474 if (arg3 > sizeof(scha)) {
11475 arg3 = sizeof(scha);
11477 ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11478 if (!is_error(ret)) {
11479 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11480 if (!target_scha) {
11481 return -TARGET_EFAULT;
11483 target_scha->size = tswap32(scha.size);
11484 target_scha->sched_policy = tswap32(scha.sched_policy);
11485 target_scha->sched_flags = tswap64(scha.sched_flags);
11486 target_scha->sched_nice = tswap32(scha.sched_nice);
11487 target_scha->sched_priority = tswap32(scha.sched_priority);
11488 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11489 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11490 target_scha->sched_period = tswap64(scha.sched_period);
11491 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11492 target_scha->sched_util_min = tswap32(scha.sched_util_min);
11493 target_scha->sched_util_max = tswap32(scha.sched_util_max);
11495 unlock_user(target_scha, arg2, arg3);
11497 return ret;
11499 case TARGET_NR_sched_setattr:
11501 struct target_sched_attr *target_scha;
11502 struct sched_attr scha;
11503 uint32_t size;
11504 int zeroed;
11505 if (arg2 == 0) {
11506 return -TARGET_EINVAL;
11508 if (get_user_u32(size, arg2)) {
11509 return -TARGET_EFAULT;
11511 if (!size) {
11512 size = offsetof(struct target_sched_attr, sched_util_min);
11514 if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11515 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11516 return -TARGET_EFAULT;
11518 return -TARGET_E2BIG;
11521 zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11522 if (zeroed < 0) {
11523 return zeroed;
11524 } else if (zeroed == 0) {
11525 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11526 return -TARGET_EFAULT;
11528 return -TARGET_E2BIG;
11530 if (size > sizeof(struct target_sched_attr)) {
11531 size = sizeof(struct target_sched_attr);
11534 target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11535 if (!target_scha) {
11536 return -TARGET_EFAULT;
11538 scha.size = size;
11539 scha.sched_policy = tswap32(target_scha->sched_policy);
11540 scha.sched_flags = tswap64(target_scha->sched_flags);
11541 scha.sched_nice = tswap32(target_scha->sched_nice);
11542 scha.sched_priority = tswap32(target_scha->sched_priority);
11543 scha.sched_runtime = tswap64(target_scha->sched_runtime);
11544 scha.sched_deadline = tswap64(target_scha->sched_deadline);
11545 scha.sched_period = tswap64(target_scha->sched_period);
11546 if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11547 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11548 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11550 unlock_user(target_scha, arg2, 0);
11551 return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11553 case TARGET_NR_sched_yield:
11554 return get_errno(sched_yield());
11555 case TARGET_NR_sched_get_priority_max:
11556 return get_errno(sched_get_priority_max(arg1));
11557 case TARGET_NR_sched_get_priority_min:
11558 return get_errno(sched_get_priority_min(arg1));
11559 #ifdef TARGET_NR_sched_rr_get_interval
11560 case TARGET_NR_sched_rr_get_interval:
11562 struct timespec ts;
11563 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11564 if (!is_error(ret)) {
11565 ret = host_to_target_timespec(arg2, &ts);
11568 return ret;
11569 #endif
11570 #ifdef TARGET_NR_sched_rr_get_interval_time64
11571 case TARGET_NR_sched_rr_get_interval_time64:
11573 struct timespec ts;
11574 ret = get_errno(sched_rr_get_interval(arg1, &ts));
11575 if (!is_error(ret)) {
11576 ret = host_to_target_timespec64(arg2, &ts);
11579 return ret;
11580 #endif
11581 #if defined(TARGET_NR_nanosleep)
11582 case TARGET_NR_nanosleep:
11584 struct timespec req, rem;
11585 target_to_host_timespec(&req, arg1);
11586 ret = get_errno(safe_nanosleep(&req, &rem));
11587 if (is_error(ret) && arg2) {
11588 host_to_target_timespec(arg2, &rem);
11591 return ret;
11592 #endif
11593 case TARGET_NR_prctl:
11594 return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11595 break;
11596 #ifdef TARGET_NR_arch_prctl
11597 case TARGET_NR_arch_prctl:
11598 return do_arch_prctl(cpu_env, arg1, arg2);
11599 #endif
11600 #ifdef TARGET_NR_pread64
11601 case TARGET_NR_pread64:
11602 if (regpairs_aligned(cpu_env, num)) {
11603 arg4 = arg5;
11604 arg5 = arg6;
11606 if (arg2 == 0 && arg3 == 0) {
11607 /* Special-case NULL buffer and zero length, which should succeed */
11608 p = 0;
11609 } else {
11610 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11611 if (!p) {
11612 return -TARGET_EFAULT;
11615 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11616 unlock_user(p, arg2, ret);
11617 return ret;
11618 case TARGET_NR_pwrite64:
11619 if (regpairs_aligned(cpu_env, num)) {
11620 arg4 = arg5;
11621 arg5 = arg6;
11623 if (arg2 == 0 && arg3 == 0) {
11624 /* Special-case NULL buffer and zero length, which should succeed */
11625 p = 0;
11626 } else {
11627 p = lock_user(VERIFY_READ, arg2, arg3, 1);
11628 if (!p) {
11629 return -TARGET_EFAULT;
11632 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11633 unlock_user(p, arg2, 0);
11634 return ret;
11635 #endif
11636 case TARGET_NR_getcwd:
11637 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11638 return -TARGET_EFAULT;
11639 ret = get_errno(sys_getcwd1(p, arg2));
11640 unlock_user(p, arg1, ret);
11641 return ret;
11642 case TARGET_NR_capget:
11643 case TARGET_NR_capset:
11645 struct target_user_cap_header *target_header;
11646 struct target_user_cap_data *target_data = NULL;
11647 struct __user_cap_header_struct header;
11648 struct __user_cap_data_struct data[2];
11649 struct __user_cap_data_struct *dataptr = NULL;
11650 int i, target_datalen;
11651 int data_items = 1;
11653 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11654 return -TARGET_EFAULT;
11656 header.version = tswap32(target_header->version);
11657 header.pid = tswap32(target_header->pid);
11659 if (header.version != _LINUX_CAPABILITY_VERSION) {
11660 /* Version 2 and up takes pointer to two user_data structs */
11661 data_items = 2;
11664 target_datalen = sizeof(*target_data) * data_items;
11666 if (arg2) {
11667 if (num == TARGET_NR_capget) {
11668 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11669 } else {
11670 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11672 if (!target_data) {
11673 unlock_user_struct(target_header, arg1, 0);
11674 return -TARGET_EFAULT;
11677 if (num == TARGET_NR_capset) {
11678 for (i = 0; i < data_items; i++) {
11679 data[i].effective = tswap32(target_data[i].effective);
11680 data[i].permitted = tswap32(target_data[i].permitted);
11681 data[i].inheritable = tswap32(target_data[i].inheritable);
11685 dataptr = data;
11688 if (num == TARGET_NR_capget) {
11689 ret = get_errno(capget(&header, dataptr));
11690 } else {
11691 ret = get_errno(capset(&header, dataptr));
11694 /* The kernel always updates version for both capget and capset */
11695 target_header->version = tswap32(header.version);
11696 unlock_user_struct(target_header, arg1, 1);
11698 if (arg2) {
11699 if (num == TARGET_NR_capget) {
11700 for (i = 0; i < data_items; i++) {
11701 target_data[i].effective = tswap32(data[i].effective);
11702 target_data[i].permitted = tswap32(data[i].permitted);
11703 target_data[i].inheritable = tswap32(data[i].inheritable);
11705 unlock_user(target_data, arg2, target_datalen);
11706 } else {
11707 unlock_user(target_data, arg2, 0);
11710 return ret;
11712 case TARGET_NR_sigaltstack:
11713 return do_sigaltstack(arg1, arg2, cpu_env);
11715 #ifdef CONFIG_SENDFILE
11716 #ifdef TARGET_NR_sendfile
11717 case TARGET_NR_sendfile:
11719 off_t *offp = NULL;
11720 off_t off;
11721 if (arg3) {
11722 ret = get_user_sal(off, arg3);
11723 if (is_error(ret)) {
11724 return ret;
11726 offp = &off;
11728 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11729 if (!is_error(ret) && arg3) {
11730 abi_long ret2 = put_user_sal(off, arg3);
11731 if (is_error(ret2)) {
11732 ret = ret2;
11735 return ret;
11737 #endif
11738 #ifdef TARGET_NR_sendfile64
11739 case TARGET_NR_sendfile64:
11741 off_t *offp = NULL;
11742 off_t off;
11743 if (arg3) {
11744 ret = get_user_s64(off, arg3);
11745 if (is_error(ret)) {
11746 return ret;
11748 offp = &off;
11750 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11751 if (!is_error(ret) && arg3) {
11752 abi_long ret2 = put_user_s64(off, arg3);
11753 if (is_error(ret2)) {
11754 ret = ret2;
11757 return ret;
11759 #endif
11760 #endif
11761 #ifdef TARGET_NR_vfork
11762 case TARGET_NR_vfork:
11763 return get_errno(do_fork(cpu_env,
11764 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11765 0, 0, 0, 0));
11766 #endif
11767 #ifdef TARGET_NR_ugetrlimit
11768 case TARGET_NR_ugetrlimit:
11770 struct rlimit rlim;
11771 int resource = target_to_host_resource(arg1);
11772 ret = get_errno(getrlimit(resource, &rlim));
11773 if (!is_error(ret)) {
11774 struct target_rlimit *target_rlim;
11775 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11776 return -TARGET_EFAULT;
11777 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11778 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11779 unlock_user_struct(target_rlim, arg2, 1);
11781 return ret;
11783 #endif
11784 #ifdef TARGET_NR_truncate64
11785 case TARGET_NR_truncate64:
11786 if (!(p = lock_user_string(arg1)))
11787 return -TARGET_EFAULT;
11788 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11789 unlock_user(p, arg1, 0);
11790 return ret;
11791 #endif
11792 #ifdef TARGET_NR_ftruncate64
11793 case TARGET_NR_ftruncate64:
11794 return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11795 #endif
11796 #ifdef TARGET_NR_stat64
11797 case TARGET_NR_stat64:
11798 if (!(p = lock_user_string(arg1))) {
11799 return -TARGET_EFAULT;
11801 ret = get_errno(stat(path(p), &st));
11802 unlock_user(p, arg1, 0);
11803 if (!is_error(ret))
11804 ret = host_to_target_stat64(cpu_env, arg2, &st);
11805 return ret;
11806 #endif
11807 #ifdef TARGET_NR_lstat64
11808 case TARGET_NR_lstat64:
11809 if (!(p = lock_user_string(arg1))) {
11810 return -TARGET_EFAULT;
11812 ret = get_errno(lstat(path(p), &st));
11813 unlock_user(p, arg1, 0);
11814 if (!is_error(ret))
11815 ret = host_to_target_stat64(cpu_env, arg2, &st);
11816 return ret;
11817 #endif
11818 #ifdef TARGET_NR_fstat64
11819 case TARGET_NR_fstat64:
11820 ret = get_errno(fstat(arg1, &st));
11821 if (!is_error(ret))
11822 ret = host_to_target_stat64(cpu_env, arg2, &st);
11823 return ret;
11824 #endif
11825 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11826 #ifdef TARGET_NR_fstatat64
11827 case TARGET_NR_fstatat64:
11828 #endif
11829 #ifdef TARGET_NR_newfstatat
11830 case TARGET_NR_newfstatat:
11831 #endif
11832 if (!(p = lock_user_string(arg2))) {
11833 return -TARGET_EFAULT;
11835 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11836 unlock_user(p, arg2, 0);
11837 if (!is_error(ret))
11838 ret = host_to_target_stat64(cpu_env, arg3, &st);
11839 return ret;
11840 #endif
11841 #if defined(TARGET_NR_statx)
11842 case TARGET_NR_statx:
11844 struct target_statx *target_stx;
11845 int dirfd = arg1;
11846 int flags = arg3;
11848 p = lock_user_string(arg2);
11849 if (p == NULL) {
11850 return -TARGET_EFAULT;
11852 #if defined(__NR_statx)
11855 * It is assumed that struct statx is architecture independent.
11857 struct target_statx host_stx;
11858 int mask = arg4;
11860 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11861 if (!is_error(ret)) {
11862 if (host_to_target_statx(&host_stx, arg5) != 0) {
11863 unlock_user(p, arg2, 0);
11864 return -TARGET_EFAULT;
11868 if (ret != -TARGET_ENOSYS) {
11869 unlock_user(p, arg2, 0);
11870 return ret;
11873 #endif
11874 ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11875 unlock_user(p, arg2, 0);
11877 if (!is_error(ret)) {
11878 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11879 return -TARGET_EFAULT;
11881 memset(target_stx, 0, sizeof(*target_stx));
11882 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11883 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11884 __put_user(st.st_ino, &target_stx->stx_ino);
11885 __put_user(st.st_mode, &target_stx->stx_mode);
11886 __put_user(st.st_uid, &target_stx->stx_uid);
11887 __put_user(st.st_gid, &target_stx->stx_gid);
11888 __put_user(st.st_nlink, &target_stx->stx_nlink);
11889 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11890 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11891 __put_user(st.st_size, &target_stx->stx_size);
11892 __put_user(st.st_blksize, &target_stx->stx_blksize);
11893 __put_user(st.st_blocks, &target_stx->stx_blocks);
11894 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11895 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11896 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11897 unlock_user_struct(target_stx, arg5, 1);
11900 return ret;
11901 #endif
11902 #ifdef TARGET_NR_lchown
11903 case TARGET_NR_lchown:
11904 if (!(p = lock_user_string(arg1)))
11905 return -TARGET_EFAULT;
11906 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11907 unlock_user(p, arg1, 0);
11908 return ret;
11909 #endif
11910 #ifdef TARGET_NR_getuid
11911 case TARGET_NR_getuid:
11912 return get_errno(high2lowuid(getuid()));
11913 #endif
11914 #ifdef TARGET_NR_getgid
11915 case TARGET_NR_getgid:
11916 return get_errno(high2lowgid(getgid()));
11917 #endif
11918 #ifdef TARGET_NR_geteuid
11919 case TARGET_NR_geteuid:
11920 return get_errno(high2lowuid(geteuid()));
11921 #endif
11922 #ifdef TARGET_NR_getegid
11923 case TARGET_NR_getegid:
11924 return get_errno(high2lowgid(getegid()));
11925 #endif
11926 case TARGET_NR_setreuid:
11927 return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11928 case TARGET_NR_setregid:
11929 return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11930 case TARGET_NR_getgroups:
11931 { /* the same code as for TARGET_NR_getgroups32 */
11932 int gidsetsize = arg1;
11933 target_id *target_grouplist;
11934 g_autofree gid_t *grouplist = NULL;
11935 int i;
11937 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11938 return -TARGET_EINVAL;
11940 if (gidsetsize > 0) {
11941 grouplist = g_try_new(gid_t, gidsetsize);
11942 if (!grouplist) {
11943 return -TARGET_ENOMEM;
11946 ret = get_errno(getgroups(gidsetsize, grouplist));
11947 if (!is_error(ret) && gidsetsize > 0) {
11948 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11949 gidsetsize * sizeof(target_id), 0);
11950 if (!target_grouplist) {
11951 return -TARGET_EFAULT;
11953 for (i = 0; i < ret; i++) {
11954 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11956 unlock_user(target_grouplist, arg2,
11957 gidsetsize * sizeof(target_id));
11959 return ret;
11961 case TARGET_NR_setgroups:
11962 { /* the same code as for TARGET_NR_setgroups32 */
11963 int gidsetsize = arg1;
11964 target_id *target_grouplist;
11965 g_autofree gid_t *grouplist = NULL;
11966 int i;
11968 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11969 return -TARGET_EINVAL;
11971 if (gidsetsize > 0) {
11972 grouplist = g_try_new(gid_t, gidsetsize);
11973 if (!grouplist) {
11974 return -TARGET_ENOMEM;
11976 target_grouplist = lock_user(VERIFY_READ, arg2,
11977 gidsetsize * sizeof(target_id), 1);
11978 if (!target_grouplist) {
11979 return -TARGET_EFAULT;
11981 for (i = 0; i < gidsetsize; i++) {
11982 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11984 unlock_user(target_grouplist, arg2,
11985 gidsetsize * sizeof(target_id));
11987 return get_errno(setgroups(gidsetsize, grouplist));
11989 case TARGET_NR_fchown:
11990 return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11991 #if defined(TARGET_NR_fchownat)
11992 case TARGET_NR_fchownat:
11993 if (!(p = lock_user_string(arg2)))
11994 return -TARGET_EFAULT;
11995 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11996 low2highgid(arg4), arg5));
11997 unlock_user(p, arg2, 0);
11998 return ret;
11999 #endif
12000 #ifdef TARGET_NR_setresuid
12001 case TARGET_NR_setresuid:
12002 return get_errno(sys_setresuid(low2highuid(arg1),
12003 low2highuid(arg2),
12004 low2highuid(arg3)));
12005 #endif
12006 #ifdef TARGET_NR_getresuid
12007 case TARGET_NR_getresuid:
12009 uid_t ruid, euid, suid;
12010 ret = get_errno(getresuid(&ruid, &euid, &suid));
12011 if (!is_error(ret)) {
12012 if (put_user_id(high2lowuid(ruid), arg1)
12013 || put_user_id(high2lowuid(euid), arg2)
12014 || put_user_id(high2lowuid(suid), arg3))
12015 return -TARGET_EFAULT;
12018 return ret;
12019 #endif
12020 #ifdef TARGET_NR_getresgid
12021 case TARGET_NR_setresgid:
12022 return get_errno(sys_setresgid(low2highgid(arg1),
12023 low2highgid(arg2),
12024 low2highgid(arg3)));
12025 #endif
12026 #ifdef TARGET_NR_getresgid
12027 case TARGET_NR_getresgid:
12029 gid_t rgid, egid, sgid;
12030 ret = get_errno(getresgid(&rgid, &egid, &sgid));
12031 if (!is_error(ret)) {
12032 if (put_user_id(high2lowgid(rgid), arg1)
12033 || put_user_id(high2lowgid(egid), arg2)
12034 || put_user_id(high2lowgid(sgid), arg3))
12035 return -TARGET_EFAULT;
12038 return ret;
12039 #endif
12040 #ifdef TARGET_NR_chown
12041 case TARGET_NR_chown:
12042 if (!(p = lock_user_string(arg1)))
12043 return -TARGET_EFAULT;
12044 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12045 unlock_user(p, arg1, 0);
12046 return ret;
12047 #endif
12048 case TARGET_NR_setuid:
12049 return get_errno(sys_setuid(low2highuid(arg1)));
12050 case TARGET_NR_setgid:
12051 return get_errno(sys_setgid(low2highgid(arg1)));
12052 case TARGET_NR_setfsuid:
12053 return get_errno(setfsuid(arg1));
12054 case TARGET_NR_setfsgid:
12055 return get_errno(setfsgid(arg1));
12057 #ifdef TARGET_NR_lchown32
12058 case TARGET_NR_lchown32:
12059 if (!(p = lock_user_string(arg1)))
12060 return -TARGET_EFAULT;
12061 ret = get_errno(lchown(p, arg2, arg3));
12062 unlock_user(p, arg1, 0);
12063 return ret;
12064 #endif
12065 #ifdef TARGET_NR_getuid32
12066 case TARGET_NR_getuid32:
12067 return get_errno(getuid());
12068 #endif
12070 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12071 /* Alpha specific */
12072 case TARGET_NR_getxuid:
12074 uid_t euid;
12075 euid=geteuid();
12076 cpu_env->ir[IR_A4]=euid;
12078 return get_errno(getuid());
12079 #endif
12080 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12081 /* Alpha specific */
12082 case TARGET_NR_getxgid:
12084 uid_t egid;
12085 egid=getegid();
12086 cpu_env->ir[IR_A4]=egid;
12088 return get_errno(getgid());
12089 #endif
12090 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12091 /* Alpha specific */
12092 case TARGET_NR_osf_getsysinfo:
12093 ret = -TARGET_EOPNOTSUPP;
12094 switch (arg1) {
12095 case TARGET_GSI_IEEE_FP_CONTROL:
12097 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12098 uint64_t swcr = cpu_env->swcr;
12100 swcr &= ~SWCR_STATUS_MASK;
12101 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12103 if (put_user_u64 (swcr, arg2))
12104 return -TARGET_EFAULT;
12105 ret = 0;
12107 break;
12109 /* case GSI_IEEE_STATE_AT_SIGNAL:
12110 -- Not implemented in linux kernel.
12111 case GSI_UACPROC:
12112 -- Retrieves current unaligned access state; not much used.
12113 case GSI_PROC_TYPE:
12114 -- Retrieves implver information; surely not used.
12115 case GSI_GET_HWRPB:
12116 -- Grabs a copy of the HWRPB; surely not used.
12119 return ret;
12120 #endif
12121 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12122 /* Alpha specific */
12123 case TARGET_NR_osf_setsysinfo:
12124 ret = -TARGET_EOPNOTSUPP;
12125 switch (arg1) {
12126 case TARGET_SSI_IEEE_FP_CONTROL:
12128 uint64_t swcr, fpcr;
12130 if (get_user_u64 (swcr, arg2)) {
12131 return -TARGET_EFAULT;
12135 * The kernel calls swcr_update_status to update the
12136 * status bits from the fpcr at every point that it
12137 * could be queried. Therefore, we store the status
12138 * bits only in FPCR.
12140 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12142 fpcr = cpu_alpha_load_fpcr(cpu_env);
12143 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12144 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12145 cpu_alpha_store_fpcr(cpu_env, fpcr);
12146 ret = 0;
12148 break;
12150 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12152 uint64_t exc, fpcr, fex;
12154 if (get_user_u64(exc, arg2)) {
12155 return -TARGET_EFAULT;
12157 exc &= SWCR_STATUS_MASK;
12158 fpcr = cpu_alpha_load_fpcr(cpu_env);
12160 /* Old exceptions are not signaled. */
12161 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12162 fex = exc & ~fex;
12163 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12164 fex &= (cpu_env)->swcr;
12166 /* Update the hardware fpcr. */
12167 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12168 cpu_alpha_store_fpcr(cpu_env, fpcr);
12170 if (fex) {
12171 int si_code = TARGET_FPE_FLTUNK;
12172 target_siginfo_t info;
12174 if (fex & SWCR_TRAP_ENABLE_DNO) {
12175 si_code = TARGET_FPE_FLTUND;
12177 if (fex & SWCR_TRAP_ENABLE_INE) {
12178 si_code = TARGET_FPE_FLTRES;
12180 if (fex & SWCR_TRAP_ENABLE_UNF) {
12181 si_code = TARGET_FPE_FLTUND;
12183 if (fex & SWCR_TRAP_ENABLE_OVF) {
12184 si_code = TARGET_FPE_FLTOVF;
12186 if (fex & SWCR_TRAP_ENABLE_DZE) {
12187 si_code = TARGET_FPE_FLTDIV;
12189 if (fex & SWCR_TRAP_ENABLE_INV) {
12190 si_code = TARGET_FPE_FLTINV;
12193 info.si_signo = SIGFPE;
12194 info.si_errno = 0;
12195 info.si_code = si_code;
12196 info._sifields._sigfault._addr = (cpu_env)->pc;
12197 queue_signal(cpu_env, info.si_signo,
12198 QEMU_SI_FAULT, &info);
12200 ret = 0;
12202 break;
12204 /* case SSI_NVPAIRS:
12205 -- Used with SSIN_UACPROC to enable unaligned accesses.
12206 case SSI_IEEE_STATE_AT_SIGNAL:
12207 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12208 -- Not implemented in linux kernel
12211 return ret;
12212 #endif
12213 #ifdef TARGET_NR_osf_sigprocmask
12214 /* Alpha specific. */
12215 case TARGET_NR_osf_sigprocmask:
12217 abi_ulong mask;
12218 int how;
12219 sigset_t set, oldset;
12221 switch(arg1) {
12222 case TARGET_SIG_BLOCK:
12223 how = SIG_BLOCK;
12224 break;
12225 case TARGET_SIG_UNBLOCK:
12226 how = SIG_UNBLOCK;
12227 break;
12228 case TARGET_SIG_SETMASK:
12229 how = SIG_SETMASK;
12230 break;
12231 default:
12232 return -TARGET_EINVAL;
12234 mask = arg2;
12235 target_to_host_old_sigset(&set, &mask);
12236 ret = do_sigprocmask(how, &set, &oldset);
12237 if (!ret) {
12238 host_to_target_old_sigset(&mask, &oldset);
12239 ret = mask;
12242 return ret;
12243 #endif
12245 #ifdef TARGET_NR_getgid32
12246 case TARGET_NR_getgid32:
12247 return get_errno(getgid());
12248 #endif
12249 #ifdef TARGET_NR_geteuid32
12250 case TARGET_NR_geteuid32:
12251 return get_errno(geteuid());
12252 #endif
12253 #ifdef TARGET_NR_getegid32
12254 case TARGET_NR_getegid32:
12255 return get_errno(getegid());
12256 #endif
12257 #ifdef TARGET_NR_setreuid32
12258 case TARGET_NR_setreuid32:
12259 return get_errno(setreuid(arg1, arg2));
12260 #endif
12261 #ifdef TARGET_NR_setregid32
12262 case TARGET_NR_setregid32:
12263 return get_errno(setregid(arg1, arg2));
12264 #endif
12265 #ifdef TARGET_NR_getgroups32
12266 case TARGET_NR_getgroups32:
12267 { /* the same code as for TARGET_NR_getgroups */
12268 int gidsetsize = arg1;
12269 uint32_t *target_grouplist;
12270 g_autofree gid_t *grouplist = NULL;
12271 int i;
12273 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12274 return -TARGET_EINVAL;
12276 if (gidsetsize > 0) {
12277 grouplist = g_try_new(gid_t, gidsetsize);
12278 if (!grouplist) {
12279 return -TARGET_ENOMEM;
12282 ret = get_errno(getgroups(gidsetsize, grouplist));
12283 if (!is_error(ret) && gidsetsize > 0) {
12284 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12285 gidsetsize * 4, 0);
12286 if (!target_grouplist) {
12287 return -TARGET_EFAULT;
12289 for (i = 0; i < ret; i++) {
12290 target_grouplist[i] = tswap32(grouplist[i]);
12292 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12294 return ret;
12296 #endif
12297 #ifdef TARGET_NR_setgroups32
12298 case TARGET_NR_setgroups32:
12299 { /* the same code as for TARGET_NR_setgroups */
12300 int gidsetsize = arg1;
12301 uint32_t *target_grouplist;
12302 g_autofree gid_t *grouplist = NULL;
12303 int i;
12305 if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12306 return -TARGET_EINVAL;
12308 if (gidsetsize > 0) {
12309 grouplist = g_try_new(gid_t, gidsetsize);
12310 if (!grouplist) {
12311 return -TARGET_ENOMEM;
12313 target_grouplist = lock_user(VERIFY_READ, arg2,
12314 gidsetsize * 4, 1);
12315 if (!target_grouplist) {
12316 return -TARGET_EFAULT;
12318 for (i = 0; i < gidsetsize; i++) {
12319 grouplist[i] = tswap32(target_grouplist[i]);
12321 unlock_user(target_grouplist, arg2, 0);
12323 return get_errno(setgroups(gidsetsize, grouplist));
12325 #endif
12326 #ifdef TARGET_NR_fchown32
12327 case TARGET_NR_fchown32:
12328 return get_errno(fchown(arg1, arg2, arg3));
12329 #endif
12330 #ifdef TARGET_NR_setresuid32
12331 case TARGET_NR_setresuid32:
12332 return get_errno(sys_setresuid(arg1, arg2, arg3));
12333 #endif
12334 #ifdef TARGET_NR_getresuid32
12335 case TARGET_NR_getresuid32:
12337 uid_t ruid, euid, suid;
12338 ret = get_errno(getresuid(&ruid, &euid, &suid));
12339 if (!is_error(ret)) {
12340 if (put_user_u32(ruid, arg1)
12341 || put_user_u32(euid, arg2)
12342 || put_user_u32(suid, arg3))
12343 return -TARGET_EFAULT;
12346 return ret;
12347 #endif
12348 #ifdef TARGET_NR_setresgid32
12349 case TARGET_NR_setresgid32:
12350 return get_errno(sys_setresgid(arg1, arg2, arg3));
12351 #endif
12352 #ifdef TARGET_NR_getresgid32
12353 case TARGET_NR_getresgid32:
12355 gid_t rgid, egid, sgid;
12356 ret = get_errno(getresgid(&rgid, &egid, &sgid));
12357 if (!is_error(ret)) {
12358 if (put_user_u32(rgid, arg1)
12359 || put_user_u32(egid, arg2)
12360 || put_user_u32(sgid, arg3))
12361 return -TARGET_EFAULT;
12364 return ret;
12365 #endif
12366 #ifdef TARGET_NR_chown32
12367 case TARGET_NR_chown32:
12368 if (!(p = lock_user_string(arg1)))
12369 return -TARGET_EFAULT;
12370 ret = get_errno(chown(p, arg2, arg3));
12371 unlock_user(p, arg1, 0);
12372 return ret;
12373 #endif
12374 #ifdef TARGET_NR_setuid32
12375 case TARGET_NR_setuid32:
12376 return get_errno(sys_setuid(arg1));
12377 #endif
12378 #ifdef TARGET_NR_setgid32
12379 case TARGET_NR_setgid32:
12380 return get_errno(sys_setgid(arg1));
12381 #endif
12382 #ifdef TARGET_NR_setfsuid32
12383 case TARGET_NR_setfsuid32:
12384 return get_errno(setfsuid(arg1));
12385 #endif
12386 #ifdef TARGET_NR_setfsgid32
12387 case TARGET_NR_setfsgid32:
12388 return get_errno(setfsgid(arg1));
12389 #endif
12390 #ifdef TARGET_NR_mincore
12391 case TARGET_NR_mincore:
12393 void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12394 if (!a) {
12395 return -TARGET_ENOMEM;
12397 p = lock_user_string(arg3);
12398 if (!p) {
12399 ret = -TARGET_EFAULT;
12400 } else {
12401 ret = get_errno(mincore(a, arg2, p));
12402 unlock_user(p, arg3, ret);
12404 unlock_user(a, arg1, 0);
12406 return ret;
12407 #endif
12408 #ifdef TARGET_NR_arm_fadvise64_64
12409 case TARGET_NR_arm_fadvise64_64:
12410 /* arm_fadvise64_64 looks like fadvise64_64 but
12411 * with different argument order: fd, advice, offset, len
12412 * rather than the usual fd, offset, len, advice.
12413 * Note that offset and len are both 64-bit so appear as
12414 * pairs of 32-bit registers.
12416 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12417 target_offset64(arg5, arg6), arg2);
12418 return -host_to_target_errno(ret);
12419 #endif
12421 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12423 #ifdef TARGET_NR_fadvise64_64
12424 case TARGET_NR_fadvise64_64:
12425 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12426 /* 6 args: fd, advice, offset (high, low), len (high, low) */
12427 ret = arg2;
12428 arg2 = arg3;
12429 arg3 = arg4;
12430 arg4 = arg5;
12431 arg5 = arg6;
12432 arg6 = ret;
12433 #else
12434 /* 6 args: fd, offset (high, low), len (high, low), advice */
12435 if (regpairs_aligned(cpu_env, num)) {
12436 /* offset is in (3,4), len in (5,6) and advice in 7 */
12437 arg2 = arg3;
12438 arg3 = arg4;
12439 arg4 = arg5;
12440 arg5 = arg6;
12441 arg6 = arg7;
12443 #endif
12444 ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12445 target_offset64(arg4, arg5), arg6);
12446 return -host_to_target_errno(ret);
12447 #endif
12449 #ifdef TARGET_NR_fadvise64
12450 case TARGET_NR_fadvise64:
12451 /* 5 args: fd, offset (high, low), len, advice */
12452 if (regpairs_aligned(cpu_env, num)) {
12453 /* offset is in (3,4), len in 5 and advice in 6 */
12454 arg2 = arg3;
12455 arg3 = arg4;
12456 arg4 = arg5;
12457 arg5 = arg6;
12459 ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12460 return -host_to_target_errno(ret);
12461 #endif
12463 #else /* not a 32-bit ABI */
12464 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12465 #ifdef TARGET_NR_fadvise64_64
12466 case TARGET_NR_fadvise64_64:
12467 #endif
12468 #ifdef TARGET_NR_fadvise64
12469 case TARGET_NR_fadvise64:
12470 #endif
12471 #ifdef TARGET_S390X
12472 switch (arg4) {
12473 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12474 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12475 case 6: arg4 = POSIX_FADV_DONTNEED; break;
12476 case 7: arg4 = POSIX_FADV_NOREUSE; break;
12477 default: break;
12479 #endif
12480 return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12481 #endif
12482 #endif /* end of 64-bit ABI fadvise handling */
12484 #ifdef TARGET_NR_madvise
12485 case TARGET_NR_madvise:
12486 return target_madvise(arg1, arg2, arg3);
12487 #endif
12488 #ifdef TARGET_NR_fcntl64
12489 case TARGET_NR_fcntl64:
12491 int cmd;
12492 struct flock64 fl;
12493 from_flock64_fn *copyfrom = copy_from_user_flock64;
12494 to_flock64_fn *copyto = copy_to_user_flock64;
12496 #ifdef TARGET_ARM
12497 if (!cpu_env->eabi) {
12498 copyfrom = copy_from_user_oabi_flock64;
12499 copyto = copy_to_user_oabi_flock64;
12501 #endif
12503 cmd = target_to_host_fcntl_cmd(arg2);
12504 if (cmd == -TARGET_EINVAL) {
12505 return cmd;
12508 switch(arg2) {
12509 case TARGET_F_GETLK64:
12510 ret = copyfrom(&fl, arg3);
12511 if (ret) {
12512 break;
12514 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12515 if (ret == 0) {
12516 ret = copyto(arg3, &fl);
12518 break;
12520 case TARGET_F_SETLK64:
12521 case TARGET_F_SETLKW64:
12522 ret = copyfrom(&fl, arg3);
12523 if (ret) {
12524 break;
12526 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12527 break;
12528 default:
12529 ret = do_fcntl(arg1, arg2, arg3);
12530 break;
12532 return ret;
12534 #endif
12535 #ifdef TARGET_NR_cacheflush
12536 case TARGET_NR_cacheflush:
12537 /* self-modifying code is handled automatically, so nothing needed */
12538 return 0;
12539 #endif
12540 #ifdef TARGET_NR_getpagesize
12541 case TARGET_NR_getpagesize:
12542 return TARGET_PAGE_SIZE;
12543 #endif
12544 case TARGET_NR_gettid:
12545 return get_errno(sys_gettid());
12546 #ifdef TARGET_NR_readahead
12547 case TARGET_NR_readahead:
12548 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12549 if (regpairs_aligned(cpu_env, num)) {
12550 arg2 = arg3;
12551 arg3 = arg4;
12552 arg4 = arg5;
12554 ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12555 #else
12556 ret = get_errno(readahead(arg1, arg2, arg3));
12557 #endif
12558 return ret;
12559 #endif
12560 #ifdef CONFIG_ATTR
12561 #ifdef TARGET_NR_setxattr
12562 case TARGET_NR_listxattr:
12563 case TARGET_NR_llistxattr:
12565 void *p, *b = 0;
12566 if (arg2) {
12567 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12568 if (!b) {
12569 return -TARGET_EFAULT;
12572 p = lock_user_string(arg1);
12573 if (p) {
12574 if (num == TARGET_NR_listxattr) {
12575 ret = get_errno(listxattr(p, b, arg3));
12576 } else {
12577 ret = get_errno(llistxattr(p, b, arg3));
12579 } else {
12580 ret = -TARGET_EFAULT;
12582 unlock_user(p, arg1, 0);
12583 unlock_user(b, arg2, arg3);
12584 return ret;
12586 case TARGET_NR_flistxattr:
12588 void *b = 0;
12589 if (arg2) {
12590 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12591 if (!b) {
12592 return -TARGET_EFAULT;
12595 ret = get_errno(flistxattr(arg1, b, arg3));
12596 unlock_user(b, arg2, arg3);
12597 return ret;
12599 case TARGET_NR_setxattr:
12600 case TARGET_NR_lsetxattr:
12602 void *p, *n, *v = 0;
12603 if (arg3) {
12604 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12605 if (!v) {
12606 return -TARGET_EFAULT;
12609 p = lock_user_string(arg1);
12610 n = lock_user_string(arg2);
12611 if (p && n) {
12612 if (num == TARGET_NR_setxattr) {
12613 ret = get_errno(setxattr(p, n, v, arg4, arg5));
12614 } else {
12615 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12617 } else {
12618 ret = -TARGET_EFAULT;
12620 unlock_user(p, arg1, 0);
12621 unlock_user(n, arg2, 0);
12622 unlock_user(v, arg3, 0);
12624 return ret;
12625 case TARGET_NR_fsetxattr:
12627 void *n, *v = 0;
12628 if (arg3) {
12629 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12630 if (!v) {
12631 return -TARGET_EFAULT;
12634 n = lock_user_string(arg2);
12635 if (n) {
12636 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12637 } else {
12638 ret = -TARGET_EFAULT;
12640 unlock_user(n, arg2, 0);
12641 unlock_user(v, arg3, 0);
12643 return ret;
12644 case TARGET_NR_getxattr:
12645 case TARGET_NR_lgetxattr:
12647 void *p, *n, *v = 0;
12648 if (arg3) {
12649 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12650 if (!v) {
12651 return -TARGET_EFAULT;
12654 p = lock_user_string(arg1);
12655 n = lock_user_string(arg2);
12656 if (p && n) {
12657 if (num == TARGET_NR_getxattr) {
12658 ret = get_errno(getxattr(p, n, v, arg4));
12659 } else {
12660 ret = get_errno(lgetxattr(p, n, v, arg4));
12662 } else {
12663 ret = -TARGET_EFAULT;
12665 unlock_user(p, arg1, 0);
12666 unlock_user(n, arg2, 0);
12667 unlock_user(v, arg3, arg4);
12669 return ret;
12670 case TARGET_NR_fgetxattr:
12672 void *n, *v = 0;
12673 if (arg3) {
12674 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12675 if (!v) {
12676 return -TARGET_EFAULT;
12679 n = lock_user_string(arg2);
12680 if (n) {
12681 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12682 } else {
12683 ret = -TARGET_EFAULT;
12685 unlock_user(n, arg2, 0);
12686 unlock_user(v, arg3, arg4);
12688 return ret;
12689 case TARGET_NR_removexattr:
12690 case TARGET_NR_lremovexattr:
12692 void *p, *n;
12693 p = lock_user_string(arg1);
12694 n = lock_user_string(arg2);
12695 if (p && n) {
12696 if (num == TARGET_NR_removexattr) {
12697 ret = get_errno(removexattr(p, n));
12698 } else {
12699 ret = get_errno(lremovexattr(p, n));
12701 } else {
12702 ret = -TARGET_EFAULT;
12704 unlock_user(p, arg1, 0);
12705 unlock_user(n, arg2, 0);
12707 return ret;
12708 case TARGET_NR_fremovexattr:
12710 void *n;
12711 n = lock_user_string(arg2);
12712 if (n) {
12713 ret = get_errno(fremovexattr(arg1, n));
12714 } else {
12715 ret = -TARGET_EFAULT;
12717 unlock_user(n, arg2, 0);
12719 return ret;
12720 #endif
12721 #endif /* CONFIG_ATTR */
12722 #ifdef TARGET_NR_set_thread_area
12723 case TARGET_NR_set_thread_area:
12724 #if defined(TARGET_MIPS)
12725 cpu_env->active_tc.CP0_UserLocal = arg1;
12726 return 0;
12727 #elif defined(TARGET_CRIS)
12728 if (arg1 & 0xff)
12729 ret = -TARGET_EINVAL;
12730 else {
12731 cpu_env->pregs[PR_PID] = arg1;
12732 ret = 0;
12734 return ret;
12735 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12736 return do_set_thread_area(cpu_env, arg1);
12737 #elif defined(TARGET_M68K)
12739 TaskState *ts = cpu->opaque;
12740 ts->tp_value = arg1;
12741 return 0;
12743 #else
12744 return -TARGET_ENOSYS;
12745 #endif
12746 #endif
12747 #ifdef TARGET_NR_get_thread_area
12748 case TARGET_NR_get_thread_area:
12749 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12750 return do_get_thread_area(cpu_env, arg1);
12751 #elif defined(TARGET_M68K)
12753 TaskState *ts = cpu->opaque;
12754 return ts->tp_value;
12756 #else
12757 return -TARGET_ENOSYS;
12758 #endif
12759 #endif
12760 #ifdef TARGET_NR_getdomainname
12761 case TARGET_NR_getdomainname:
12762 return -TARGET_ENOSYS;
12763 #endif
12765 #ifdef TARGET_NR_clock_settime
12766 case TARGET_NR_clock_settime:
12768 struct timespec ts;
12770 ret = target_to_host_timespec(&ts, arg2);
12771 if (!is_error(ret)) {
12772 ret = get_errno(clock_settime(arg1, &ts));
12774 return ret;
12776 #endif
12777 #ifdef TARGET_NR_clock_settime64
12778 case TARGET_NR_clock_settime64:
12780 struct timespec ts;
12782 ret = target_to_host_timespec64(&ts, arg2);
12783 if (!is_error(ret)) {
12784 ret = get_errno(clock_settime(arg1, &ts));
12786 return ret;
12788 #endif
12789 #ifdef TARGET_NR_clock_gettime
12790 case TARGET_NR_clock_gettime:
12792 struct timespec ts;
12793 ret = get_errno(clock_gettime(arg1, &ts));
12794 if (!is_error(ret)) {
12795 ret = host_to_target_timespec(arg2, &ts);
12797 return ret;
12799 #endif
12800 #ifdef TARGET_NR_clock_gettime64
12801 case TARGET_NR_clock_gettime64:
12803 struct timespec ts;
12804 ret = get_errno(clock_gettime(arg1, &ts));
12805 if (!is_error(ret)) {
12806 ret = host_to_target_timespec64(arg2, &ts);
12808 return ret;
12810 #endif
12811 #ifdef TARGET_NR_clock_getres
12812 case TARGET_NR_clock_getres:
12814 struct timespec ts;
12815 ret = get_errno(clock_getres(arg1, &ts));
12816 if (!is_error(ret)) {
12817 host_to_target_timespec(arg2, &ts);
12819 return ret;
12821 #endif
12822 #ifdef TARGET_NR_clock_getres_time64
12823 case TARGET_NR_clock_getres_time64:
12825 struct timespec ts;
12826 ret = get_errno(clock_getres(arg1, &ts));
12827 if (!is_error(ret)) {
12828 host_to_target_timespec64(arg2, &ts);
12830 return ret;
12832 #endif
12833 #ifdef TARGET_NR_clock_nanosleep
12834 case TARGET_NR_clock_nanosleep:
12836 struct timespec ts;
12837 if (target_to_host_timespec(&ts, arg3)) {
12838 return -TARGET_EFAULT;
12840 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12841 &ts, arg4 ? &ts : NULL));
12843 * if the call is interrupted by a signal handler, it fails
12844 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12845 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12847 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12848 host_to_target_timespec(arg4, &ts)) {
12849 return -TARGET_EFAULT;
12852 return ret;
12854 #endif
12855 #ifdef TARGET_NR_clock_nanosleep_time64
12856 case TARGET_NR_clock_nanosleep_time64:
12858 struct timespec ts;
12860 if (target_to_host_timespec64(&ts, arg3)) {
12861 return -TARGET_EFAULT;
12864 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12865 &ts, arg4 ? &ts : NULL));
12867 if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12868 host_to_target_timespec64(arg4, &ts)) {
12869 return -TARGET_EFAULT;
12871 return ret;
12873 #endif
12875 #if defined(TARGET_NR_set_tid_address)
12876 case TARGET_NR_set_tid_address:
12878 TaskState *ts = cpu->opaque;
12879 ts->child_tidptr = arg1;
12880 /* do not call host set_tid_address() syscall, instead return tid() */
12881 return get_errno(sys_gettid());
12883 #endif
12885 case TARGET_NR_tkill:
12886 return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12888 case TARGET_NR_tgkill:
12889 return get_errno(safe_tgkill((int)arg1, (int)arg2,
12890 target_to_host_signal(arg3)));
12892 #ifdef TARGET_NR_set_robust_list
12893 case TARGET_NR_set_robust_list:
12894 case TARGET_NR_get_robust_list:
12895 /* The ABI for supporting robust futexes has userspace pass
12896 * the kernel a pointer to a linked list which is updated by
12897 * userspace after the syscall; the list is walked by the kernel
12898 * when the thread exits. Since the linked list in QEMU guest
12899 * memory isn't a valid linked list for the host and we have
12900 * no way to reliably intercept the thread-death event, we can't
12901 * support these. Silently return ENOSYS so that guest userspace
12902 * falls back to a non-robust futex implementation (which should
12903 * be OK except in the corner case of the guest crashing while
12904 * holding a mutex that is shared with another process via
12905 * shared memory).
12907 return -TARGET_ENOSYS;
12908 #endif
12910 #if defined(TARGET_NR_utimensat)
12911 case TARGET_NR_utimensat:
12913 struct timespec *tsp, ts[2];
12914 if (!arg3) {
12915 tsp = NULL;
12916 } else {
12917 if (target_to_host_timespec(ts, arg3)) {
12918 return -TARGET_EFAULT;
12920 if (target_to_host_timespec(ts + 1, arg3 +
12921 sizeof(struct target_timespec))) {
12922 return -TARGET_EFAULT;
12924 tsp = ts;
12926 if (!arg2)
12927 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12928 else {
12929 if (!(p = lock_user_string(arg2))) {
12930 return -TARGET_EFAULT;
12932 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12933 unlock_user(p, arg2, 0);
12936 return ret;
12937 #endif
12938 #ifdef TARGET_NR_utimensat_time64
12939 case TARGET_NR_utimensat_time64:
12941 struct timespec *tsp, ts[2];
12942 if (!arg3) {
12943 tsp = NULL;
12944 } else {
12945 if (target_to_host_timespec64(ts, arg3)) {
12946 return -TARGET_EFAULT;
12948 if (target_to_host_timespec64(ts + 1, arg3 +
12949 sizeof(struct target__kernel_timespec))) {
12950 return -TARGET_EFAULT;
12952 tsp = ts;
12954 if (!arg2)
12955 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12956 else {
12957 p = lock_user_string(arg2);
12958 if (!p) {
12959 return -TARGET_EFAULT;
12961 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12962 unlock_user(p, arg2, 0);
12965 return ret;
12966 #endif
12967 #ifdef TARGET_NR_futex
12968 case TARGET_NR_futex:
12969 return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12970 #endif
12971 #ifdef TARGET_NR_futex_time64
12972 case TARGET_NR_futex_time64:
12973 return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12974 #endif
12975 #ifdef CONFIG_INOTIFY
12976 #if defined(TARGET_NR_inotify_init)
12977 case TARGET_NR_inotify_init:
12978 ret = get_errno(inotify_init());
12979 if (ret >= 0) {
12980 fd_trans_register(ret, &target_inotify_trans);
12982 return ret;
12983 #endif
12984 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12985 case TARGET_NR_inotify_init1:
12986 ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12987 fcntl_flags_tbl)));
12988 if (ret >= 0) {
12989 fd_trans_register(ret, &target_inotify_trans);
12991 return ret;
12992 #endif
12993 #if defined(TARGET_NR_inotify_add_watch)
12994 case TARGET_NR_inotify_add_watch:
12995 p = lock_user_string(arg2);
12996 ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12997 unlock_user(p, arg2, 0);
12998 return ret;
12999 #endif
13000 #if defined(TARGET_NR_inotify_rm_watch)
13001 case TARGET_NR_inotify_rm_watch:
13002 return get_errno(inotify_rm_watch(arg1, arg2));
13003 #endif
13004 #endif
13006 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13007 case TARGET_NR_mq_open:
13009 struct mq_attr posix_mq_attr;
13010 struct mq_attr *pposix_mq_attr;
13011 int host_flags;
13013 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13014 pposix_mq_attr = NULL;
13015 if (arg4) {
13016 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13017 return -TARGET_EFAULT;
13019 pposix_mq_attr = &posix_mq_attr;
13021 p = lock_user_string(arg1 - 1);
13022 if (!p) {
13023 return -TARGET_EFAULT;
13025 ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13026 unlock_user (p, arg1, 0);
13028 return ret;
13030 case TARGET_NR_mq_unlink:
13031 p = lock_user_string(arg1 - 1);
13032 if (!p) {
13033 return -TARGET_EFAULT;
13035 ret = get_errno(mq_unlink(p));
13036 unlock_user (p, arg1, 0);
13037 return ret;
13039 #ifdef TARGET_NR_mq_timedsend
13040 case TARGET_NR_mq_timedsend:
13042 struct timespec ts;
13044 p = lock_user (VERIFY_READ, arg2, arg3, 1);
13045 if (arg5 != 0) {
13046 if (target_to_host_timespec(&ts, arg5)) {
13047 return -TARGET_EFAULT;
13049 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13050 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13051 return -TARGET_EFAULT;
13053 } else {
13054 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13056 unlock_user (p, arg2, arg3);
13058 return ret;
13059 #endif
13060 #ifdef TARGET_NR_mq_timedsend_time64
13061 case TARGET_NR_mq_timedsend_time64:
13063 struct timespec ts;
13065 p = lock_user(VERIFY_READ, arg2, arg3, 1);
13066 if (arg5 != 0) {
13067 if (target_to_host_timespec64(&ts, arg5)) {
13068 return -TARGET_EFAULT;
13070 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13071 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13072 return -TARGET_EFAULT;
13074 } else {
13075 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13077 unlock_user(p, arg2, arg3);
13079 return ret;
13080 #endif
13082 #ifdef TARGET_NR_mq_timedreceive
13083 case TARGET_NR_mq_timedreceive:
13085 struct timespec ts;
13086 unsigned int prio;
13088 p = lock_user (VERIFY_READ, arg2, arg3, 1);
13089 if (arg5 != 0) {
13090 if (target_to_host_timespec(&ts, arg5)) {
13091 return -TARGET_EFAULT;
13093 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13094 &prio, &ts));
13095 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13096 return -TARGET_EFAULT;
13098 } else {
13099 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13100 &prio, NULL));
13102 unlock_user (p, arg2, arg3);
13103 if (arg4 != 0)
13104 put_user_u32(prio, arg4);
13106 return ret;
13107 #endif
13108 #ifdef TARGET_NR_mq_timedreceive_time64
13109 case TARGET_NR_mq_timedreceive_time64:
13111 struct timespec ts;
13112 unsigned int prio;
13114 p = lock_user(VERIFY_READ, arg2, arg3, 1);
13115 if (arg5 != 0) {
13116 if (target_to_host_timespec64(&ts, arg5)) {
13117 return -TARGET_EFAULT;
13119 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13120 &prio, &ts));
13121 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13122 return -TARGET_EFAULT;
13124 } else {
13125 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13126 &prio, NULL));
13128 unlock_user(p, arg2, arg3);
13129 if (arg4 != 0) {
13130 put_user_u32(prio, arg4);
13133 return ret;
13134 #endif
13136 /* Not implemented for now... */
13137 /* case TARGET_NR_mq_notify: */
13138 /* break; */
13140 case TARGET_NR_mq_getsetattr:
13142 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13143 ret = 0;
13144 if (arg2 != 0) {
13145 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13146 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13147 &posix_mq_attr_out));
13148 } else if (arg3 != 0) {
13149 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13151 if (ret == 0 && arg3 != 0) {
13152 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13155 return ret;
13156 #endif
13158 #ifdef CONFIG_SPLICE
13159 #ifdef TARGET_NR_tee
13160 case TARGET_NR_tee:
13162 ret = get_errno(tee(arg1,arg2,arg3,arg4));
13164 return ret;
13165 #endif
13166 #ifdef TARGET_NR_splice
13167 case TARGET_NR_splice:
13169 loff_t loff_in, loff_out;
13170 loff_t *ploff_in = NULL, *ploff_out = NULL;
13171 if (arg2) {
13172 if (get_user_u64(loff_in, arg2)) {
13173 return -TARGET_EFAULT;
13175 ploff_in = &loff_in;
13177 if (arg4) {
13178 if (get_user_u64(loff_out, arg4)) {
13179 return -TARGET_EFAULT;
13181 ploff_out = &loff_out;
13183 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13184 if (arg2) {
13185 if (put_user_u64(loff_in, arg2)) {
13186 return -TARGET_EFAULT;
13189 if (arg4) {
13190 if (put_user_u64(loff_out, arg4)) {
13191 return -TARGET_EFAULT;
13195 return ret;
13196 #endif
13197 #ifdef TARGET_NR_vmsplice
13198 case TARGET_NR_vmsplice:
13200 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13201 if (vec != NULL) {
13202 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13203 unlock_iovec(vec, arg2, arg3, 0);
13204 } else {
13205 ret = -host_to_target_errno(errno);
13208 return ret;
13209 #endif
13210 #endif /* CONFIG_SPLICE */
13211 #ifdef CONFIG_EVENTFD
13212 #if defined(TARGET_NR_eventfd)
13213 case TARGET_NR_eventfd:
13214 ret = get_errno(eventfd(arg1, 0));
13215 if (ret >= 0) {
13216 fd_trans_register(ret, &target_eventfd_trans);
13218 return ret;
13219 #endif
13220 #if defined(TARGET_NR_eventfd2)
13221 case TARGET_NR_eventfd2:
13223 int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13224 if (arg2 & TARGET_O_NONBLOCK) {
13225 host_flags |= O_NONBLOCK;
13227 if (arg2 & TARGET_O_CLOEXEC) {
13228 host_flags |= O_CLOEXEC;
13230 ret = get_errno(eventfd(arg1, host_flags));
13231 if (ret >= 0) {
13232 fd_trans_register(ret, &target_eventfd_trans);
13234 return ret;
13236 #endif
13237 #endif /* CONFIG_EVENTFD */
13238 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13239 case TARGET_NR_fallocate:
13240 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13241 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13242 target_offset64(arg5, arg6)));
13243 #else
13244 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13245 #endif
13246 return ret;
13247 #endif
13248 #if defined(CONFIG_SYNC_FILE_RANGE)
13249 #if defined(TARGET_NR_sync_file_range)
13250 case TARGET_NR_sync_file_range:
13251 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13252 #if defined(TARGET_MIPS)
13253 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13254 target_offset64(arg5, arg6), arg7));
13255 #else
13256 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13257 target_offset64(arg4, arg5), arg6));
13258 #endif /* !TARGET_MIPS */
13259 #else
13260 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13261 #endif
13262 return ret;
13263 #endif
13264 #if defined(TARGET_NR_sync_file_range2) || \
13265 defined(TARGET_NR_arm_sync_file_range)
13266 #if defined(TARGET_NR_sync_file_range2)
13267 case TARGET_NR_sync_file_range2:
13268 #endif
13269 #if defined(TARGET_NR_arm_sync_file_range)
13270 case TARGET_NR_arm_sync_file_range:
13271 #endif
13272 /* This is like sync_file_range but the arguments are reordered */
13273 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13274 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13275 target_offset64(arg5, arg6), arg2));
13276 #else
13277 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13278 #endif
13279 return ret;
13280 #endif
13281 #endif
13282 #if defined(TARGET_NR_signalfd4)
13283 case TARGET_NR_signalfd4:
13284 return do_signalfd4(arg1, arg2, arg4);
13285 #endif
13286 #if defined(TARGET_NR_signalfd)
13287 case TARGET_NR_signalfd:
13288 return do_signalfd4(arg1, arg2, 0);
13289 #endif
13290 #if defined(CONFIG_EPOLL)
13291 #if defined(TARGET_NR_epoll_create)
13292 case TARGET_NR_epoll_create:
13293 return get_errno(epoll_create(arg1));
13294 #endif
13295 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13296 case TARGET_NR_epoll_create1:
13297 return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13298 #endif
13299 #if defined(TARGET_NR_epoll_ctl)
13300 case TARGET_NR_epoll_ctl:
13302 struct epoll_event ep;
13303 struct epoll_event *epp = 0;
13304 if (arg4) {
13305 if (arg2 != EPOLL_CTL_DEL) {
13306 struct target_epoll_event *target_ep;
13307 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13308 return -TARGET_EFAULT;
13310 ep.events = tswap32(target_ep->events);
13312 * The epoll_data_t union is just opaque data to the kernel,
13313 * so we transfer all 64 bits across and need not worry what
13314 * actual data type it is.
13316 ep.data.u64 = tswap64(target_ep->data.u64);
13317 unlock_user_struct(target_ep, arg4, 0);
13320 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13321 * non-null pointer, even though this argument is ignored.
13324 epp = &ep;
13326 return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13328 #endif
13330 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13331 #if defined(TARGET_NR_epoll_wait)
13332 case TARGET_NR_epoll_wait:
13333 #endif
13334 #if defined(TARGET_NR_epoll_pwait)
13335 case TARGET_NR_epoll_pwait:
13336 #endif
13338 struct target_epoll_event *target_ep;
13339 struct epoll_event *ep;
13340 int epfd = arg1;
13341 int maxevents = arg3;
13342 int timeout = arg4;
13344 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13345 return -TARGET_EINVAL;
13348 target_ep = lock_user(VERIFY_WRITE, arg2,
13349 maxevents * sizeof(struct target_epoll_event), 1);
13350 if (!target_ep) {
13351 return -TARGET_EFAULT;
13354 ep = g_try_new(struct epoll_event, maxevents);
13355 if (!ep) {
13356 unlock_user(target_ep, arg2, 0);
13357 return -TARGET_ENOMEM;
13360 switch (num) {
13361 #if defined(TARGET_NR_epoll_pwait)
13362 case TARGET_NR_epoll_pwait:
13364 sigset_t *set = NULL;
13366 if (arg5) {
13367 ret = process_sigsuspend_mask(&set, arg5, arg6);
13368 if (ret != 0) {
13369 break;
13373 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13374 set, SIGSET_T_SIZE));
13376 if (set) {
13377 finish_sigsuspend_mask(ret);
13379 break;
13381 #endif
13382 #if defined(TARGET_NR_epoll_wait)
13383 case TARGET_NR_epoll_wait:
13384 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13385 NULL, 0));
13386 break;
13387 #endif
13388 default:
13389 ret = -TARGET_ENOSYS;
13391 if (!is_error(ret)) {
13392 int i;
13393 for (i = 0; i < ret; i++) {
13394 target_ep[i].events = tswap32(ep[i].events);
13395 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13397 unlock_user(target_ep, arg2,
13398 ret * sizeof(struct target_epoll_event));
13399 } else {
13400 unlock_user(target_ep, arg2, 0);
13402 g_free(ep);
13403 return ret;
13405 #endif
13406 #endif
13407 #ifdef TARGET_NR_prlimit64
13408 case TARGET_NR_prlimit64:
13410 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13411 struct target_rlimit64 *target_rnew, *target_rold;
13412 struct host_rlimit64 rnew, rold, *rnewp = 0;
13413 int resource = target_to_host_resource(arg2);
13415 if (arg3 && (resource != RLIMIT_AS &&
13416 resource != RLIMIT_DATA &&
13417 resource != RLIMIT_STACK)) {
13418 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13419 return -TARGET_EFAULT;
13421 __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13422 __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13423 unlock_user_struct(target_rnew, arg3, 0);
13424 rnewp = &rnew;
13427 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13428 if (!is_error(ret) && arg4) {
13429 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13430 return -TARGET_EFAULT;
13432 __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13433 __put_user(rold.rlim_max, &target_rold->rlim_max);
13434 unlock_user_struct(target_rold, arg4, 1);
13436 return ret;
13438 #endif
13439 #ifdef TARGET_NR_gethostname
13440 case TARGET_NR_gethostname:
13442 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13443 if (name) {
13444 ret = get_errno(gethostname(name, arg2));
13445 unlock_user(name, arg1, arg2);
13446 } else {
13447 ret = -TARGET_EFAULT;
13449 return ret;
13451 #endif
13452 #ifdef TARGET_NR_atomic_cmpxchg_32
13453 case TARGET_NR_atomic_cmpxchg_32:
13455 /* should use start_exclusive from main.c */
13456 abi_ulong mem_value;
13457 if (get_user_u32(mem_value, arg6)) {
13458 target_siginfo_t info;
13459 info.si_signo = SIGSEGV;
13460 info.si_errno = 0;
13461 info.si_code = TARGET_SEGV_MAPERR;
13462 info._sifields._sigfault._addr = arg6;
13463 queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13464 ret = 0xdeadbeef;
13467 if (mem_value == arg2)
13468 put_user_u32(arg1, arg6);
13469 return mem_value;
13471 #endif
13472 #ifdef TARGET_NR_atomic_barrier
13473 case TARGET_NR_atomic_barrier:
13474 /* Like the kernel implementation and the
13475 qemu arm barrier, no-op this? */
13476 return 0;
13477 #endif
13479 #ifdef TARGET_NR_timer_create
13480 case TARGET_NR_timer_create:
13482 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13484 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13486 int clkid = arg1;
13487 int timer_index = next_free_host_timer();
13489 if (timer_index < 0) {
13490 ret = -TARGET_EAGAIN;
13491 } else {
13492 timer_t *phtimer = g_posix_timers + timer_index;
13494 if (arg2) {
13495 phost_sevp = &host_sevp;
13496 ret = target_to_host_sigevent(phost_sevp, arg2);
13497 if (ret != 0) {
13498 free_host_timer_slot(timer_index);
13499 return ret;
13503 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13504 if (ret) {
13505 free_host_timer_slot(timer_index);
13506 } else {
13507 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13508 timer_delete(*phtimer);
13509 free_host_timer_slot(timer_index);
13510 return -TARGET_EFAULT;
13514 return ret;
13516 #endif
13518 #ifdef TARGET_NR_timer_settime
13519 case TARGET_NR_timer_settime:
13521 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13522 * struct itimerspec * old_value */
13523 target_timer_t timerid = get_timer_id(arg1);
13525 if (timerid < 0) {
13526 ret = timerid;
13527 } else if (arg3 == 0) {
13528 ret = -TARGET_EINVAL;
13529 } else {
13530 timer_t htimer = g_posix_timers[timerid];
13531 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13533 if (target_to_host_itimerspec(&hspec_new, arg3)) {
13534 return -TARGET_EFAULT;
13536 ret = get_errno(
13537 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13538 if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13539 return -TARGET_EFAULT;
13542 return ret;
13544 #endif
13546 #ifdef TARGET_NR_timer_settime64
13547 case TARGET_NR_timer_settime64:
13549 target_timer_t timerid = get_timer_id(arg1);
13551 if (timerid < 0) {
13552 ret = timerid;
13553 } else if (arg3 == 0) {
13554 ret = -TARGET_EINVAL;
13555 } else {
13556 timer_t htimer = g_posix_timers[timerid];
13557 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13559 if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13560 return -TARGET_EFAULT;
13562 ret = get_errno(
13563 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13564 if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13565 return -TARGET_EFAULT;
13568 return ret;
13570 #endif
13572 #ifdef TARGET_NR_timer_gettime
13573 case TARGET_NR_timer_gettime:
13575 /* args: timer_t timerid, struct itimerspec *curr_value */
13576 target_timer_t timerid = get_timer_id(arg1);
13578 if (timerid < 0) {
13579 ret = timerid;
13580 } else if (!arg2) {
13581 ret = -TARGET_EFAULT;
13582 } else {
13583 timer_t htimer = g_posix_timers[timerid];
13584 struct itimerspec hspec;
13585 ret = get_errno(timer_gettime(htimer, &hspec));
13587 if (host_to_target_itimerspec(arg2, &hspec)) {
13588 ret = -TARGET_EFAULT;
13591 return ret;
13593 #endif
13595 #ifdef TARGET_NR_timer_gettime64
13596 case TARGET_NR_timer_gettime64:
13598 /* args: timer_t timerid, struct itimerspec64 *curr_value */
13599 target_timer_t timerid = get_timer_id(arg1);
13601 if (timerid < 0) {
13602 ret = timerid;
13603 } else if (!arg2) {
13604 ret = -TARGET_EFAULT;
13605 } else {
13606 timer_t htimer = g_posix_timers[timerid];
13607 struct itimerspec hspec;
13608 ret = get_errno(timer_gettime(htimer, &hspec));
13610 if (host_to_target_itimerspec64(arg2, &hspec)) {
13611 ret = -TARGET_EFAULT;
13614 return ret;
13616 #endif
13618 #ifdef TARGET_NR_timer_getoverrun
13619 case TARGET_NR_timer_getoverrun:
13621 /* args: timer_t timerid */
13622 target_timer_t timerid = get_timer_id(arg1);
13624 if (timerid < 0) {
13625 ret = timerid;
13626 } else {
13627 timer_t htimer = g_posix_timers[timerid];
13628 ret = get_errno(timer_getoverrun(htimer));
13630 return ret;
13632 #endif
13634 #ifdef TARGET_NR_timer_delete
13635 case TARGET_NR_timer_delete:
13637 /* args: timer_t timerid */
13638 target_timer_t timerid = get_timer_id(arg1);
13640 if (timerid < 0) {
13641 ret = timerid;
13642 } else {
13643 timer_t htimer = g_posix_timers[timerid];
13644 ret = get_errno(timer_delete(htimer));
13645 free_host_timer_slot(timerid);
13647 return ret;
13649 #endif
13651 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13652 case TARGET_NR_timerfd_create:
13653 ret = get_errno(timerfd_create(arg1,
13654 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13655 if (ret >= 0) {
13656 fd_trans_register(ret, &target_timerfd_trans);
13658 return ret;
13659 #endif
13661 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13662 case TARGET_NR_timerfd_gettime:
13664 struct itimerspec its_curr;
13666 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13668 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13669 return -TARGET_EFAULT;
13672 return ret;
13673 #endif
13675 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13676 case TARGET_NR_timerfd_gettime64:
13678 struct itimerspec its_curr;
13680 ret = get_errno(timerfd_gettime(arg1, &its_curr));
13682 if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13683 return -TARGET_EFAULT;
13686 return ret;
13687 #endif
13689 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13690 case TARGET_NR_timerfd_settime:
13692 struct itimerspec its_new, its_old, *p_new;
13694 if (arg3) {
13695 if (target_to_host_itimerspec(&its_new, arg3)) {
13696 return -TARGET_EFAULT;
13698 p_new = &its_new;
13699 } else {
13700 p_new = NULL;
13703 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13705 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13706 return -TARGET_EFAULT;
13709 return ret;
13710 #endif
13712 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13713 case TARGET_NR_timerfd_settime64:
13715 struct itimerspec its_new, its_old, *p_new;
13717 if (arg3) {
13718 if (target_to_host_itimerspec64(&its_new, arg3)) {
13719 return -TARGET_EFAULT;
13721 p_new = &its_new;
13722 } else {
13723 p_new = NULL;
13726 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13728 if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13729 return -TARGET_EFAULT;
13732 return ret;
13733 #endif
13735 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13736 case TARGET_NR_ioprio_get:
13737 return get_errno(ioprio_get(arg1, arg2));
13738 #endif
13740 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13741 case TARGET_NR_ioprio_set:
13742 return get_errno(ioprio_set(arg1, arg2, arg3));
13743 #endif
13745 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13746 case TARGET_NR_setns:
13747 return get_errno(setns(arg1, arg2));
13748 #endif
13749 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13750 case TARGET_NR_unshare:
13751 return get_errno(unshare(arg1));
13752 #endif
13753 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13754 case TARGET_NR_kcmp:
13755 return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13756 #endif
13757 #ifdef TARGET_NR_swapcontext
13758 case TARGET_NR_swapcontext:
13759 /* PowerPC specific. */
13760 return do_swapcontext(cpu_env, arg1, arg2, arg3);
13761 #endif
13762 #ifdef TARGET_NR_memfd_create
13763 case TARGET_NR_memfd_create:
13764 p = lock_user_string(arg1);
13765 if (!p) {
13766 return -TARGET_EFAULT;
13768 ret = get_errno(memfd_create(p, arg2));
13769 fd_trans_unregister(ret);
13770 unlock_user(p, arg1, 0);
13771 return ret;
13772 #endif
13773 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13774 case TARGET_NR_membarrier:
13775 return get_errno(membarrier(arg1, arg2));
13776 #endif
13778 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13779 case TARGET_NR_copy_file_range:
13781 loff_t inoff, outoff;
13782 loff_t *pinoff = NULL, *poutoff = NULL;
13784 if (arg2) {
13785 if (get_user_u64(inoff, arg2)) {
13786 return -TARGET_EFAULT;
13788 pinoff = &inoff;
13790 if (arg4) {
13791 if (get_user_u64(outoff, arg4)) {
13792 return -TARGET_EFAULT;
13794 poutoff = &outoff;
13796 /* Do not sign-extend the count parameter. */
13797 ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13798 (abi_ulong)arg5, arg6));
13799 if (!is_error(ret) && ret > 0) {
13800 if (arg2) {
13801 if (put_user_u64(inoff, arg2)) {
13802 return -TARGET_EFAULT;
13805 if (arg4) {
13806 if (put_user_u64(outoff, arg4)) {
13807 return -TARGET_EFAULT;
13812 return ret;
13813 #endif
13815 #if defined(TARGET_NR_pivot_root)
13816 case TARGET_NR_pivot_root:
13818 void *p2;
13819 p = lock_user_string(arg1); /* new_root */
13820 p2 = lock_user_string(arg2); /* put_old */
13821 if (!p || !p2) {
13822 ret = -TARGET_EFAULT;
13823 } else {
13824 ret = get_errno(pivot_root(p, p2));
13826 unlock_user(p2, arg2, 0);
13827 unlock_user(p, arg1, 0);
13829 return ret;
13830 #endif
13832 #if defined(TARGET_NR_riscv_hwprobe)
13833 case TARGET_NR_riscv_hwprobe:
13834 return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13835 #endif
13837 default:
13838 qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13839 return -TARGET_ENOSYS;
13841 return ret;
13844 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13845 abi_long arg2, abi_long arg3, abi_long arg4,
13846 abi_long arg5, abi_long arg6, abi_long arg7,
13847 abi_long arg8)
13849 CPUState *cpu = env_cpu(cpu_env);
13850 abi_long ret;
13852 #ifdef DEBUG_ERESTARTSYS
13853 /* Debug-only code for exercising the syscall-restart code paths
13854 * in the per-architecture cpu main loops: restart every syscall
13855 * the guest makes once before letting it through.
13858 static bool flag;
13859 flag = !flag;
13860 if (flag) {
13861 return -QEMU_ERESTARTSYS;
13864 #endif
13866 record_syscall_start(cpu, num, arg1,
13867 arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13869 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13870 print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13873 ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13874 arg5, arg6, arg7, arg8);
13876 if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13877 print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13878 arg3, arg4, arg5, arg6);
13881 record_syscall_return(cpu, num, ret);
13882 return ret;