/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include "qemu/plugin.h"
#include "target_mman.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#include <linux/btrfs.h>
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#include "linux_loop.h"
#include "user-internals.h"
#include "signal-common.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "cpu_loop-common.h"

#define CLONE_IO 0x80000000      /* Clone io context */
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)
#ifndef CLONE_PIDFD
# define CLONE_PIDFD 0x00001000
#endif
/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS                               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD |         \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
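/*
 * Illustrative sketch (not part of the original source) of how the masks
 * above combine in a do_fork()-style validity check; the exact logic
 * lives in do_fork() further down in this file:
 *
 *     if (flags & CLONE_VM) {
 *         // thread-like clone: all pthread_create() flags must be set,
 *         // and nothing outside the supported thread mask may be set
 *         if ((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS ||
 *             (flags & CLONE_INVALID_THREAD_FLAGS)) {
 *             return -TARGET_EINVAL;
 *         }
 *     } else if (flags & CLONE_INVALID_FORK_FLAGS) {
 *         // fork-like clone: only CSIGNAL plus the optional/ignored
 *         // fork flags are allowed
 *         return -TARGET_EINVAL;
 *     }
 */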
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
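/*
 * For illustration (not part of the original source): a declaration such
 * as _syscall0(int, sys_gettid) expands to a small host wrapper,
 *
 *     static int sys_gettid (void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * i.e. each macro simply forwards its typed arguments to the host
 * syscall() entry point, with the __NR_sys_* names aliased to real
 * __NR_* numbers by the #defines below.
 */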
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC     (1U << 2)
#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
                             unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg)
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
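/*
 * Illustrative example (not part of the original source): each table row
 * is { target_mask, target_bits, host_mask, host_bits }, so translating
 * a guest value such as (TARGET_O_CREAT | TARGET_O_NONBLOCK) walks the
 * rows and ORs in the host bits whose masked target bits match:
 *
 *     int host_flags = target_to_host_bitmask(target_flags,
 *                                             fcntl_flags_tbl);
 *
 * This indirection matters because the numeric O_* values differ
 * between guest and host ABIs (O_DIRECT, for example, varies by
 * architecture).
 */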
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif
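/*
 * The allocation scheme above is lock-free: qatomic_xchg() atomically
 * claims slot k by swapping in 1 and succeeds only if the previous
 * value was 0, so concurrent timer_create() emulation cannot hand out
 * the same slot twice. free_host_timer_slot() uses store-release
 * ordering so a slot only becomes reusable after all prior writes to
 * its g_posix_timers[] entry are visible to other threads.
 */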
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
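/*
 * For illustration (not part of the original source): errnos.c.inc is a
 * list of E(...) invocations, one per errno, so with the E() definition
 * above a line such as E(ENOSYS) expands inside host_to_target_errno() to
 *
 *     case ENOSYS: return TARGET_ENOSYS;
 *
 * and inside target_to_host_errno() to the mirror-image case. Generating
 * both switches from the same list keeps the two mappings from ever
 * drifting apart.
 */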
abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;

    if (usize <= ksize) {
        return 1;
    }

    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
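/*
 * The safe_syscall* wrappers mirror the _syscall* macros above but funnel
 * through safe_syscall(), which guarantees that if a guest signal arrives
 * before the host syscall instruction is reached, the call fails with
 * QEMU_ERESTARTSYS instead of running with a stale signal mask. For
 * illustration (not part of the original source),
 * safe_syscall2(int, kill, pid_t, pid, int, sig) expands to:
 *
 *     static int safe_kill(pid_t pid, int sig)
 *     {
 *         return safe_syscall(__NR_kill, pid, sig);
 *     }
 */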
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC includes wants the rest of the fifth argument in the sixth. */
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and
 * 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
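/*
 * Example (not part of the original source): on a host where
 * SOCK_STREAM == 1 and SOCK_NONBLOCK == 04000, a host value of
 * (SOCK_STREAM | SOCK_NONBLOCK) comes back from this helper as
 * (TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK): the low four type bits
 * and the flag bits are translated independently, so guest and host may
 * freely disagree on the numeric encodings.
 */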
static abi_ulong target_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_brk = new_brk;
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;
    abi_ulong new_brk, new_host_brk_page;

    /* brk pointers are always untagged */

    /* return old brk value if brk_val unchanged or zero */
    if (!brk_val || brk_val == target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

    /* brk_val and old target_brk might be on the same page */
    if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
        if (brk_val > target_brk) {
            /* empty remaining bytes in (possibly larger) host page */
            memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
        }
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < target_brk) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);

        /* free unused host pages and set new brk_page */
        target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
        brk_page = new_host_brk_page;

        target_brk = brk_val;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = new_host_brk_page - brk_page;
    if (new_alloc_size) {
        mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                            PROT_READ|PROT_WRITE,
                                            MAP_ANON|MAP_PRIVATE, 0, 0));
    } else {
        mapped_addr = brk_page;
    }

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = brk_val;
        brk_page = new_host_brk_page;
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
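/*
 * Worked example (not part of the original source): with 4 KiB pages,
 * growing brk from 0x10100 to 0x10800 leaves TARGET_PAGE_ALIGN()
 * unchanged at 0x11000, so the "same page" fast path above applies: no
 * new mapping is created, the freshly exposed bytes are zeroed, and
 * target_brk is simply advanced. Growing to 0x12000 instead mmap()s new
 * anonymous pages at brk_page and treats "mapped at the wrong address"
 * as out-of-memory, exactly as the comments above describe.
 */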
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
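/*
 * Layout note with a small example (not part of the original source):
 * the guest fd_set is an array of abi_ulong words, with fd k stored at
 * bit (k % TARGET_ABI_BITS) of word (k / TARGET_ABI_BITS). For a 32-bit
 * guest, fd 33 set in the host fd_set therefore comes back as bit 1 of
 * target_fds[1]; the __put_user()/__get_user() accessors perform the
 * per-word endianness swap.
 */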
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
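/*
 * Example conversion (not part of the original source): with an Alpha
 * host (HOST_HZ == 1024) and a 100 Hz target, 2048 host ticks map to
 * (2048 * 100) / 1024 = 200 target ticks; the int64_t cast keeps the
 * intermediate product from overflowing a 32-bit long.
 */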
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
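/*
 * Example (not part of the original source): for a 32-bit guest on a
 * 64-bit host, a host limit of 8 GiB does not fit in abi_ulong and fails
 * the (abi_long)rlim round-trip test above, so it is reported to the
 * guest as TARGET_RLIM_INFINITY rather than as a silently truncated
 * value; the setrlimit direction widens TARGET_RLIM_INFINITY back to
 * RLIM_INFINITY symmetrically.
 */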
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
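/*
 * Guest-side layout of arg6, for reference (an assumption spelled out
 * here, not part of the original source): the kernel ABI passes the
 * sigset as a pointer/size pair packed into one argument, so arg7[0]
 * above is the guest address of the sigset and arg7[1] its size. A
 * guest libc typically builds something like:
 *
 *     struct { abi_ulong sigset; abi_ulong sigsize; } packed = {
 *         guest_sigset_addr, sizeof(target_sigset_t)
 *     };
 *     // the 6th syscall argument is the guest address of 'packed'
 */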
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
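/*
 * For getsockopt the length negotiation runs in the opposite direction:
 * the guest's optlen is read first, the host syscall fills a host buffer,
 * and the result is truncated to the length the guest asked for before
 * both the value and the updated length are copied back out.
 */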
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (optname == SO_ERROR) {
            val = host_to_target_errno(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (len != 0) {
            if (put_user_u32(len, optlen))
                return -TARGET_EFAULT;
        }
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
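/*
 * lock_iovec() converts a guest iovec array into a host struct iovec
 * array, locking each guest buffer into host memory. Because its natural
 * return value (the vector pointer) is NULL on failure, errors are
 * reported through errno; any buffers already locked are unwound first.
 */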
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
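/*
 * Socket type values mostly match between guest and host, but the
 * TARGET_SOCK_CLOEXEC/TARGET_SOCK_NONBLOCK flag bits differ on some
 * targets, so the type word is split into the base type and the flag
 * bits and each part is converted separately.
 */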
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
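/*
 * do_socket() below deliberately whitelists the netlink protocols it can
 * emulate (route, kobject-uevent, audit): any other PF_NETLINK protocol
 * would hand the guest kernel messages whose layout QEMU cannot
 * translate, so it is rejected with EPROTONOSUPPORT up front.
 */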
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case:
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
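/*
 * Note the control buffer in do_sendrecvmsg_locked() is sized at twice
 * the guest's msg_controllen: host cmsg headers and alignment can be
 * larger than the guest's (e.g. a 64-bit host running a 32-bit guest),
 * so the converted chain may need more room than the original.
 */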
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name,
                                                  msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
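/*
 * sendmmsg/recvmmsg are emulated as a loop over do_sendrecvmsg_locked():
 * each mmsghdr's msg_len field is filled with the per-message result,
 * and, like the kernel, partial success is reported by returning the
 * count of messages processed and surfacing an error only when none were.
 */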
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
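/*
 * The old socketcall(2) interface multiplexes every socket operation
 * through one syscall: 'num' selects the operation and 'vptr' points at
 * an array of abi_long arguments in guest memory. A guest-side call such
 * as the following (illustrative only, not part of this file):
 *
 *     long args[3] = { AF_INET, SOCK_STREAM, 0 };
 *     syscall(__NR_socketcall, SYS_SOCKET, args);
 *
 * arrives here, where nargs[] says how many argument words to fetch
 * before dispatching to the matching do_*() helper.
 */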
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
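/*
 * What follows is the SysV IPC emulation. Each kernel structure
 * (semid_ds, msqid_ds, shmid_ds, and their *info variants) gets a pair
 * of converters that copy it field by field between guest and host
 * layout, byte-swapping with __get_user()/__put_user() as they go.
 */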
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
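/*
 * semctl's fourth argument is a union, which makes 64-bit cross-endian
 * emulation delicate: only the command says which union member is live,
 * so each command class below converts exactly the member it uses.
 */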
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
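/*
 * Example flow for the helper above: a guest shmctl(id, IPC_STAT, buf)
 * arrives with buf as a guest address. The shmid_ds helpers convert
 * guest -> host before the host shmctl() call and host -> guest after
 * it, so the host's struct layout never leaks into guest memory, and
 * commands without a payload (IPC_RMID, SHM_LOCK, SHM_UNLOCK) pass NULL.
 */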
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
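/*
 * Worked example of the SHM_RND rounding performed in do_shmat() below,
 * assuming a 4K target page size (so shmlba == 0x1000):
 *     shmaddr = 0x20000123 with SHM_RND set    -> attach at 0x20000000
 *     shmaddr = 0x20000123 without SHM_RND     -> -TARGET_EINVAL
 * A shmaddr of 0 skips both checks and lets QEMU pick the placement.
 */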
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations. This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr) {
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    } else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
                                   MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else {
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
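/*
 * The shm_regions[] bookkeeping shared by do_shmat() and do_shmdt()
 * exists because shmdt() only takes an address: do_shmdt() must recover
 * the segment size recorded at attach time in order to clear the page
 * flags for exactly the attached range before detaching on the host.
 */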
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
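/*
 * Example decoding of the multiplexed ipc() syscall handled above: on
 * architectures that build shmat() on top of ipc(), a guest
 * shmat(id, addr, flg) typically arrives as
 *     ipc(IPCOP_shmat, id, flg, &raddr, addr)
 * so call = IPCOP_shmat, version = call >> 16 (0 for the modern ABI),
 * and the attach address is returned indirectly through the third
 * argument rather than as the syscall result.
 */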
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
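/*
 * The double inclusion of "syscall_types.h" above is an X-macro idiom:
 * the first pass expands each STRUCT() line into an enum constant, the
 * second pass expands the very same lines into argtype descriptor
 * arrays. A hypothetical entry along the lines of
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 * would therefore yield both STRUCT_winsize and struct_winsize_def[].
 */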
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
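/*
 * The FIEMAP_MAX_EXTENTS bound above keeps the size computation
 *     sizeof(struct fiemap) + fm_extent_count * sizeof(struct fiemap_extent)
 * below UINT_MAX, so outbufsz cannot wrap on 32-bit hosts. With the
 * kernel's 56-byte extent record, for instance, the cap works out to
 * roughly 76 million extents; anything larger fails with -TARGET_EINVAL
 * before any allocation is attempted.
 */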
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = g_try_malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}
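/*
 * The sizing dance above exists because guest and host can disagree on
 * sizeof(struct ifreq): the guest buffer is measured in
 * target_ifreq_size units (the largest target variant, ifmap_ifreq)
 * while the host kernel fills native struct ifreq records, so ifc_len
 * has to be rescaled in both directions around the ioctl call.
 */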
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
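/*
 * Lifecycle of a live_urb, tying the three handlers above together:
 *   SUBMITURB  - allocate a live_urb, convert the guest URB into
 *                host_urb, lock the data buffer, and insert it into the
 *                hash table keyed by the guest URB address.
 *   REAPURB    - the kernel returns the host_urb pointer; offsetof()
 *                arithmetic recovers the enclosing live_urb, the buffer
 *                is unlocked, and the guest URB address is written back.
 *   DISCARDURB - the guest URB address is looked up in the hash table
 *                to find the matching host_urb to cancel.
 */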
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
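/*
 * BLKPG is a nested-pointer ioctl: struct blkpg_ioctl_arg carries a
 * data pointer to a struct blkpg_partition. The handler above therefore
 * converts twice, once per struct, and then swizzles host_blkpg->data
 * to the local host_part copy so the host kernel never dereferences a
 * guest address.
 */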
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);

    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
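/*
 * SIOCADDRT/SIOCDELRT need the open-coded conversion above because
 * struct rtentry embeds rt_dev, a pointer to a NUL-terminated device
 * name (e.g. "eth0"): the generic thunk would only translate the
 * pointer bits, not the string behind them, so that one field is
 * intercepted and routed through lock_user_string() instead.
 */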
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }
    return ret;
}
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }
    return ret;
}
#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif
#ifdef HAVE_DRM_H

static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}

#endif
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOTTY;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
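/*
 * How an ioctl_entries[] row drives the generic path above: an entry
 * along the lines of
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * (as found in ioctls.h) gives arg_type[0] == TYPE_PTR and access ==
 * IOC_R, so do_ioctl() issues the host ioctl into buf_temp and then
 * thunk-converts the winsize struct back out to the guest buffer.
 */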
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
};

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
};
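/*
 * Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits }: within target_mask, the value target_bits translates to
 * host_bits within host_mask. Single-flag rows simply repeat the flag
 * on both sides; multi-bit fields such as CBAUD or CSIZE get one row
 * per legal value, which is why every baud rate above takes its own
 * entry.
 */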
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
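/*
 * struct_termios_def supplies custom convert functions instead of a
 * field-by-field argtype layout: TCGETS/TCSETS-style ioctls need the
 * c_cc[] index remapping performed above (the TARGET_V* indices need
 * not match the host V* indices), which a plain per-field thunk cannot
 * express.
 */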
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
};
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}

/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0              &&
             read_exec_only == 1        &&
             seg_32bit == 0             &&
             limit_in_pages == 0        &&
             seg_not_present == 1       &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
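/*
 * Worked example of the descriptor packing above (the same encoding the
 * Linux kernel uses for x86 segment descriptors): with
 * base_addr = 0x12345678, limit = 0xfffff, seg_32bit = 1,
 * limit_in_pages = 1, contents = 0, read_exec_only = 0,
 * seg_not_present = 0, useable = 1, lm = 0 and oldmode = 0:
 *     entry_1 = 0x5678ffff   (base bits 15..0 << 16 | limit bits 15..0)
 *     entry_2 = 0x12dff234   (base bits 31..24 and 23..16, limit bits
 *                             19..16, plus the P, G, D/B, AVL and type
 *                             attribute bits)
 */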
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */
#endif /* defined(TARGET_I386) */
/*
 * These constants are generic.  Supply any that are missing from the host.
 */
#ifndef PR_SET_NAME
# define PR_SET_NAME    15
# define PR_GET_NAME    16
#endif
#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR   (1 << 0)
# define PR_FP_MODE_FRE  (1 << 1)
#endif
#ifndef PR_SVE_SET_VL
# define PR_SVE_SET_VL  50
# define PR_SVE_GET_VL  51
# define PR_SVE_VL_LEN_MASK  0xffff
# define PR_SVE_VL_INHERIT   (1 << 17)
#endif
#ifndef PR_PAC_RESET_KEYS
# define PR_PAC_RESET_KEYS  54
# define PR_PAC_APIAKEY   (1 << 0)
# define PR_PAC_APIBKEY   (1 << 1)
# define PR_PAC_APDAKEY   (1 << 2)
# define PR_PAC_APDBKEY   (1 << 3)
# define PR_PAC_APGAKEY   (1 << 4)
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
# define PR_MTE_TCF_SHIFT       1
# define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT       3
# define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
#endif
#ifndef PR_SET_IO_FLUSHER
# define PR_SET_IO_FLUSHER 57
# define PR_GET_IO_FLUSHER 58
#endif
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
#endif
#ifndef PR_SME_SET_VL
# define PR_SME_SET_VL  63
# define PR_SME_GET_VL  64
# define PR_SME_VL_LEN_MASK  0xffff
# define PR_SME_VL_INHERIT   (1 << 17)
#endif

#include "target_prctl.h"
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}

static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}

#ifndef do_prctl_get_fp_mode
#define do_prctl_get_fp_mode do_prctl_inval0
#endif
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
#ifndef do_prctl_sve_get_vl
#define do_prctl_sve_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sve_set_vl
#define do_prctl_sve_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
#endif
#ifndef do_prctl_set_tagged_addr_ctrl
#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
#endif
#ifndef do_prctl_get_tagged_addr_ctrl
#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
#endif
#ifndef do_prctl_get_unalign
#define do_prctl_get_unalign do_prctl_inval1
#endif
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
#ifndef do_prctl_sme_get_vl
#define do_prctl_sme_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sme_set_vl
#define do_prctl_sme_set_vl do_prctl_inval1
#endif
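/*
 * The #define fallbacks above let every target compile against the full
 * option list in do_prctl() below: an architecture that implements an
 * option defines the hook in its target_prctl.h (for example, aarch64
 * provides a real do_prctl_sve_get_vl()), and anything left undefined
 * collapses to do_prctl_inval0/1, i.e. -TARGET_EINVAL.
 */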
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
#define NEW_STACK_SIZE 0x40000

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;
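/*
 * Entry point for host threads created on behalf of a guest clone().
 * The parent blocks on info->cond until this function has published
 * the new thread's TID and re-enabled signals, and the clone_lock
 * round-trip below keeps the child parked until the parent has
 * finished initializing the TLS state.
 */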
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

#if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
        if (flags & CLONE_PIDFD) {
            return -TARGET_EINVAL;
        }
#endif

        /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
        if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            if (flags & CLONE_PIDFD) {
                int pid_fd = 0;
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
                int pid_child = ret;
                pid_fd = pidfd_open(pid_child, 0);
                if (pid_fd >= 0) {
                    fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
                                           | FD_CLOEXEC);
                } else {
                    pid_fd = 0;
                }
#endif
                put_user_u32(pid_fd, parent_tidptr);
            }
            fork_end(0);
        }
        g_assert(!cpu_in_exclusive_context(cpu));
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
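/*
 * FLOCK_TRANSTBL is expanded twice below with two different definitions
 * of TRANSTBL_CONVERT: once mapping TARGET_F_* lock types to host values
 * and once mapping host values back to the target's, so the table of
 * lock types only has to be written once.
 */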
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;

static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
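/*
 * Helpers for syscalls that use 16-bit UIDs/GIDs (the legacy "UID16"
 * ABI on some 32-bit targets): values above 65535 are clamped to the
 * overflow ID 65534 on the way down, and a 16-bit -1 is sign-extended
 * on the way back up so it still means "no change".
 */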
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
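/*
 * On targets that pass 64-bit syscall arguments in aligned register
 * pairs, the 64-bit offset may arrive shifted up by one argument slot;
 * regpairs_aligned() detects this and the helpers below re-select the
 * two registers that actually hold the offset halves before combining
 * them with target_offset64().
 */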
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif

static inline int target_to_host_msync_arg(abi_long arg)
{
    return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
           ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
           ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
           (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
}
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
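/*
 * Pick the host futex syscall that matches the timeout width: a 64-bit
 * host always has a 64-bit time_t futex, while a 32-bit host may need
 * __NR_futex_time64 when struct timespec carries a 64-bit tv_sec.
 */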
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif

#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
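/*
 * Bit layout of the wait status word: the termination signal lives in
 * bits 0-6 (with bit 7 as the core-dump flag) and a stop signal in
 * bits 8-15, so e.g. a host SIGUSR1 death is rewritten to the target's
 * SIGUSR1 number while the remaining bits pass through unchanged.
 */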
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static void show_smaps(int fd, unsigned long size)
{
    unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
    unsigned long size_kb = size >> 10;

    /* Most fields are not (yet) tracked, so report them as zero. */
    dprintf(fd, "Size:                  %lu kB\n"
                "KernelPageSize:        %lu kB\n"
                "MMUPageSize:           %lu kB\n"
                "Rss:                   0 kB\n"
                "Pss:                   0 kB\n"
                "Pss_Dirty:             0 kB\n"
                "Shared_Clean:          0 kB\n"
                "Shared_Dirty:          0 kB\n"
                "Private_Clean:         0 kB\n"
                "Private_Dirty:         0 kB\n"
                "Referenced:            0 kB\n"
                "Anonymous:             0 kB\n"
                "LazyFree:              0 kB\n"
                "AnonHugePages:         0 kB\n"
                "ShmemPmdMapped:        0 kB\n"
                "FilePmdMapped:         0 kB\n"
                "Shared_Hugetlb:        0 kB\n"
                "Private_Hugetlb:       0 kB\n"
                "Swap:                  0 kB\n"
                "SwapPss:               0 kB\n"
                "Locked:                0 kB\n"
                "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
}
static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

#ifdef TARGET_HPPA
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
            if (smaps) {
                show_smaps(fd, max - min);
                dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
                        (flags & PAGE_READ) ? " rd" : "",
                        (flags & PAGE_WRITE_ORG) ? " wr" : "",
                        (flags & PAGE_EXEC) ? " ex" : "",
                        e->is_priv ? "" : " sh",
                        (flags & PAGE_READ) ? " mr" : "",
                        (flags & PAGE_WRITE_ORG) ? " mw" : "",
                        (flags & PAGE_EXEC) ? " me" : "",
                        e->is_priv ? "" : " ms");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
    if (smaps) {
        show_smaps(fd, TARGET_PAGE_SIZE);
        dprintf(fd, "VmFlags: ex\n");
    }
#endif

    return 0;
}
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}

static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
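/*
 * Emulate /proc/self/stat by emitting its 44 space-separated fields:
 * only pid (0), comm (1), state (2), ppid (3), starttime (21) and
 * startstack (27) carry real values; everything else is written as 0.
 */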
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 2) {
            /* task state */
            g_string_assign(buf, "R "); /* we are running right now */
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
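/*
 * Returns nonzero if the given guest path names this process's own
 * /proc entry, i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>";
 * such opens are intercepted and served emulated contents.
 */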
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                           const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}

void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
    defined(TARGET_RISCV) || defined(TARGET_S390X)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i, num_cpus;

    num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
        dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
        dprintf(fd, "capabilities\t: os32\n");
        dprintf(fd, "model\t\t: 9000/778/B160L - "
                    "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
    }
    return 0;
}
#endif
#if defined(TARGET_RISCV)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    RISCVCPU *cpu = env_archcpu(cpu_env);
    const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
    char *isa_string = riscv_isa_string(cpu);
    const char *mmu;

    if (cfg->mmu) {
        mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48";
    } else {
        mmu = "none";
    }

    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "hart\t\t: %d\n", i);
        dprintf(fd, "isa\t\t: %s\n", isa_string);
        dprintf(fd, "mmu\t\t: %s\n", mmu);
        dprintf(fd, "uarch\t\t: qemu\n\n");
    }

    g_free(isa_string);
    return 0;
}
#endif
#if defined(TARGET_S390X)
/*
 * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
 * show in /proc/cpuinfo.
 *
 * Skip the following in order to match the missing support in op_ecag():
 * - show_cacheinfo().
 * - show_cpu_topology().
 * - show_cpu_mhz().
 *
 * Use fixed values for certain fields:
 * - bogomips per cpu - from a qemu-system-s390x run.
 * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
 *
 * Keep the code structure close to arch/s390/kernel/processor.c.
 */

static void show_facilities(int fd)
{
    size_t sizeof_stfl_bytes = 2048;
    g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
    unsigned int bit;

    dprintf(fd, "facilities      :");
    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
        if (test_be_bit(bit, stfl_bytes)) {
            dprintf(fd, " %d", bit);
        }
    }
    dprintf(fd, "\n");
}

static int cpu_ident(unsigned long n)
{
    return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
                     n);
}

static void show_cpu_summary(CPUArchState *cpu_env, int fd)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    uint32_t elf_hwcap = get_elf_hwcap();
    const char *hwcap_str;
    int i;

    dprintf(fd, "vendor_id       : IBM/S390\n"
                "# processors    : %i\n"
                "bogomips per cpu: 13370.00\n",
            num_cpus);
    dprintf(fd, "max thread id   : 0\n");
    dprintf(fd, "features\t: ");
    for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
        if (!(elf_hwcap & (1 << i))) {
            continue;
        }
        hwcap_str = elf_hwcap_str(i);
        if (hwcap_str) {
            dprintf(fd, "%s ", hwcap_str);
        }
    }
    dprintf(fd, "\n");
    show_facilities(fd);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor %d: "
                    "version = %02X,  "
                    "identification = %06X,  "
                    "machine = %04X\n",
                i, model->cpu_ver, cpu_ident(i), model->def->type);
    }
}

static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;

    dprintf(fd, "version         : %02X\n", model->cpu_ver);
    dprintf(fd, "identification  : %06X\n", cpu_ident(n));
    dprintf(fd, "machine         : %04X\n", model->def->type);
}

static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
{
    dprintf(fd, "\ncpu number      : %ld\n", n);
    show_cpu_ids(cpu_env, fd, n);
}

static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    int i;

    show_cpu_summary(cpu_env, fd);
    for (i = 0; i < num_cpus; i++) {
        show_cpuinfo(cpu_env, fd, i);
    }
    return 0;
}
#endif
#if defined(TARGET_M68K)
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
    defined(TARGET_RISCV) || defined(TARGET_S390X)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    if (safe) {
        return safe_openat(dirfd, path(pathname), flags, mode);
    } else {
        return openat(dirfd, path(pathname), flags, mode);
    }
}
ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
{
    ssize_t ret;

    if (!pathname || !buf) {
        errno = EFAULT;
        return -1;
    }

    if (!bufsiz) {
        /* Short circuit this for the magic exe check. */
        errno = EINVAL;
        return -1;
    }

    if (is_proc_myself((const char *)pathname, "exe")) {
        /*
         * Don't worry about sign mismatch as earlier mapping
         * logic would have thrown a bad address error.
         */
        ret = MIN(strlen(exec_path), bufsiz);
        /* We cannot NUL terminate the string. */
        memcpy(buf, exec_path, ret);
    } else {
        ret = readlink(path(pathname), buf, bufsiz);
    }

    return ret;
}
static int do_execveat(CPUArchState *cpu_env, int dirfd,
                       abi_long pathname, abi_long guest_argp,
                       abi_long guest_envp, int flags)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    if (is_proc_myself(p, "exe")) {
        ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
    } else {
        ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
    }

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
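/*
 * Guest-visible POSIX timer IDs are the internal table index OR'ed with
 * TIMER_MAGIC; e.g. internal timer 3 is handed out as 0x0caf0003, so a
 * bogus ID passed back by the guest fails the magic check below.
 */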
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
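/*
 * sched_{get,set}affinity masks are arrays of abi_ulong on the guest
 * side and of host unsigned long on the host side, which may differ in
 * width and byte order; the two helpers below transfer the mask bit by
 * bit between the two layouts.
 */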
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
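/*
 * The getdents emulations below read host dirent records into a bounce
 * buffer and re-encode them one record at a time into the target
 * layout, since field widths, alignment and hence record lengths can
 * all differ between host and target.
 */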
#ifdef TARGET_NR_getdents
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
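/*
 * Direct syscall wrappers: libc does not reliably provide wrappers for
 * pivot_root() or the newer mount API entry points, so they are
 * invoked via the raw _syscallN() macros.
 */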
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
#define __NR_sys_open_tree __NR_open_tree
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)
#endif

#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
#define __NR_sys_move_mount __NR_move_mount
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
          int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
#endif
/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread. */

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = cpu->opaque;

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(cpu, ts->child_tidptr),
                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
            }

            object_unparent(OBJECT(cpu));
            object_unref(OBJECT(cpu));
            /*
             * At this point the CPU should be unrealized and removed
             * from cpu lists. We can clean-up the rest of the thread
             * data without the lock held.
             */

            pthread_mutex_unlock(&clone_lock);

            thread_cpu = NULL;
            g_free(ts);
            rcu_unregister_thread();
            pthread_exit(NULL);
        }

        pthread_mutex_unlock(&clone_lock);
        preexit_cleanup(cpu_env, arg1);
        _exit(arg1);
        return 0; /* avoid warning */
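    /*
     * For read/write a zero-length request with a NULL buffer is passed
     * straight through so the guest still sees the host's fd validity
     * check. Otherwise the guest buffer is locked and any fd_trans
     * translator registered for the descriptor (used e.g. for netlink
     * sockets) rewrites the data between host and target formats.
     */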
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        } else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(safe_read(arg1, p, arg3));
            if (ret >= 0 &&
                fd_trans_host_to_target_data(arg1)) {
                ret = fd_trans_host_to_target_data(arg1)(p, ret);
            }
            unlock_user(p, arg2, ret);
        }
        return ret;
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
            if (ret >= 0) {
                ret = get_errno(safe_write(arg1, copy, ret));
            }
            g_free(copy);
        } else {
            ret = get_errno(safe_write(arg1, p, arg3));
        }
        unlock_user(p, arg2, 0);
        return ret;
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_guest_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
    case TARGET_NR_pidfd_open:
        return get_errno(pidfd_open(arg1, arg2));
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
    case TARGET_NR_pidfd_send_signal:
        {
            siginfo_t uinfo, *puinfo;

            if (arg3) {
                p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_siginfo(&uinfo, p);
                unlock_user(p, arg3, 0);
                puinfo = &uinfo;
            } else {
                puinfo = NULL;
            }
            ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
                                              puinfo, arg4));
        }
        return ret;
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
    case TARGET_NR_pidfd_getfd:
        return get_errno(pidfd_getfd(arg1, arg2, arg3));
#endif
    case TARGET_NR_close:
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
    case TARGET_NR_close_range:
        ret = get_errno(sys_close_range(arg1, arg2, arg3));
        if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
            abi_long fd, maxfd;
            maxfd = MIN(arg2, target_fd_max);
            for (fd = arg1; fd < maxfd; fd++) {
                fd_trans_unregister(fd);
            }
        }
        return ret;
#endif
    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            info.si_pid = 0;
            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
        {
            void *p2 = NULL;
            if (!arg2 || !arg4)
                return -TARGET_EFAULT;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_execveat:
        return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
    case TARGET_NR_execve:
        return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        cpu_env->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
    case TARGET_NR_move_mount:
        {
            void *p2, *p4;

            if (!arg2 || !arg4) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            p4 = lock_user_string(arg4);
            if (!p4) {
                unlock_user(p2, arg2, 0);
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));

            unlock_user(p2, arg2, 0);
            unlock_user(p4, arg4, 0);

            return ret;
        }
#endif
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
    case TARGET_NR_open_tree:
        {
            void *p2;
            int host_flags;

            if (!arg2) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            host_flags = arg3 & ~TARGET_O_CLOEXEC;
            if (arg3 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }

            ret = get_errno(sys_open_tree(arg1, p2, host_flags));

            unlock_user(p2, arg2, 0);

            return ret;
        }
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat2)
    case TARGET_NR_faccessat2:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
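    /*
     * On Alpha, sigprocmask passes the new mask by value in arg2 and
     * returns the old mask in the return register, clearing v0 to
     * signal success; other targets exchange the masks through guest
     * memory instead.
     */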
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }

            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }

            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            TaskState *ts = cpu->opaque;
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t *set;

            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            ret = get_errno(do_guest_readlink(p, p2, arg3));
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
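    /*
     * Several 32-bit targets kept the old mmap() calling convention in
     * which arg1 points at a block of six longs holding the real
     * arguments; everyone else passes the arguments in registers.
     */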
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority. Signal no error. */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
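    /*
     * The statfs results are copied field by field with __put_user so
     * each value is byte-swapped and truncated to the width of the
     * corresponding field in the target's struct statfs.
     */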
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                    return ret;
                }
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage) {
                rusage_ptr = &rusage;
            } else {
                rusage_ptr = NULL;
            }
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
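    /*
     * SysV IPC arrives either via the multiplexed ipc() syscall or via
     * the individual semaphore, message and shared-memory syscalls,
     * depending on the target ABI; both routes use the same helpers.
     */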
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname *buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
11047 case TARGET_NR_getpgid
:
11048 return get_errno(getpgid(arg1
));
11049 case TARGET_NR_fchdir
:
11050 return get_errno(fchdir(arg1
));
11051 case TARGET_NR_personality
:
11052 return get_errno(personality(arg1
));
11053 #ifdef TARGET_NR__llseek /* Not on alpha */
11054 case TARGET_NR__llseek
:
11057 #if !defined(__NR_llseek)
11058 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
11060 ret
= get_errno(res
);
11065 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
11067 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
11068 return -TARGET_EFAULT
;
11073 #ifdef TARGET_NR_getdents
11074 case TARGET_NR_getdents
:
11075 return do_getdents(arg1
, arg2
, arg3
);
11076 #endif /* TARGET_NR_getdents */
11077 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11078 case TARGET_NR_getdents64
:
11079 return do_getdents64(arg1
, arg2
, arg3
);
11080 #endif /* TARGET_NR_getdents64 */
11081 #if defined(TARGET_NR__newselect)
11082 case TARGET_NR__newselect
:
11083 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
11085 #ifdef TARGET_NR_poll
11086 case TARGET_NR_poll
:
11087 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
11089 #ifdef TARGET_NR_ppoll
11090 case TARGET_NR_ppoll
:
11091 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
11093 #ifdef TARGET_NR_ppoll_time64
11094 case TARGET_NR_ppoll_time64
:
11095 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
11097 case TARGET_NR_flock
:
11098 /* NOTE: the flock constant seems to be the same for every
11100 return get_errno(safe_flock(arg1
, arg2
));
11101 case TARGET_NR_readv
:
11103 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
11105 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
11106 unlock_iovec(vec
, arg2
, arg3
, 1);
11108 ret
= -host_to_target_errno(errno
);
11112 case TARGET_NR_writev
:
11114 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11116 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
11117 unlock_iovec(vec
, arg2
, arg3
, 0);
11119 ret
= -host_to_target_errno(errno
);
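    /*
     * Note (a sketch of the rationale, from the helper's assumed contract):
     * lock_iovec() copies and validates the guest iovec array, returning
     * NULL with errno set on failure, which is why the failure path above
     * reports -host_to_target_errno(errno) rather than using get_errno().
     */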
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
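    /*
     * Worked example (illustrative): a 32-bit guest on a 64-bit host calling
     * sched_getaffinity(pid, 4, buf) passes the abi_ulong alignment check,
     * and mask_size is rounded up from 4 to 8 so the host kernel sees a
     * whole unsigned long; the extra 4 bytes are then ignored unless the
     * host actually needed them, in which case EINVAL is returned above.
     */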
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
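    /*
     * Note (assumed semantics, matching the use above): check_zeroed_user()
     * returns 1 when the tail of the user buffer beyond 'size' is all
     * zeroes, 0 when a non-zero byte is found (mapped to E2BIG here, as the
     * kernel does for an oversized sched_attr), and a negative errno on an
     * access failure.
     */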
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
        {
            struct target_user_cap_header *target_header;
            struct target_user_cap_data *target_data = NULL;
            struct __user_cap_header_struct header;
            struct __user_cap_data_struct data[2];
            struct __user_cap_data_struct *dataptr = NULL;
            int i, target_datalen;
            int data_items = 1;

            if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
                return -TARGET_EFAULT;
            }
            header.version = tswap32(target_header->version);
            header.pid = tswap32(target_header->pid);

            if (header.version != _LINUX_CAPABILITY_VERSION) {
                /* Version 2 and up takes pointer to two user_data structs */
                data_items = 2;
            }

            target_datalen = sizeof(*target_data) * data_items;

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
                } else {
                    target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
                }
                if (!target_data) {
                    unlock_user_struct(target_header, arg1, 0);
                    return -TARGET_EFAULT;
                }

                if (num == TARGET_NR_capset) {
                    for (i = 0; i < data_items; i++) {
                        data[i].effective = tswap32(target_data[i].effective);
                        data[i].permitted = tswap32(target_data[i].permitted);
                        data[i].inheritable = tswap32(target_data[i].inheritable);
                    }
                }

                dataptr = data;
            }

            if (num == TARGET_NR_capget) {
                ret = get_errno(capget(&header, dataptr));
            } else {
                ret = get_errno(capset(&header, dataptr));
            }

            /* The kernel always updates version for both capget and capset */
            target_header->version = tswap32(header.version);
            unlock_user_struct(target_header, arg1, 1);

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    for (i = 0; i < data_items; i++) {
                        target_data[i].effective = tswap32(data[i].effective);
                        target_data[i].permitted = tswap32(data[i].permitted);
                        target_data[i].inheritable = tswap32(data[i].inheritable);
                    }
                    unlock_user(target_data, arg2, target_datalen);
                } else {
                    unlock_user(target_data, arg2, 0);
                }
            }
            return ret;
        }
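    /*
     * Background (hedged): _LINUX_CAPABILITY_VERSION here is the v1 ABI,
     * which uses a single 32-bit capability data struct; the v2/v3 ABIs
     * widened the capability sets to 64 bits and therefore pass an array
     * of two structs, which is what data_items = 2 above accounts for.
     */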
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            cpu_env->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel.
           case GSI_UACPROC:
           -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
           -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
           -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
           -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
           -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
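    /*
     * Illustration (hedged): target_offset64(hi, lo) reassembles a 64-bit
     * value that a 32-bit guest ABI split across a register pair; which
     * register carries the high half depends on the target's endianness
     * and pair-alignment rules, hence the helper rather than an open-coded
     * ((uint64_t)a << 32) | b.
     */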
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
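    /*
     * Background (hedged): ARM OABI and EABI lay out struct flock64
     * differently -- EABI aligns the 64-bit l_start/l_len fields to 8
     * bytes, introducing padding that OABI lacks -- so a separate
     * copy_{from,to}_user_oabi_flock64 pair is selected above when the
     * guest binary is old-ABI.
     */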
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
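    /*
     * Format note (well-known listxattr behaviour, for illustration): the
     * buffer is filled with a concatenation of NUL-terminated attribute
     * names, e.g. "user.foo\0security.selinux\0", and the byte count is
     * returned, which is why it can be passed through to the host verbatim
     * with only address translation and no per-entry conversion.
     */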
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        cpu_env->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            cpu_env->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif

#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        TaskState *ts = cpu->opaque;
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif
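    /*
     * Rationale (a sketch): the kernel would use this address for
     * CLONE_CHILD_CLEARTID-style cleanup; since QEMU manages guest threads
     * itself, it records the pointer in the TaskState and performs the
     * clear-and-futex-wake at guest thread exit instead of forwarding the
     * syscall to the host.
     */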
    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
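    /*
     * For reference (kernel ABI, quoted from memory): the registered
     * pointer is to a guest-side structure roughly like
     *
     *     struct robust_list_head {
     *         struct robust_list list;         // chain of held robust futexes
     *         long futex_offset;               // offset from entry to futex word
     *         struct robust_list *list_op_pending;
     *     };
     *
     * which only the guest itself can walk meaningfully, hence the ENOSYS
     * above.
     */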
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;

#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
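    /*
     * Why translate the flags above (hedged): O_NONBLOCK and O_CLOEXEC do
     * not have the same numeric values on every target ABI (O_NONBLOCK,
     * for instance, differs on MIPS and Alpha from the common 0o4000), so
     * the guest bits are tested symbolically and the host's own constants
     * are set rather than passing the value through unchanged.
     */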
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
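    /*
     * Illustration (hedged): because data.u64 is byte-swapped as a single
     * 64-bit unit, a guest that stored a 32-bit pointer in data.ptr or an
     * int in data.fd gets exactly the same bytes back on the wait side,
     * regardless of which union member it used.
     */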
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
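    /*
     * Design note (hedged inference from the check above): new limits for
     * RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are deliberately not
     * forwarded, since they would constrain QEMU's own host process --
     * whose address-space and stack usage differ from the guest's --
     * rather than the emulated program; reading them back via arg4 still
     * works.
     */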
13261 #ifdef TARGET_NR_gethostname
13262 case TARGET_NR_gethostname
:
13264 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
13266 ret
= get_errno(gethostname(name
, arg2
));
13267 unlock_user(name
, arg1
, arg2
);
13269 ret
= -TARGET_EFAULT
;
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            /* Don't fall through with mem_value uninitialized. */
            return -TARGET_EFAULT;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        return 0;
    }
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
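/*
 * The remaining timer_* cases decode the guest handle with get_timer_id(),
 * which checks for the TIMER_MAGIC tag written above and yields a negative
 * target errno for handles the guest did not obtain from timer_create.
 */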
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec *old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif
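/*
 * The timerfd_* syscalls below operate directly on host file descriptors;
 * only the itimerspec arguments need conversion, with separate 64-bit
 * variants (timerfd_gettime64/settime64) for guests whose time64 struct
 * layout differs from the legacy one.
 */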
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_timerfd_trans);
        }
        return ret;
#endif
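/*
 * Registering target_timerfd_trans above hooks guest reads of the new
 * descriptor so that the 8-byte expiration counter a timerfd read()
 * returns can be byte-swapped to the guest's endianness.
 */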
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
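/*
 * fd_trans_unregister() above clears any translator still registered for
 * this descriptor number by a previous, since-closed fd, so the new memfd
 * starts out with no read/write hooks attached.
 */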
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
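/*
 * Note on copy_file_range above: the guest's in/out offsets are written
 * back only after a successful transfer (ret > 0), matching the kernel's
 * behaviour of advancing *off_in / *off_out by the number of bytes copied.
 */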
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
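/*
 * do_syscall() is the entry point called from the per-arch cpu loop; it
 * wraps do_syscall1() with the plugin/record hooks and with -strace
 * logging of syscall entry and return.
 */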
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}