/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include "qemu/plugin.h"
#include "target_mman.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#include <linux/btrfs.h>
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#include "linux_loop.h"
#include "user-internals.h"
#include "signal-common.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "cpu_loop-common.h"

#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

#ifndef CLONE_PIDFD
# define CLONE_PIDFD 0x00001000
#endif

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS              \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
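
/*
 * Worked example (illustrative; not part of the original file): glibc's
 * pthread_create() issues clone() with CLONE_VM | CLONE_FS | CLONE_FILES |
 * CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 * CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID plus an exit signal in
 * CSIGNAL. All of CLONE_THREAD_FLAGS are present and the rest is covered
 * by CLONE_OPTIONAL_THREAD_FLAGS, so the masks above classify it as a
 * thread-style clone. A plain fork() passes only SIGCHLD in CSIGNAL, so
 * none of CLONE_THREAD_FLAGS are present and it is treated as fork-style.
 */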
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
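
/*
 * For illustration (not part of the original file): each _syscallN use
 * below expands to a tiny host-syscall stub. For example,
 *     _syscall3(int, sys_getdents, unsigned int, fd,
 *               struct linux_dirent *, dirp, unsigned int, count);
 * becomes, roughly,
 *     static int sys_getdents(unsigned int fd, struct linux_dirent *dirp,
 *                             unsigned int count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 * where __NR_sys_getdents is #defined below to the host's __NR_getdents
 * number, bypassing any libc wrapper for that syscall.
 */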
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)

/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp,
          unsigned int, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp,
          unsigned int, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, unsigned int, fd, unsigned long, hi, unsigned long, lo,
          loff_t *, res, unsigned int, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC     (1U << 2)
#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
          unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
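
/*
 * Illustrative note (not in the original file): tables like this feed
 * QEMU's bitmask translation helpers, e.g.
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 * Each entry is (target mask, target bits, host mask, host bits); the
 * helper tests the target mask/bits against the guest value and ORs in
 * the corresponding host bits, so guest O_* encodings that differ
 * numerically from the host's still map to the right host open flags.
 */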
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)

#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif

static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    }
    return ret;
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}

static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;

    if (usize <= ksize) {
        return 1;
    }
    for (i = ksize; i < usize; i++) {
        uint8_t b;
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b) {
            return 0;
        }
    }
    return 1;
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
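
/*
 * Illustrative expansion (not part of the original file):
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 * defines safe_read(fd, buff, count), which routes through the
 * safe_syscall() shim so that a guest signal arriving just before the
 * host syscall instruction makes the call return QEMU_ERESTARTSYS
 * instead of blocking with the signal pending; the caller then delivers
 * the signal and restarts the guest syscall.
 */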
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC includes wants address of 'shm' argument, not its value */
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif

/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
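
/*
 * Illustrative use (not in the original file): a locking call goes
 * through the 64-bit constants and structure, e.g.
 *     struct flock64 fl;
 *     ret = safe_fcntl(fd, F_GETLK64, &fl);
 * so the same code path gets 64-bit file offsets whether the host has a
 * separate fcntl64 syscall (32-bit hosts) or plain fcntl is already
 * 64-bit (64-bit hosts).
 */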
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}

static abi_ulong target_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_brk = TARGET_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;
    abi_ulong new_brk, new_host_brk_page;

    /* brk pointers are always untagged */

    /* return old brk value if brk_val unchanged or zero */
    if (!brk_val || brk_val == target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

    /* brk_val and old target_brk might be on the same page */
    if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
        if (brk_val > target_brk) {
            /* empty remaining bytes in (possibly larger) host page */
            memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
        }
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < target_brk) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);

        /* free unused host pages and set new brk_page */
        target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
        brk_page = new_host_brk_page;

        target_brk = brk_val;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = new_host_brk_page - brk_page;
    if (new_alloc_size) {
        mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                            PROT_READ|PROT_WRITE,
                                            MAP_ANON|MAP_PRIVATE, 0, 0));
    } else {
        mapped_addr = brk_page;
    }

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = brk_val;
        brk_page = new_host_brk_page;
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
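
/*
 * Worked example (illustrative; not part of the original file): with
 * HOST_HZ 1024 (Alpha host) and TARGET_HZ 100, 2048 host clock ticks
 * convert to (2048 * 100) / 1024 = 200 target ticks. The int64_t cast
 * keeps the intermediate product from overflowing a 32-bit long.
 */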
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif

#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
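
/*
 * The {set,get}sockopt emulation below follows one pattern: dispatch on
 * level and optname, copy the option value between guest and host memory
 * with the right size and byte order, and forward to the host syscall.
 * Plain-int options need only a get_user/put_user; structured options
 * (timevals, multicast requests, BPF filter programs) are converted
 * field by field.
 */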
/* do_setsockopt() must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user(ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user(dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
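
/*
 * do_getsockopt() additionally honours the guest's optlen handshake: it
 * reads the length the guest offered, clamps whatever the host returns to
 * that size, and writes the resulting length back.  Note the deliberate
 * byte-sized store when the guest passes optlen < sizeof(int) for the
 * TTL-style IP/IPv6 options, matching kernel behaviour.
 */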
/* do_getsockopt() must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        switch (optname) {
        case SO_TYPE:
            val = host_to_target_sock_type(val);
            break;
        case SO_ERROR:
            val = host_to_target_errno(val);
            break;
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
        case IP_RECVTTL:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
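
/*
 * Worked illustration (not part of the original source): for a 32-bit
 * guest on a 64-bit host, tlow = 0x89abcdef and thigh = 0x01234567
 * combine to off = 0x0123456789abcdef; *hlow then receives the whole
 * value and *hhigh becomes 0.  The shift is split into two halves of
 * TARGET_LONG_BITS / 2 (and HOST_LONG_BITS / 2 on the way back out)
 * because a single full-width shift would be undefined behaviour in C
 * when the shift count equals the operand width.
 */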
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
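
/*
 * lock_iovec() materialises a guest iovec array as host struct iovec,
 * locking each guest buffer into host memory.  Its error contract mirrors
 * readv/writev: a bad first buffer faults the whole call, while later bad
 * buffers are recorded as zero-length entries so the host call performs
 * the same partial transfer the kernel would.
 */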
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}

static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}

static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
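
/*
 * On hosts without SOCK_NONBLOCK the flag cannot be passed to socket()
 * directly, so the fallback below applies O_NONBLOCK with fcntl() after
 * the socket has been created, which has the same effect.
 */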
/* Try to emulate socket type flags after socket creation.  */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}

/* do_socket() must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case:
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}

/* do_bind() must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
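
/*
 * Shared worker for sendmsg/recvmsg and the *mmsg variants.  The host
 * msghdr is rebuilt piece by piece: name and iovec go through the
 * conversion helpers above, and the control buffer is sized at twice the
 * guest's msg_controllen because host cmsg headers and alignment can be
 * larger than the target's.
 */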
/* do_sendrecvmsg_locked() must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendmsg/recvmsg returns a different errno for this condition
         * than readv/writev, so we must catch it here before lock_iovec()
         * does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}

static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
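
/*
 * accept4 flags arrive in the guest encoding; only SOCK_CLOEXEC and
 * SOCK_NONBLOCK are valid for accept4, so anything else is rejected with
 * EINVAL before the two known bits are translated to host values.
 */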
/* do_accept4() must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags = 0;

    if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
        return -TARGET_EINVAL;
    }

    if (flags & TARGET_SOCK_NONBLOCK) {
        host_flags |= SOCK_NONBLOCK;
    }
    if (flags & TARGET_SOCK_CLOEXEC) {
        host_flags |= SOCK_CLOEXEC;
    }

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getpeername() must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_getsockname() must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}

/* do_socketpair() must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_sendto() must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
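
/*
 * Older ABIs (notably i386) funnel every socket operation through the
 * single socketcall syscall: the first argument selects the operation and
 * the second points at a packed array of the real arguments in guest
 * memory.  nargs[] records how many words each operation takes, so e.g. a
 * guest socketcall(TARGET_SYS_SOCKET, ptr) fetches three words from ptr
 * and lands in do_socket().
 */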
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
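
/*
 * System V IPC emulation.  shm_regions[] remembers where shmat() mapped
 * each segment in the guest address space so that a later shmdt() can
 * find the segment and its size to unmap; being a fixed-size table it
 * bounds the number of simultaneously attached segments.
 */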
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif

static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;

    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}

union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong array;
    abi_ulong buf;
};

static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
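
/*
 * Illustration (not from the original source): with a 64-bit cross-endian
 * guest, target_semun.buf is 8 bytes while val is 4, so reading val
 * directly would pick up the wrong half of the byteswapped union.  The
 * GETVAL/SETVAL path in do_semctl() therefore swaps the whole 8-byte
 * union first and then the 4-byte val field, as its own comment explains.
 */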
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}

struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which pass the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif

struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}

struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;

    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}

static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
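
/*
 * msgsnd/msgrcv carry a variable-length payload after the mtype word, so
 * the helpers below allocate a host msgbuf of msgsz plus the mtype header
 * and copy mtext through it; only the mtype field needs byte swapping,
 * the payload itself is opaque bytes.
 */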
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}

#ifdef __NR_ipc
#if defined(__sparc__)
/* On SPARC, msgrcv does not use the kludge on the final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}

static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
4435 struct target_shminfo
{
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
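/*
 * Illustrative guest-side sketch, not part of the emulator: an IPC_STAT
 * request, which takes the dsarg path above (target_to_host_shmid_ds()
 * runs first because IPC_STAT shares that branch with IPC_SET).  Kept
 * under #if 0; the helper name is made up for the example.
 */
#if 0
#include <sys/shm.h>

static unsigned long example_segment_size(int shmid)
{
    struct shmid_ds ds;
    if (shmctl(shmid, IPC_STAT, &ds) < 0) {
        return 0;
    }
    return ds.shm_segsz;   /* converted back by host_to_target_shmid_ds() */
}
#endif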
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
                          abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
                                   MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((intptr_t)host_raddr);
    }
    raddr = h2g((uintptr_t)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
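/*
 * Illustrative guest-side sketch, not part of the emulator: the
 * attach/detach pair serviced by do_shmat()/do_shmdt().  Passing a NULL
 * address lets the emulator pick a guest address via mmap_find_vma(),
 * aligned to both the host and the target SHMLBA.  Kept under #if 0.
 */
#if 0
#include <sys/shm.h>

static void example_shm_round_trip(int shmid)
{
    void *p = shmat(shmid, NULL, SHM_RDONLY);   /* read-only: no PAGE_WRITE */
    if (p != (void *)-1) {
        shmdt(p);   /* clears the shm_regions[] slot and page flags */
    }
}
#endif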
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
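/*
 * Illustrative sketch, not part of the emulator: how the multiplexed
 * ipc(2) call number decoded above is laid out.  The version lives in
 * the top 16 bits and the operation in the low 16 bits, which is what
 * IPCOP_CALL() composes on the way back in.  Kept under #if 0.
 */
#if 0
static void example_decode_ipc(unsigned int call)
{
    int version = call >> 16;   /* 0 selects historical structure layouts */
    int op = call & 0xffff;     /* IPCOP_msgrcv, IPCOP_shmat, ... */
    (void)version;
    (void)op;
}
#endif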
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096

#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
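/*
 * Illustrative guest-side sketch, not part of the emulator: the two-step
 * FIEMAP idiom the handler above supports -- first probe with
 * fm_extent_count == 0 to learn how many extents exist, then allocate a
 * variable-length buffer and fetch them.  Kept under #if 0; the helper
 * name is made up for the example.
 */
#if 0
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static struct fiemap *example_fetch_extents(int fd)
{
    struct fiemap probe;

    memset(&probe, 0, sizeof(probe));
    probe.fm_length = ~0ULL;            /* whole file */
    if (ioctl(fd, FS_IOC_FIEMAP, &probe) < 0) {
        return NULL;                    /* probe: fm_extent_count == 0 */
    }

    size_t sz = sizeof(struct fiemap) +
                probe.fm_mapped_extents * sizeof(struct fiemap_extent);
    struct fiemap *fm = calloc(1, sz);
    if (!fm) {
        return NULL;
    }
    fm->fm_length = ~0ULL;
    fm->fm_extent_count = probe.fm_mapped_extents;
    if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
        free(fm);
        return NULL;
    }
    return fm;                          /* caller frees */
}
#endif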
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = g_try_malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf,
                               target_ifc_len, 0);
            for (i = 0; i < nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}
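/*
 * Illustrative guest-side sketch, not part of the emulator: SIOCGIFCONF
 * with a caller-supplied buffer.  ifc_len is rescaled between target and
 * host ifreq sizes by the handler above, so the guest arithmetic below
 * still works.  Kept under #if 0; the helper name is made up.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <net/if.h>

static void example_list_interfaces(int sock)
{
    struct ifreq reqs[8];
    struct ifconf ifc;

    ifc.ifc_len = sizeof(reqs);
    ifc.ifc_req = reqs;
    if (ioctl(sock, SIOCGIFCONF, &ifc) == 0) {
        int n = ifc.ifc_len / sizeof(struct ifreq);
        for (int i = 0; i < n; i++) {
            printf("%s\n", reqs[i].ifr_name);
        }
    }
}
#endif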
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
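/*
 * Illustrative sketch, not part of the emulator: the reapurb handler
 * recovers its metadata with manual container_of() arithmetic -- the
 * kernel hands back the &lurb->host_urb pointer given at submit time,
 * and subtracting offsetof(struct live_urb, host_urb) yields the
 * wrapper.  Kept under #if 0; the struct and helper are made up.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct example_wrapper {
    uint64_t meta;
    int payload;
};

static struct example_wrapper *example_container_of(int *payload_ptr)
{
    return (struct example_wrapper *)((uintptr_t)payload_ptr -
                                      offsetof(struct example_wrapper,
                                               payload));
}
#endif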
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers =
                (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
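/*
 * Illustrative guest-side sketch, not part of the emulator: SIOCADDRT
 * with an rt_dev device-name string, the embedded pointer that forces
 * the field-by-field conversion above instead of a plain
 * thunk_convert().  Kept under #if 0; the helper name and device name
 * are made up.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/route.h>

static int example_add_route(int sock)
{
    struct rtentry rt;

    memset(&rt, 0, sizeof(rt));
    rt.rt_flags = RTF_UP;
    rt.rt_dev = (char *)"eth0";   /* locked as a string by do_ioctl_rt() */
    return ioctl(sock, SIOCADDRT, &rt);
}
#endif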
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5561 int fd
, int cmd
, abi_long arg
)
5563 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5564 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
#ifdef HAVE_DRM_H

static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}

static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}

#endif
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
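/*
 * Illustrative note, not part of the original source: what one table row
 * expands to.  For example an entry like
 * IOCTL(TCGETS, IOC_R, MK_PTR(MK_STRUCT(STRUCT_termios))) becomes roughly
 *
 *   { TARGET_TCGETS, TCGETS, "TCGETS", IOC_R, 0,
 *     { MK_PTR(MK_STRUCT(STRUCT_termios)) } },
 *
 * i.e. target command number, host command number, name for logging,
 * access direction, optional special handler, and the thunk type used
 * for generic argument conversion.
 */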
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOTTY;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
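/*
 * Illustrative note, not part of the original source: the three generic
 * access modes handled above.  IOC_W converts guest->host before the
 * call, IOC_R converts host->guest after it, and IOC_RW does both around
 * the call; anything that does not fit this pattern gets a special
 * do_ioctl handler via IOCTL_SPECIAL.
 */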
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
};
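/*
 * Illustrative sketch, not part of the emulator: the semantics of a
 * bitmask_transtbl row { tmask, tbits, hmask, hbits }.  If
 * (value & tmask) == tbits, then hbits is OR'd into the result; this is
 * why multi-bit fields such as TARGET_CBAUD need one row per legal
 * value.  Kept under #if 0; the struct and helper are made up.
 */
#if 0
struct example_xlat {
    unsigned tmask, tbits, hmask, hbits;
};

static unsigned example_translate(unsigned value,
                                  const struct example_xlat *tbl, int n)
{
    unsigned out = 0;

    for (int i = 0; i < n; i++) {
        if ((value & tbl[i].tmask) == tbl[i].tbits) {
            out |= tbl[i].hbits;
        }
    }
    return out;
}
#endif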
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
/* If the host does not provide these bits, they may be safely discarded. */
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif

static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
    { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
    { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
      MAP_TYPE, MAP_SHARED_VALIDATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
    { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
    { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
    { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
      MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
    { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
      MAP_UNINITIALIZED, MAP_UNINITIALIZED },
};
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0              &&
             read_exec_only == 1        &&
             seg_32bit == 0             &&
             limit_in_pages == 0        &&
             seg_not_present == 1       &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
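/*
 * Illustrative sketch, not part of the emulator: decoding the two
 * descriptor words built above, following the x86 segment-descriptor
 * layout (base split 16/8/8 bits, limit split 16/4 bits, flag bits in
 * the high word).  This mirrors the base_addr computation performed by
 * do_get_thread_area() below.  Kept under #if 0; the helper is made up.
 */
#if 0
#include <stdint.h>

static uint32_t example_descriptor_base(uint32_t entry_1, uint32_t entry_2)
{
    return (entry_1 >> 16) |
           ((entry_2 & 0xff) << 16) |
           (entry_2 & 0xff000000);
}
#endif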
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0              &&
             read_exec_only == 1        &&
             seg_32bit == 0             &&
             limit_in_pages == 0        &&
             seg_not_present == 1       &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32 */
#endif /* defined(TARGET_I386) */
/*
 * These constants are generic.  Supply any that are missing from the host.
 */
#ifndef PR_SET_NAME
# define PR_SET_NAME    15
# define PR_GET_NAME    16
#endif
#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR   (1 << 0)
# define PR_FP_MODE_FRE  (1 << 1)
#endif
#ifndef PR_SVE_SET_VL
# define PR_SVE_SET_VL  50
# define PR_SVE_GET_VL  51
# define PR_SVE_VL_LEN_MASK  0xffff
# define PR_SVE_VL_INHERIT   (1 << 17)
#endif
#ifndef PR_PAC_RESET_KEYS
# define PR_PAC_RESET_KEYS  54
# define PR_PAC_APIAKEY   (1 << 0)
# define PR_PAC_APIBKEY   (1 << 1)
# define PR_PAC_APDAKEY   (1 << 2)
# define PR_PAC_APDBKEY   (1 << 3)
# define PR_PAC_APGAKEY   (1 << 4)
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
# define PR_MTE_TCF_SHIFT       1
# define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT       3
# define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
#endif
#ifndef PR_SET_IO_FLUSHER
# define PR_SET_IO_FLUSHER 57
# define PR_GET_IO_FLUSHER 58
#endif
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
#endif
#ifndef PR_SME_SET_VL
# define PR_SME_SET_VL  63
# define PR_SME_GET_VL  64
# define PR_SME_VL_LEN_MASK  0xffff
# define PR_SME_VL_INHERIT   (1 << 17)
#endif

#include "target_prctl.h"
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}

static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}

#ifndef do_prctl_get_fp_mode
#define do_prctl_get_fp_mode do_prctl_inval0
#endif
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
#ifndef do_prctl_sve_get_vl
#define do_prctl_sve_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sve_set_vl
#define do_prctl_sve_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
#endif
#ifndef do_prctl_set_tagged_addr_ctrl
#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
#endif
#ifndef do_prctl_get_tagged_addr_ctrl
#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
#endif
#ifndef do_prctl_get_unalign
#define do_prctl_get_unalign do_prctl_inval1
#endif
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
#ifndef do_prctl_sme_get_vl
#define do_prctl_sme_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sme_set_vl
#define do_prctl_sme_set_vl do_prctl_inval1
#endif
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
#define NEW_STACK_SIZE 0x40000

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

#if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
        if (flags & CLONE_PIDFD) {
            return -TARGET_EINVAL;
        }
#endif

        /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
        if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            if (flags & CLONE_PIDFD) {
                int pid_fd = 0;
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
                int pid_child = ret;
                pid_fd = pidfd_open(pid_child, 0);
                if (pid_fd >= 0) {
                    fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
                                           | FD_CLOEXEC);
                } else {
                    pid_fd = 0;
                }
#endif
                put_user_u32(pid_fd, parent_tidptr);
            }
            fork_end(0);
            g_assert(!cpu_in_exclusive_context(cpu));
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
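
/*
 * Editor's note: an illustrative sketch of the X-macro above, not part of
 * the original source. With TRANSTBL_CONVERT defined as in
 * target_to_host_flock(), FLOCK_TRANSTBL expands to roughly:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 *
 * so both conversion directions are generated from one lock-type list and
 * cannot drift apart.
 */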
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;

static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
            /* tell 32-bit guests it uses largefile on 64-bit hosts: */
            if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
                ret |= TARGET_O_LARGEFILE;
            }
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
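
/*
 * Editor's note: a worked example of the 16-bit ID helpers above,
 * illustrative only and not part of the original source:
 *     high2lowuid(100000) == 65534   (out-of-range IDs clamp to the
 *                                     overflow sentinel shown to the guest)
 *     low2highuid(0xffff) == -1      ((int16_t)0xffff sign-extends to the
 *                                     "leave unchanged" marker for set*uid)
 */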
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
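
/*
 * Editor's note: hedged illustration of why the raw wrappers above exist,
 * not part of the original source. glibc's setuid() broadcasts the change
 * to every thread of the process via an internal signal, while the raw
 * syscall only affects the calling thread -- which is what a guest issuing
 * the kernel-level syscall expects:
 *
 *     setuid(uid);       // libc/POSIX: all threads change
 *     sys_setuid(uid);   // raw syscall: only this thread changes
 */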
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
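
/*
 * Editor's note: worked example of the size patching above (illustrative,
 * not from the original source). A table entry may be declared with an
 * all-ones size field as a "fill me in" marker; syscall_init() computes the
 * real thunk size of the pointed-to type and splices it back in:
 *
 *     target_cmd = (target_cmd
 *                   & ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT))
 *                  | (size << TARGET_IOC_SIZESHIFT);
 *
 * This is only safe because no genuine ioctl encodes an all-ones size.
 */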
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
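
/*
 * Editor's note (illustrative, not in the original source): the tswapal()
 * above swaps a full abi_ulong, so on a 64-bit big-endian host running a
 * 32-bit guest, a guest value stored through the union's 32-bit sival_int
 * member still lands where the host reads it back as sival_int -- the same
 * convention the sigval handling in signal.c relies on.
 */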
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif

static inline int target_to_host_msync_arg(abi_long arg)
{
    return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
           ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
           ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
           (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
}
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
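
/*
 * Editor's note: an illustrative guest-side view of the path above, not
 * part of the original source. A guest call such as
 *
 *     futex(uaddr, FUTEX_WAIT, val, &ts, NULL, 0);
 *
 * arrives here with 'val' byteswapped to match the guest-endian word in
 * memory, 'ts' converted via target_to_host_timespec*(), and 'uaddr'
 * translated with g2h(), so the host kernel waits on the very cell the
 * guest's atomic operations touch.
 */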
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    int mid = 0;
    abi_long ret;
    char *name;
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
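
/*
 * Editor's note: worked example (illustrative, not in the original source).
 * Suppose the host reports a child killed by SIGUSR1 (signal 10 on most
 * hosts) while the target, e.g. Alpha, numbers SIGUSR1 as 30: WIFSIGNALED()
 * is true, so the low 7 bits change from 10 to 30 while the core-dump and
 * remaining status bits (status & ~0x7f) pass through untouched.
 */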
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static void show_smaps(int fd, unsigned long size)
{
    unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
    unsigned long size_kb = size >> 10;

    dprintf(fd, "Size:                  %lu kB\n"
                "KernelPageSize:        %lu kB\n"
                "MMUPageSize:           %lu kB\n"
                "Rss:                   0 kB\n"
                "Pss:                   0 kB\n"
                "Pss_Dirty:             0 kB\n"
                "Shared_Clean:          0 kB\n"
                "Shared_Dirty:          0 kB\n"
                "Private_Clean:         0 kB\n"
                "Private_Dirty:         0 kB\n"
                "Referenced:            0 kB\n"
                "Anonymous:             0 kB\n"
                "LazyFree:              0 kB\n"
                "AnonHugePages:         0 kB\n"
                "ShmemPmdMapped:        0 kB\n"
                "FilePmdMapped:         0 kB\n"
                "Shared_Hugetlb:        0 kB\n"
                "Private_Hugetlb:       0 kB\n"
                "Swap:                  0 kB\n"
                "SwapPss:               0 kB\n"
                "Locked:                0 kB\n"
                "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
}
static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (!page_check_range(h2g(min), max - min, flags)) {
                continue;
            }

#ifdef TARGET_HPPA
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
            if (smaps) {
                show_smaps(fd, max - min);
                dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
                        (flags & PAGE_READ) ? " rd" : "",
                        (flags & PAGE_WRITE_ORG) ? " wr" : "",
                        (flags & PAGE_EXEC) ? " ex" : "",
                        e->is_priv ? "" : " sh",
                        (flags & PAGE_READ) ? " mr" : "",
                        (flags & PAGE_WRITE_ORG) ? " mw" : "",
                        (flags & PAGE_EXEC) ? " me" : "",
                        e->is_priv ? "" : " ms");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
    if (smaps) {
        show_smaps(fd, TARGET_PAGE_SIZE);
        dprintf(fd, "VmFlags: ex\n");
    }
#endif

    return 0;
}
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}

static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 2) {
            /* task state */
            g_string_assign(buf, "R "); /* we are running right now */
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
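
/*
 * Editor's note: usage sketch, not part of the original source. For a QEMU
 * process whose getpid() is 1234:
 *     is_proc_myself("/proc/self/maps", "maps") -> 1
 *     is_proc_myself("/proc/1234/maps", "maps") -> 1
 *     is_proc_myself("/proc/4321/maps", "maps") -> 0  (another process)
 *     is_proc_myself("/proc/stat", "stat")      -> 0  (global, not per-pid)
 */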
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                           const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}

void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
    defined(TARGET_RISCV) || defined(TARGET_S390X)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i, num_cpus;

    num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
        dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
        dprintf(fd, "capabilities\t: os32\n");
        dprintf(fd, "model\t\t: 9000/778/B160L - "
                    "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
    }
    return 0;
}
#endif

#if defined(TARGET_RISCV)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    RISCVCPU *cpu = env_archcpu(cpu_env);
    const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
    char *isa_string = riscv_isa_string(cpu);
    const char *mmu;

    if (cfg->mmu) {
        mmu = (cpu_env->xl == MXL_RV32) ? "sv32" : "sv48";
    } else {
        mmu = "none";
    }

    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "hart\t\t: %d\n", i);
        dprintf(fd, "isa\t\t: %s\n", isa_string);
        dprintf(fd, "mmu\t\t: %s\n", mmu);
        dprintf(fd, "uarch\t\t: qemu\n\n");
    }

    g_free(isa_string);
    return 0;
}
#endif
#if defined(TARGET_S390X)
/*
 * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
 * show in /proc/cpuinfo.
 *
 * Skip the following in order to match the missing support in op_ecag():
 * - show_cacheinfo().
 * - show_cpu_topology().
 *
 * Use fixed values for certain fields:
 * - bogomips per cpu - from a qemu-system-s390x run.
 * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
 *
 * Keep the code structure close to arch/s390/kernel/processor.c.
 */

static void show_facilities(int fd)
{
    size_t sizeof_stfl_bytes = 2048;
    g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
    unsigned int bit;

    dprintf(fd, "facilities      :");
    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
        if (test_be_bit(bit, stfl_bytes)) {
            dprintf(fd, " %d", bit);
        }
    }
    dprintf(fd, "\n");
}

static int cpu_ident(unsigned long n)
{
    return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
                     n);
}

static void show_cpu_summary(CPUArchState *cpu_env, int fd)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    uint32_t elf_hwcap = get_elf_hwcap();
    const char *hwcap_str;
    int i;

    dprintf(fd, "vendor_id       : IBM/S390\n"
                "# processors    : %i\n"
                "bogomips per cpu: 13370.00\n",
            num_cpus);
    dprintf(fd, "max thread id   : 0\n");
    dprintf(fd, "features\t: ");
    for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
        if (!(elf_hwcap & (1 << i))) {
            continue;
        }
        hwcap_str = elf_hwcap_str(i);
        if (hwcap_str) {
            dprintf(fd, "%s ", hwcap_str);
        }
    }
    dprintf(fd, "\n");
    show_facilities(fd);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor %d: "
                    "version = %02X,  "
                    "identification = %06X,  "
                    "machine = %04X\n",
                i, model->cpu_ver, cpu_ident(i), model->def->type);
    }
}

static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;

    dprintf(fd, "version         : %02X\n", model->cpu_ver);
    dprintf(fd, "identification  : %06X\n", cpu_ident(n));
    dprintf(fd, "machine         : %04X\n", model->def->type);
}

static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
{
    dprintf(fd, "\ncpu number      : %ld\n", n);
    show_cpu_ids(cpu_env, fd, n);
}

static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    int i;

    show_cpu_summary(cpu_env, fd);
    for (i = 0; i < num_cpus; i++) {
        show_cpuinfo(cpu_env, fd, i);
    }
    return 0;
}
#endif
#if defined(TARGET_M68K)
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
    defined(TARGET_RISCV) || defined(TARGET_S390X)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir) {
                tmpdir = "/tmp";
            }
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    if (safe) {
        return safe_openat(dirfd, path(pathname), flags, mode);
    } else {
        return openat(dirfd, path(pathname), flags, mode);
    }
}
ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
{
    ssize_t ret;

    if (!pathname || !buf) {
        errno = EFAULT;
        return -1;
    }

    if (!bufsiz) {
        /* Short circuit this for the magic exe check. */
        errno = EINVAL;
        return -1;
    }

    if (is_proc_myself((const char *)pathname, "exe")) {
        /*
         * Don't worry about sign mismatch as earlier mapping
         * logic would have thrown a bad address error.
         */
        ret = MIN(strlen(exec_path), bufsiz);
        /* We cannot NUL terminate the string. */
        memcpy(buf, exec_path, ret);
    } else {
        ret = readlink(path(pathname), buf, bufsiz);
    }

    return ret;
}
static int do_execv(CPUArchState *cpu_env, int dirfd,
                    abi_long pathname, abi_long guest_argp,
                    abi_long guest_envp, int flags, bool is_execveat)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    const char *exe = p;
    if (is_proc_myself(p, "exe")) {
        exe = exec_path;
    }
    ret = is_execveat
        ? safe_execveat(dirfd, exe, argp, envp, flags)
        : safe_execve(exe, argp, envp);
    ret = get_errno(ret);

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)

    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
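/*
 * Worked example (editorial; the statement masking off the high half is
 * elided above): an internal timer index of 3 is exposed to the guest as
 * 0x0caf0003. (0x0caf0003 & TIMER_MAGIC_MASK) == TIMER_MAGIC, so the ID is
 * accepted, and the low 16 bits recover the index 3; any other high half
 * fails the first check and yields -TARGET_EINVAL.
 */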
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   abi_ulong target_addr,

    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
        return -TARGET_EFAULT;
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);

    unlock_user(target_mask, target_addr, 0);
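/*
 * Illustrative example (editorial): for a 32-bit guest on a 64-bit host,
 * guest CPU 37 is bit 5 of target_mask[1]; the loop reaches it with
 * bit = 1 * 32 + 5 = 37 and sets host_mask[37 / 64] |= 1UL << (37 % 64),
 * i.e. bit 37 of host_mask[0]. Bit positions are preserved; only the word
 * size differs between guest and host, as in the reverse helper below.
 */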
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   abi_ulong target_addr,

    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
        return -TARGET_EFAULT;

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {

        __put_user(val, &target_mask[i]);

    unlock_user(target_mask, target_addr, target_size);
#ifdef TARGET_NR_getdents
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)

    g_autofree void *hdirp = NULL;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
        return -TARGET_ENOMEM;

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
    hlen = sys_getdents64(dirfd, hdirp, count);

    hlen = get_errno(hlen);
    if (is_error(hlen)) {

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
        return -TARGET_EFAULT;

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent *tde = tdirp + toff;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
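        /*
         * Worked example (editorial): for a 9-character name the target
         * record needs its fixed header plus 9 name bytes plus 2 more,
         * covering the NUL and the trailing type byte stored after the
         * name (see below), and QEMU_ALIGN_UP then rounds the total up to
         * the alignment of struct target_dirent.
         */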
        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
                toff = -TARGET_EINVAL; /* result buffer is too small */
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
        *((uint8_t *)tde + treclen - 1) = type;

    unlock_user(tdirp, arg2, toff);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)

    g_autofree void *hdirp = NULL;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
        return -TARGET_ENOMEM;

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
        return -TARGET_EFAULT;

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;

        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
                toff = -TARGET_EINVAL; /* result buffer is too small */
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);

    unlock_user(tdirp, arg2, toff);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR_riscv_hwprobe)

#define RISCV_HWPROBE_KEY_MVENDORID 0
#define RISCV_HWPROBE_KEY_MARCHID 1
#define RISCV_HWPROBE_KEY_MIMPID 2

#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
#define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)

#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
#define RISCV_HWPROBE_IMA_FD (1 << 0)
#define RISCV_HWPROBE_IMA_C (1 << 1)

#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
#define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
#define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
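/*
 * Usage sketch (editorial, mirroring the kernel's riscv_hwprobe ABI): the
 * guest passes an array of { key, value } pairs and the emulator fills in
 * each value it recognises, e.g. RISCV_HWPROBE_KEY_IMA_EXT_0 is answered
 * with RISCV_HWPROBE_IMA_FD | RISCV_HWPROBE_IMA_C when the vCPU has the
 * F/D and C extensions; unknown keys get key = -1, as
 * risc_hwprobe_fill_pairs() below implements.
 */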
struct riscv_hwprobe {

static void risc_hwprobe_fill_pairs(CPURISCVState *env,
                                    struct riscv_hwprobe *pair,

    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);

    for (; pair_count > 0; pair_count--, pair++) {
        __put_user(0, &pair->value);
        __get_user(key, &pair->key);
        case RISCV_HWPROBE_KEY_MVENDORID:
            __put_user(cfg->mvendorid, &pair->value);
        case RISCV_HWPROBE_KEY_MARCHID:
            __put_user(cfg->marchid, &pair->value);
        case RISCV_HWPROBE_KEY_MIMPID:
            __put_user(cfg->mimpid, &pair->value);
        case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
            value = riscv_has_ext(env, RVI) &&
                    riscv_has_ext(env, RVM) &&
                    riscv_has_ext(env, RVA) ?
                    RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
            __put_user(value, &pair->value);
        case RISCV_HWPROBE_KEY_IMA_EXT_0:
            value = riscv_has_ext(env, RVF) &&
                    riscv_has_ext(env, RVD) ?
                    RISCV_HWPROBE_IMA_FD : 0;
            value |= riscv_has_ext(env, RVC) ?
                     RISCV_HWPROBE_IMA_C : pair->value;
            __put_user(value, &pair->value);
        case RISCV_HWPROBE_KEY_CPUPERF_0:
            __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
            __put_user(-1, &pair->key);
static int cpu_set_valid(abi_long arg3, abi_long arg4)

    size_t host_mask_size, target_mask_size;
    unsigned long *host_mask;

    /*
     * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
     * arg3 contains the cpu count.
     */
    tmp = (8 * sizeof(abi_ulong));
    target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
    host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
                     ~(sizeof(*host_mask) - 1);
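    /*
     * Numeric example (editorial): with a 4-byte abi_ulong and
     * arg3 == 40 CPUs, tmp is 32, so target_mask_size rounds 40 bits up
     * to two abi_ulongs (8 bytes); with 8-byte host longs, the masking
     * above leaves host_mask_size at 8 bytes as well.
     */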
    host_mask = alloca(host_mask_size);

    ret = target_to_host_cpu_mask(host_mask, host_mask_size,
                                  arg4, target_mask_size);

    for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
        if (host_mask[i] != 0) {

    return -TARGET_EINVAL;
static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
                                 abi_long arg2, abi_long arg3,
                                 abi_long arg4, abi_long arg5)

    struct riscv_hwprobe *host_pairs;

    /* flags must be 0 */
        return -TARGET_EINVAL;

    ret = cpu_set_valid(arg3, arg4);
    } else if (arg4 != 0) {
        return -TARGET_EINVAL;

    host_pairs = lock_user(VERIFY_WRITE, arg1,
                           sizeof(*host_pairs) * (size_t)arg2, 0);
    if (host_pairs == NULL) {
        return -TARGET_EFAULT;
    risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
    unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
#endif /* TARGET_NR_riscv_hwprobe */

#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)

#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
#define __NR_sys_open_tree __NR_open_tree
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)

#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
#define __NR_sys_move_mount __NR_move_mount
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
          int, __to_dfd, const char *, __to_pathname, unsigned int, flag)

/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,

    CPUState *cpu = env_cpu(cpu_env);

#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)

    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread. */

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = cpu->opaque;

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(cpu, ts->child_tidptr),
                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);

            object_unparent(OBJECT(cpu));
            object_unref(OBJECT(cpu));
            /*
             * At this point the CPU should be unrealized and removed
             * from cpu lists. We can clean-up the rest of the thread
             * data without the lock held.
             */

            pthread_mutex_unlock(&clone_lock);

            rcu_unregister_thread();

        pthread_mutex_unlock(&clone_lock);
        preexit_cleanup(cpu_env, arg1);
        return 0; /* avoid warning */
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(safe_read(arg1, p, arg3));
            fd_trans_host_to_target_data(arg1)) {
            ret = fd_trans_host_to_target_data(arg1)(p, ret);
        unlock_user(p, arg2, ret);
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
                ret = get_errno(safe_write(arg1, copy, ret));
            ret = get_errno(safe_write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
                                        target_to_host_bitmask(arg2, fcntl_flags_tbl),
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_guest_openat(cpu_env, arg1, p,
                                        target_to_host_bitmask(arg3, fcntl_flags_tbl),
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
    case TARGET_NR_pidfd_open:
        return get_errno(pidfd_open(arg1, arg2));
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
    case TARGET_NR_pidfd_send_signal:
        siginfo_t uinfo, *puinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
                return -TARGET_EFAULT;
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
        ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
    case TARGET_NR_pidfd_getfd:
        return get_errno(pidfd_getfd(arg1, arg2, arg3));
    case TARGET_NR_close:
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
    case TARGET_NR_close_range:
        ret = get_errno(sys_close_range(arg1, arg2, arg3));
        if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
            maxfd = MIN(arg2, target_fd_max);
            for (fd = arg1; fd < maxfd; fd++) {
                fd_trans_unregister(fd);

        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
        if (!is_error(ret) && arg2 && ret
            && put_user_s32(host_to_target_waitstatus(status), arg2))
            return -TARGET_EFAULT;
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
        if (!is_error(ret) && arg3 && info.si_pid != 0) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                return -TARGET_EFAULT;
            host_to_target_siginfo(p, &info);
            unlock_user(p, arg3, sizeof(target_siginfo_t));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
            ret = -TARGET_EFAULT;
            ret = get_errno(link(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
            return -TARGET_EFAULT;
        p = lock_user_string(arg2);
        p2 = lock_user_string(arg4);
            ret = -TARGET_EFAULT;
            ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
        unlock_user(p, arg2, 0);
        unlock_user(p2, arg4, 0);
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_execveat:
        return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
    case TARGET_NR_execve:
        return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        ret = get_errno(time(&host_time));
            && put_user_sal(host_time, arg1))
            return -TARGET_EFAULT;
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        cpu_env->ir[IR_A4] = getppid();
        return get_errno(getpid());
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
    case TARGET_NR_mount:
            /* need to look at the data field */
            p = lock_user_string(arg1);
                return -TARGET_EFAULT;

            p2 = lock_user_string(arg2);
                unlock_user(p, arg1, 0);
                return -TARGET_EFAULT;

            p3 = lock_user_string(arg3);
                unlock_user(p, arg1, 0);
                unlock_user(p2, arg2, 0);
                return -TARGET_EFAULT;

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            ret = get_errno(ret);

            unlock_user(p, arg1, 0);
            unlock_user(p2, arg2, 0);
            unlock_user(p3, arg3, 0);
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
    case TARGET_NR_move_mount:
        if (!arg2 || !arg4) {
            return -TARGET_EFAULT;

        p2 = lock_user_string(arg2);
            return -TARGET_EFAULT;

        p4 = lock_user_string(arg4);
            unlock_user(p2, arg2, 0);
            return -TARGET_EFAULT;

        ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));

        unlock_user(p2, arg2, 0);
        unlock_user(p4, arg4, 0);
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
    case TARGET_NR_open_tree:
            return -TARGET_EFAULT;

        p2 = lock_user_string(arg2);
            return -TARGET_EFAULT;

        host_flags = arg3 & ~TARGET_O_CLOEXEC;
        if (arg3 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;

        ret = get_errno(sys_open_tree(arg1, p2, host_flags));

        unlock_user(p2, arg2, 0);
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        if (get_user_sal(ts.tv_sec, arg1)) {
            return -TARGET_EFAULT;
        return get_errno(clock_settime(CLOCK_REALTIME, &ts));
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        return -TARGET_EINTR;
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        struct utimbuf tbuf, *host_tbuf;
        struct target_utimbuf *target_tbuf;
            if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                return -TARGET_EFAULT;
            tbuf.actime = tswapal(target_tbuf->actime);
            tbuf.modtime = tswapal(target_tbuf->modtime);
            unlock_user_struct(target_tbuf, arg2, 0);
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(utime(p, host_tbuf));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        struct timeval *tvp, tv[2];
            if (copy_from_user_timeval(&tv[0], arg2)
                || copy_from_user_timeval(&tv[1],
                                          arg2 + sizeof(struct target_timeval)))
                return -TARGET_EFAULT;
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(utimes(p, tvp));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        struct timeval *tvp, tv[2];
            if (copy_from_user_timeval(&tv[0], arg3)
                || copy_from_user_timeval(&tv[1],
                                          arg3 + sizeof(struct target_timeval)))
                return -TARGET_EFAULT;
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        ret = get_errno(futimesat(arg1, path(p), tvp));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
#if defined(TARGET_NR_faccessat2)
    case TARGET_NR_faccessat2:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        ret = get_errno(faccessat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
    case TARGET_NR_sync:
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
            ret = -TARGET_EFAULT;
            ret = get_errno(rename(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        p = lock_user_string(arg2);
        p2 = lock_user_string(arg4);
            ret = -TARGET_EFAULT;
            ret = get_errno(renameat(arg1, p, arg3, p2));
        unlock_user(p2, arg4, 0);
        unlock_user(p, arg2, 0);
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        p = lock_user_string(arg2);
        p2 = lock_user_string(arg4);
            ret = -TARGET_EFAULT;
            ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
        unlock_user(p2, arg4, 0);
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        ret = get_errno(dup(arg1));
            fd_trans_dup(arg1, ret);
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
    case TARGET_NR_times:
        struct target_tms *tmsp;

        ret = get_errno(times(&tms));
            tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                return -TARGET_EFAULT;
            tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
            tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
            tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
            tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            ret = host_to_target_clock_t(ret);
    case TARGET_NR_acct:
            ret = get_errno(acct(NULL));
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
            fd_trans_dup(arg1, arg2);
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
            fd_trans_dup(arg1, arg2);
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
#if defined(TARGET_MIPS)
        struct target_sigaction act, oact, *pact, *old_act;

            if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                return -TARGET_EFAULT;
            act._sa_handler = old_act->_sa_handler;
            target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
            act.sa_flags = old_act->sa_flags;
            unlock_user_struct(old_act, arg2, 0);

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                return -TARGET_EFAULT;
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_flags = oact.sa_flags;
            old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
            old_act->sa_mask.sig[1] = 0;
            old_act->sa_mask.sig[2] = 0;
            old_act->sa_mask.sig[3] = 0;
            unlock_user_struct(old_act, arg3, 1);

        struct target_old_sigaction *old_act;
        struct target_sigaction act, oact, *pact;

            if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                return -TARGET_EFAULT;
            act._sa_handler = old_act->_sa_handler;
            target_siginitset(&act.sa_mask, old_act->sa_mask);
            act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
            act.sa_restorer = old_act->sa_restorer;
            unlock_user_struct(old_act, arg2, 0);

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                return -TARGET_EFAULT;
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_mask = oact.sa_mask.sig[0];
            old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
            old_act->sa_restorer = oact.sa_restorer;
            unlock_user_struct(old_act, arg3, 1);
    case TARGET_NR_rt_sigaction:
        /*
         * For Alpha and SPARC this is a 5 argument syscall, with
         * a 'restorer' parameter which must be copied into the
         * sa_restorer field of the sigaction struct.
         * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
         * and arg5 is the sigsetsize.
         */
#if defined(TARGET_ALPHA)
        target_ulong sigsetsize = arg4;
        target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
        target_ulong restorer = arg4;
        target_ulong sigsetsize = arg5;
        target_ulong sigsetsize = arg4;
        target_ulong restorer = 0;
        struct target_sigaction *act = NULL;
        struct target_sigaction *oact = NULL;

        if (sigsetsize != sizeof(target_sigset_t)) {
            return -TARGET_EINVAL;
        if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
            return -TARGET_EFAULT;
        if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
            ret = -TARGET_EFAULT;
        ret = get_errno(do_sigaction(arg1, act, oact, restorer));
            unlock_user_struct(oact, arg3, 1);
            unlock_user_struct(act, arg2, 0);
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        abi_ulong target_set;
        ret = do_sigprocmask(0, NULL, &cur_set);
            host_to_target_old_sigset(&target_set, &cur_set);
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        sigset_t set, oset;
        abi_ulong target_set = arg1;
        target_to_host_old_sigset(&set, &target_set);
        ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            host_to_target_old_sigset(&target_set, &oset);
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
#if defined(TARGET_ALPHA)
        sigset_t set, oldset;

        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            return -TARGET_EINVAL;

        target_to_host_old_sigset(&set, &mask);

        ret = do_sigprocmask(how, &set, &oldset);
        if (!is_error(ret)) {
            host_to_target_old_sigset(&mask, &oldset);
            cpu_env->ir[IR_V0] = 0; /* force no error */

        sigset_t set, oldset, *set_ptr;

            p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&set, p);
            unlock_user(p, arg2, 0);

            case TARGET_SIG_BLOCK:
            case TARGET_SIG_UNBLOCK:
            case TARGET_SIG_SETMASK:
                return -TARGET_EINVAL;

        ret = do_sigprocmask(how, set_ptr, &oldset);
        if (!is_error(ret) && arg3) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                return -TARGET_EFAULT;
            host_to_target_old_sigset(p, &oldset);
            unlock_user(p, arg3, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigprocmask:
        sigset_t set, oldset, *set_ptr;

        if (arg4 != sizeof(target_sigset_t)) {
            return -TARGET_EINVAL;

            p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg2, 0);

            case TARGET_SIG_BLOCK:
            case TARGET_SIG_UNBLOCK:
            case TARGET_SIG_SETMASK:
                return -TARGET_EINVAL;

        ret = do_sigprocmask(how, set_ptr, &oldset);
        if (!is_error(ret) && arg3) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                return -TARGET_EFAULT;
            host_to_target_sigset(p, &oldset);
            unlock_user(p, arg3, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                return -TARGET_EFAULT;
            host_to_target_old_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigpending:
        /* Yes, this check is >, not != like most. We follow the kernel's
         * logic and it does it like this because it implements
         * NR_sigpending through the same code path, and in that case
         * the old_sigset_t is smaller in size.
         */
        if (arg2 > sizeof(target_sigset_t)) {
            return -TARGET_EINVAL;
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                return -TARGET_EFAULT;
            host_to_target_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
#if defined(TARGET_ALPHA)
        TaskState *ts = cpu->opaque;
        /* target_to_host_old_sigset will bswap back */
        abi_ulong mask = tswapal(arg1);
        set = &ts->sigsuspend_mask;
        target_to_host_old_sigset(set, &mask);
        ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
        ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
        finish_sigsuspend_mask(ret);
    case TARGET_NR_rt_sigsuspend:
        ret = process_sigsuspend_mask(&set, arg1, arg2);
        ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
        finish_sigsuspend_mask(ret);
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        struct timespec uts, *puts;

        if (arg4 != sizeof(target_sigset_t)) {
            return -TARGET_EINVAL;

        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
            return -TARGET_EFAULT;
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);

            if (target_to_host_timespec(puts, arg3)) {
                return -TARGET_EFAULT;

        ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
        if (!is_error(ret)) {
                p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                    return -TARGET_EFAULT;
                host_to_target_siginfo(p, &uinfo);
                unlock_user(p, arg2, sizeof(target_siginfo_t));
            ret = host_to_target_signal(ret);
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        struct timespec uts, *puts;

        if (arg4 != sizeof(target_sigset_t)) {
            return -TARGET_EINVAL;

        p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            return -TARGET_EFAULT;
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);

            if (target_to_host_timespec64(puts, arg3)) {
                return -TARGET_EFAULT;

        ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
        if (!is_error(ret)) {
                p = lock_user(VERIFY_WRITE, arg2,
                              sizeof(target_siginfo_t), 0);
                    return -TARGET_EFAULT;
                host_to_target_siginfo(p, &uinfo);
                unlock_user(p, arg2, sizeof(target_siginfo_t));
            ret = host_to_target_signal(ret);
    case TARGET_NR_rt_sigqueueinfo:
        p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            return -TARGET_EFAULT;
        target_to_host_siginfo(&uinfo, p);
        unlock_user(p, arg3, 0);
        ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
    case TARGET_NR_rt_tgsigqueueinfo:
        p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            return -TARGET_EFAULT;
        target_to_host_siginfo(&uinfo, p);
        unlock_user(p, arg4, 0);
        ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        return do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        int resource = target_to_host_resource(arg1);
        struct target_rlimit *target_rlim;
        struct rlimit rlim;
        if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
            return -TARGET_EFAULT;
        rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
        rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
        unlock_user_struct(target_rlim, arg2, 0);
        /*
         * If we just passed through resource limit settings for memory then
         * they would also apply to QEMU's own allocations, and QEMU will
         * crash or hang or die if its allocations fail. Ideally we would
         * track the guest allocations in QEMU and apply the limits ourselves.
         * For now, just tell the guest the call succeeded but don't actually
         * limit anything.
         */
        if (resource != RLIMIT_AS &&
            resource != RLIMIT_DATA &&
            resource != RLIMIT_STACK) {
            return get_errno(setrlimit(resource, &rlim));
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        int resource = target_to_host_resource(arg1);
        struct target_rlimit *target_rlim;
        struct rlimit rlim;

        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
    case TARGET_NR_getrusage:
        struct rusage rusage;
        ret = get_errno(getrusage(arg1, &rusage));
        if (!is_error(ret)) {
            ret = host_to_target_rusage(arg2, &rusage);
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        struct timezone tz;

        ret = get_errno(gettimeofday(&tv, &tz));
        if (!is_error(ret)) {
            if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                return -TARGET_EFAULT;
            if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                return -TARGET_EFAULT;
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        struct timeval tv, *ptv = NULL;
        struct timezone tz, *ptz = NULL;

            if (copy_from_user_timeval(&tv, arg1)) {
                return -TARGET_EFAULT;

            if (copy_from_user_timezone(&tz, arg2)) {
                return -TARGET_EFAULT;

        return get_errno(settimeofday(ptv, ptz));
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
            ret = -TARGET_EFAULT;
            ret = get_errno(symlink(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg3);
            ret = -TARGET_EFAULT;
            ret = get_errno(symlinkat(p, arg2, p2));
        unlock_user(p2, arg3, 0);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        p = lock_user_string(arg1);
        p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
        ret = get_errno(do_guest_readlink(p, p2, arg3));
        unlock_user(p2, arg2, ret);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        p = lock_user_string(arg2);
        p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            ret = -TARGET_EFAULT;
        } else if (!arg4) {
            /* Short circuit this for the magic exe check. */
            ret = -TARGET_EINVAL;
        } else if (is_proc_myself((const char *)p, "exe")) {
            /*
             * Don't worry about sign mismatch as earlier mapping
             * logic would have thrown a bad address error.
             */
            ret = MIN(strlen(exec_path), arg4);
            /* We cannot NUL terminate the string. */
            memcpy(p2, exec_path, ret);
            ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
        unlock_user(p2, arg3, ret);
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
                return -TARGET_EFAULT;
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        abi_ulong v1, v2, v3, v4, v5, v6;
        if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
            return -TARGET_EFAULT;
        v1 = tswapal(v[0]);
        v2 = tswapal(v[1]);
        v3 = tswapal(v[2]);
        v4 = tswapal(v[3]);
        v5 = tswapal(v[4]);
        v6 = tswapal(v[5]);
        unlock_user(v, arg1, 0);
        ret = get_errno(target_mmap(v1, v2, v3,
                                    target_to_host_bitmask(v4, mmap_flags_tbl),
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#define MMAP_SHIFT 12
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
        return get_errno(ret);
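        /*
         * Worked example (editorial): mmap2 takes its file offset in
         * 4096-byte units, so a guest offset argument of 0x10 maps the
         * file from byte 0x10 << MMAP_SHIFT = 0x10000; the cast through
         * abi_ulong keeps a 32-bit guest value from being sign-extended
         * before the shift.
         */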
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        TaskState *ts = cpu->opaque;
        /* Special hack to detect libc making the stack executable. */
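        /*
         * Editorial sketch: glibc is reported to call
         * mprotect(..., PROT_GROWSDOWN) on an address inside the stack
         * when making stacks executable; the adjustment below widens the
         * range down to the stack limit so the whole guest stack gets the
         * new protection, approximating the kernel's PROT_GROWSDOWN
         * handling.
         */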
        if ((arg3 & PROT_GROWSDOWN)
            && arg1 >= ts->info->stack_limit
            && arg1 <= ts->info->start_stack) {
            arg3 &= ~PROT_GROWSDOWN;
            arg2 = arg2 + arg1 - ts->info->stack_limit;
            arg1 = ts->info->stack_limit;
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
    /* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority. Signal no error. */
        cpu_env->ir[IR_V0] = 0;
        /* Return value is a biased priority to avoid negative numbers. */
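        /*
         * Numeric sketch (editorial; the biasing statement itself is
         * elided here): the kernel convention is to return 20 - nice, so a
         * process at nice -5 reports 25 and one at nice 19 reports 1,
         * keeping the raw syscall return non-negative for libc to un-bias.
         */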
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
            __put_user(0, &target_stfs->f_flags);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
            __put_user(0, &target_stfs->f_flags);
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
            return -TARGET_EFAULT;
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
        case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
        case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
        case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
        case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
        case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
        case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
        case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
            return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
        case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
        case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
        case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                return -TARGET_EINVAL;
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                return -TARGET_EFAULT;
            ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
            unlock_user(p, arg2, arg3);
            return -TARGET_EINVAL;
    case TARGET_NR_setitimer:
        struct itimerval value, ovalue, *pvalue;

            if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                || copy_from_user_timeval(&pvalue->it_value,
                                          arg2 + sizeof(struct target_timeval)))
                return -TARGET_EFAULT;
        ret = get_errno(setitimer(arg1, pvalue, &ovalue));
        if (!is_error(ret) && arg3) {
            if (copy_to_user_timeval(arg3,
                                     &ovalue.it_interval)
                || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                return -TARGET_EFAULT;
    case TARGET_NR_getitimer:
        struct itimerval value;

        ret = get_errno(getitimer(arg1, &value));
        if (!is_error(ret) && arg2) {
            if (copy_to_user_timeval(arg2,
                                     &value.it_interval)
                || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                return -TARGET_EFAULT;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        if (!is_error(ret)) {
            struct target_stat *target_st;

            if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                return -TARGET_EFAULT;
            memset(target_st, 0, sizeof(*target_st));
            __put_user(st.st_dev, &target_st->st_dev);
            __put_user(st.st_ino, &target_st->st_ino);
            __put_user(st.st_mode, &target_st->st_mode);
            __put_user(st.st_uid, &target_st->st_uid);
            __put_user(st.st_gid, &target_st->st_gid);
            __put_user(st.st_nlink, &target_st->st_nlink);
            __put_user(st.st_rdev, &target_st->st_rdev);
            __put_user(st.st_size, &target_st->st_size);
            __put_user(st.st_blksize, &target_st->st_blksize);
            __put_user(st.st_blocks, &target_st->st_blocks);
            __put_user(st.st_atime, &target_st->target_st_atime);
            __put_user(st.st_mtime, &target_st->target_st_mtime);
            __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
            __put_user(st.st_atim.tv_nsec,
                       &target_st->target_st_atime_nsec);
            __put_user(st.st_mtim.tv_nsec,
                       &target_st->target_st_mtime_nsec);
            __put_user(st.st_ctim.tv_nsec,
                       &target_st->target_st_ctime_nsec);
            unlock_user_struct(target_st, arg2, 1);
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        abi_long status_ptr = arg2;
        struct rusage rusage, *rusage_ptr;
        abi_ulong target_rusage = arg4;
        abi_long rusage_err;
            rusage_ptr = &rusage;
        ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
        if (!is_error(ret)) {
            if (status_ptr && ret) {
                status = host_to_target_waitstatus(status);
                if (put_user_s32(status, status_ptr))
                    return -TARGET_EFAULT;
            if (target_rusage) {
                rusage_err = host_to_target_rusage(target_rusage, &rusage);
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_sysinfo:
        struct target_sysinfo *target_value;
        struct sysinfo value;
        ret = get_errno(sysinfo(&value));
        if (!is_error(ret) && arg1)
            if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                return -TARGET_EFAULT;
            __put_user(value.uptime, &target_value->uptime);
            __put_user(value.loads[0], &target_value->loads[0]);
            __put_user(value.loads[1], &target_value->loads[1]);
            __put_user(value.loads[2], &target_value->loads[2]);
            __put_user(value.totalram, &target_value->totalram);
            __put_user(value.freeram, &target_value->freeram);
            __put_user(value.sharedram, &target_value->sharedram);
            __put_user(value.bufferram, &target_value->bufferram);
            __put_user(value.totalswap, &target_value->totalswap);
            __put_user(value.freeswap, &target_value->freeswap);
            __put_user(value.procs, &target_value->procs);
            __put_user(value.totalhigh, &target_value->totalhigh);
            __put_user(value.freehigh, &target_value->freehigh);
            __put_user(value.mem_unit, &target_value->mem_unit);
            unlock_user_struct(target_value, arg1, 1);
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
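    /*
     * Targets that predate the split SysV IPC syscalls funnel everything
     * through TARGET_NR_ipc; do_ipc() demultiplexes into the same
     * semaphore/message/shared-memory helpers used by the direct cases
     * above, so both entry points reach the same host-side emulation.
     */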
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
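        /*
         * For reference, the guest-register orderings this selects between
         * (matching the kernel's CONFIG_CLONE_BACKWARDS* variants) are:
         *   default:    clone(flags, newsp, parent_tidptr, child_tidptr, tls)
         *   BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
         *   BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
         * do_fork() itself always takes (env, flags, newsp, parent_tidptr,
         * tls, child_tidptr), hence the differing argN positions above.
         */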
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx;

            if (target_to_host_timex(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
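        /*
         * Worked example for _llseek: a guest llseek(fd, off=0x100000000,
         * SEEK_SET) arrives with arg2 = 1 (high word) and arg3 = 0 (low
         * word); the 64-bit result is stored back through the arg4 pointer.
         */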
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
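    /*
     * In the four vectored-I/O cases above, lock_iovec() validates and
     * maps the whole guest iovec array in one pass, and
     * target_to_host_low_high() splits the 64-bit file offset (passed in
     * two guest registers on 32-bit ABIs) into the low/high words the
     * host preadv/pwritev syscalls expect.
     */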
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
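    /*
     * The mask_size computation in both affinity cases is the usual
     * round-up-to-a-multiple idiom: with 8-byte host longs,
     * (len + 7) & ~7 turns a 6-byte guest request into an 8-byte host
     * buffer, e.g. (6 + 7) & ~7 == 8.
     */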
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
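    /*
     * regpairs_aligned() is true for ABIs (32-bit ARM EABI, for example)
     * that pass 64-bit syscall arguments in an even/odd register pair; the
     * argument shuffle at the top of pread64/pwrite64 skips the unused
     * alignment register so that target_offset64() sees the two real
     * halves of the file offset.
     */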
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
        {
            struct target_user_cap_header *target_header;
            struct target_user_cap_data *target_data = NULL;
            struct __user_cap_header_struct header;
            struct __user_cap_data_struct data[2];
            struct __user_cap_data_struct *dataptr = NULL;
            int i, target_datalen;
            int data_items = 1;

            if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
                return -TARGET_EFAULT;
            }
            header.version = tswap32(target_header->version);
            header.pid = tswap32(target_header->pid);

            if (header.version != _LINUX_CAPABILITY_VERSION) {
                /* Version 2 and up takes pointer to two user_data structs */
                data_items = 2;
            }

            target_datalen = sizeof(*target_data) * data_items;

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
                } else {
                    target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
                }
                if (!target_data) {
                    unlock_user_struct(target_header, arg1, 0);
                    return -TARGET_EFAULT;
                }

                if (num == TARGET_NR_capset) {
                    for (i = 0; i < data_items; i++) {
                        data[i].effective = tswap32(target_data[i].effective);
                        data[i].permitted = tswap32(target_data[i].permitted);
                        data[i].inheritable = tswap32(target_data[i].inheritable);
                    }
                }

                dataptr = data;
            }

            if (num == TARGET_NR_capget) {
                ret = get_errno(capget(&header, dataptr));
            } else {
                ret = get_errno(capset(&header, dataptr));
            }

            /* The kernel always updates version for both capget and capset */
            target_header->version = tswap32(header.version);
            unlock_user_struct(target_header, arg1, 1);

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    for (i = 0; i < data_items; i++) {
                        target_data[i].effective = tswap32(data[i].effective);
                        target_data[i].permitted = tswap32(data[i].permitted);
                        target_data[i].inheritable = tswap32(data[i].inheritable);
                    }
                    unlock_user(target_data, arg2, target_datalen);
                } else {
                    unlock_user(target_data, arg2, 0);
                }
            }
            return ret;
        }
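    /*
     * Capability note: for _LINUX_CAPABILITY_VERSION (v1) the kernel
     * exchanges a single __user_cap_data_struct; v2 and later use an
     * array of two, which is why data_items is bumped to 2 above and
     * data[] is sized accordingly.
     */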
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
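    /*
     * The statx emulation above prefers the host statx() syscall where one
     * exists and, on -TARGET_ENOSYS, falls back to fstatat(), in which case
     * only the fields an ordinary struct stat can supply are filled in and
     * the rest of the target_statx buffer stays zeroed.
     */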
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
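    /*
     * The low2high*() and high2low*() wrappers in the cases above bridge
     * the legacy 16-bit uid/gid ABI of older targets and the host's 32-bit
     * IDs; where the 16-bit ABI applies, a host ID above 65535 is reported
     * to the guest as the overflow ID 65534, and on targets with native
     * 32-bit IDs the wrappers are identity mappings.
     */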
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            cpu_env->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
12322 case TARGET_NR_fchown32
:
12323 return get_errno(fchown(arg1
, arg2
, arg3
));
12325 #ifdef TARGET_NR_setresuid32
12326 case TARGET_NR_setresuid32
:
12327 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
12329 #ifdef TARGET_NR_getresuid32
12330 case TARGET_NR_getresuid32
:
12332 uid_t ruid
, euid
, suid
;
12333 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
12334 if (!is_error(ret
)) {
12335 if (put_user_u32(ruid
, arg1
)
12336 || put_user_u32(euid
, arg2
)
12337 || put_user_u32(suid
, arg3
))
12338 return -TARGET_EFAULT
;
12343 #ifdef TARGET_NR_setresgid32
12344 case TARGET_NR_setresgid32
:
12345 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
12347 #ifdef TARGET_NR_getresgid32
12348 case TARGET_NR_getresgid32
:
12350 gid_t rgid
, egid
, sgid
;
12351 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
12352 if (!is_error(ret
)) {
12353 if (put_user_u32(rgid
, arg1
)
12354 || put_user_u32(egid
, arg2
)
12355 || put_user_u32(sgid
, arg3
))
12356 return -TARGET_EFAULT
;
12361 #ifdef TARGET_NR_chown32
12362 case TARGET_NR_chown32
:
12363 if (!(p
= lock_user_string(arg1
)))
12364 return -TARGET_EFAULT
;
12365 ret
= get_errno(chown(p
, arg2
, arg3
));
12366 unlock_user(p
, arg1
, 0);
12369 #ifdef TARGET_NR_setuid32
12370 case TARGET_NR_setuid32
:
12371 return get_errno(sys_setuid(arg1
));
12373 #ifdef TARGET_NR_setgid32
12374 case TARGET_NR_setgid32
:
12375 return get_errno(sys_setgid(arg1
));
12377 #ifdef TARGET_NR_setfsuid32
12378 case TARGET_NR_setfsuid32
:
12379 return get_errno(setfsuid(arg1
));
12381 #ifdef TARGET_NR_setfsgid32
12382 case TARGET_NR_setfsgid32
:
12383 return get_errno(setfsgid(arg1
));
12385 #ifdef TARGET_NR_mincore
12386 case TARGET_NR_mincore
:
12388 void *a
= lock_user(VERIFY_NONE
, arg1
, arg2
, 0);
12390 return -TARGET_ENOMEM
;
12392 p
= lock_user_string(arg3
);
12394 ret
= -TARGET_EFAULT
;
12396 ret
= get_errno(mincore(a
, arg2
, p
));
12397 unlock_user(p
, arg3
, ret
);
12399 unlock_user(a
, arg1
, 0);
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
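    /*
     * TARGET_NR_fcntl64 exists only on 32-bit targets, where struct
     * flock64 carries the 64-bit file offsets; the copyfrom/copyto
     * function pointers above let 32-bit ARM OABI, whose flock64 layout
     * differs from EABI, substitute its own converters.
     */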
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        cpu_env->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            cpu_env->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        TaskState *ts = cpu->opaque;
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                              sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
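    /*
     * The *_time64 variants in this region differ from the classic calls
     * only in the guest timespec layout (64-bit tv_sec even on 32-bit
     * targets); each pair shares the same host syscall and differs just in
     * the target_to_host_timespec{,64}() converters used at the boundary.
     */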
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;

#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
13078 case TARGET_NR_mq_timedreceive
:
13080 struct timespec ts
;
13083 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
13085 if (target_to_host_timespec(&ts
, arg5
)) {
13086 return -TARGET_EFAULT
;
13088 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13090 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
13091 return -TARGET_EFAULT
;
13094 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13097 unlock_user (p
, arg2
, arg3
);
13099 put_user_u32(prio
, arg4
);
13103 #ifdef TARGET_NR_mq_timedreceive_time64
13104 case TARGET_NR_mq_timedreceive_time64
:
13106 struct timespec ts
;
13109 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
13111 if (target_to_host_timespec64(&ts
, arg5
)) {
13112 return -TARGET_EFAULT
;
13114 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13116 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
13117 return -TARGET_EFAULT
;
13120 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13123 unlock_user(p
, arg2
, arg3
);
13125 put_user_u32(prio
, arg4
);
13131 /* Not implemented for now... */
13132 /* case TARGET_NR_mq_notify: */
13135 case TARGET_NR_mq_getsetattr
:
13137 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
13140 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
13141 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
13142 &posix_mq_attr_out
));
13143 } else if (arg3
!= 0) {
13144 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
13146 if (ret
== 0 && arg3
!= 0) {
13147 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13243 #if defined(CONFIG_SYNC_FILE_RANGE)
13244 #if defined(TARGET_NR_sync_file_range)
13245 case TARGET_NR_sync_file_range
:
13246 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13247 #if defined(TARGET_MIPS)
13248 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
13249 target_offset64(arg5
, arg6
), arg7
));
13251 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
13252 target_offset64(arg4
, arg5
), arg6
));
13253 #endif /* !TARGET_MIPS */
13255 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
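    /*
     * signalfd and signalfd4 are both funnelled through do_signalfd4(),
     * which converts the guest sigset and registers an fd translator so
     * that struct signalfd_siginfo reads are converted for the guest.
     */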
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
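    /*
     * epoll_wait/epoll_pwait: events are collected into a host-side
     * bounce buffer and then byte-swapped field by field into the
     * guest's array, since struct epoll_event layout and endianness
     * can differ between guest and host.
     */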
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
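    /*
     * prlimit64: new limits for RLIMIT_AS/DATA/STACK are deliberately not
     * forwarded to the host, since they would constrain the QEMU process
     * itself rather than the guest's view of its limits.
     */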
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
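    /*
     * POSIX timers: the guest-visible timer_t is an index into the
     * g_posix_timers[] slot table ORed with TIMER_MAGIC; get_timer_id()
     * validates the magic and recovers the index (negative on error).
     */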
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif
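    /*
     * timerfd: only the fd-creation flags need translating (via
     * fcntl_flags_tbl); the itimerspec values are converted with the
     * same host/target helpers as the POSIX timer calls above.
     */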
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_timerfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
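    /*
     * The following syscalls take only plain integer arguments, so they
     * can be passed straight through to the host with no conversion.
     */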
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (pinoff) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (poutoff) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
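    /*
     * pivot_root takes two guest path strings; both must be locked into
     * host memory before the call and unlocked afterwards, even on error.
     */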
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_riscv_hwprobe)
    case TARGET_NR_riscv_hwprobe:
        return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
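/*
 * do_syscall() is the public entry point: it wraps do_syscall1() with
 * the plugin/tracing hooks and optional strace-style logging of both
 * the call and its return value.
 */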
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);

    return ret;
}