 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "user-internals.h"
#include "signal-common.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "cpu_loop-common.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
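/*
 * Illustrative note (not from the original source): a typical glibc
 * pthread_create() issues clone() with
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *   CLONE_CHILD_CLEARTID
 * That set contains all of CLONE_THREAD_FLAGS, its extras all fall in
 * CLONE_OPTIONAL_THREAD_FLAGS, and no bit lands in
 * CLONE_INVALID_THREAD_FLAGS, so the masks above classify it as a
 * thread-style clone rather than a fork.
 */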
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)           \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                 \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
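/*
 * Illustrative expansion (not from the original source): for example,
 *   _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags)
 * defines
 *   static int pidfd_open(pid_t pid, unsigned int flags)
 *   { return syscall(__NR_pidfd_open, pid, flags); }
 * i.e. a thin local wrapper for a syscall the host libc may not expose.
 */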
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif
#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC     (1U << 2)
#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
          unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
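/*
 * Illustrative note (an assumption, not shown in this excerpt): tables
 * like fcntl_flags_tbl are consumed by QEMU's bitmask translation
 * helpers, e.g.
 *   host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 * where each entry gives (target_mask, target_bits, host_mask, host_bits)
 * so individual flag bits can be remapped between guest and host ABIs.
 */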
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif
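/*
 * Illustrative note (not from the original excerpt): the xchg-based
 * claim above makes slot allocation safe against concurrent guest
 * threads. qatomic_xchg() returning 0 means this thread flipped the
 * slot from free to taken atomically, while free_host_timer_slot()
 * publishes the slot's release with store-release ordering.
 */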
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    }
    return ret;
}
const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;

    if (usize <= ksize) {
        return 1;
    }

    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b != 0) {
            return 0;
        }
    }
    return 1;
}
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
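/*
 * Illustrative note (an assumption about context not shown here):
 * safe_syscall() differs from plain syscall() in that it cooperates
 * with QEMU's signal handling, so a guest signal arriving just before
 * the host syscall blocks results in QEMU_ERESTARTSYS instead of a
 * lost wakeup. For example, safe_syscall3(ssize_t, read, ...) below
 * defines safe_read(fd, buf, count) with that restart behaviour.
 */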
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
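/*
 * Illustrative behaviour sketch (not from the original excerpt): a guest
 * calling brk(0) gets the current break back unchanged; growing within
 * the already-reserved brk_page only zeroes the new range; growing past
 * brk_page attempts an anonymous mapping at brk_page and, if the kernel
 * places it elsewhere, reports ENOMEM rather than silently fragmenting
 * the guest heap.
 */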
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
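/*
 * Illustrative note (not from the original excerpt): guest fd_sets are
 * arrays of abi_ulong words, so bit k of the guest set lives at word
 * k / TARGET_ABI_BITS, bit k % TARGET_ABI_BITS. That layout can differ
 * from the host fd_set in both word size and endianness, which is why
 * the copy is done bit by bit rather than with a memcpy().
 */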
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
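/*
 * Worked example (illustrative, not from the original source): with
 * HOST_HZ == 1024 (alpha) and TARGET_HZ == 100, 2048 host ticks become
 * ((int64_t)2048 * 100) / 1024 == 200 guest ticks; the int64_t cast
 * avoids overflowing the intermediate product on hosts with 32-bit long.
 */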
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
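/*
 * Illustrative note (not from the original excerpt): the double check in
 * host_to_target_rlim() catches 64-bit host limits that do not fit in a
 * 32-bit guest abi_ulong; anything unrepresentable is reported to the
 * guest as TARGET_RLIM_INFINITY rather than as a truncated value.
 */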
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
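/*
 * Illustrative note (not from the original excerpt): on a 32-bit guest,
 * target__kernel_timespec carries a 64-bit tv_nsec slot of which only
 * the low 32 bits are meaningful; the (long)(abi_long) round-trip above
 * sign-extends those low bits and discards the padding word.
 */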
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
1260 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1261 abi_ulong target_tz_addr
)
1263 struct target_timezone
*target_tz
;
1265 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1266 return -TARGET_EFAULT
;
1269 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1270 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1272 unlock_user_struct(target_tz
, target_tz_addr
, 0);
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;

    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
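/*
 * Illustrative note (not from the original excerpt): on Alpha, MIPS,
 * SH4 and SPARC the historical pipe() ABI returns the two descriptors
 * in two registers instead of storing through the user pointer, which
 * is why do_pipe() writes host_pipe[1] into a CPU register and returns
 * host_pipe[0] directly for those targets.
 */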
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
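/*
 * Illustrative note (not from the original excerpt): SCM_RIGHTS payloads
 * are arrays of file descriptors, which pass through unchanged apart
 * from byte order, since guest fds are host fds under linux-user
 * emulation; only the framing (cmsghdr layout and alignment) differs
 * between guest and host ABIs.
 */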
1856 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1857 struct msghdr
*msgh
)
1859 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof(struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
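/*
 * Illustrative example of the truncation behaviour above (the values are
 * hypothetical): if the guest supplies a control buffer with room for only
 * two file descriptors but the host kernel delivers an SCM_RIGHTS message
 * carrying three, tgt_len is clamped to the remaining buffer space, numfds
 * becomes tgt_len / sizeof(int) == 2, and the guest sees MSG_CTRUNC set in
 * msg_flags -- mirroring what Linux's put_cmsg() would do natively.
 */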
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch (level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch (optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof(struct target_ip_mreq) ||
                optlen > sizeof(struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof(struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq_source, optlen));
            unlock_user(ip_mreq_source, optval_addr, 0);
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                       SO_ATTACH_FILTER, &fprog,
                                       sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user(dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                   &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
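/*
 * For example, a guest calling setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &v, 4)
 * reaches the TARGET_SOL_SOCKET arm above with the guest's option number:
 * the TARGET_SO_RCVBUF label rewrites optname to the host's SO_RCVBUF, the
 * 32-bit value is fetched with get_user_u32(), and only then is the host
 * setsockopt() invoked.
 */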
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch (level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (optname == SO_ERROR) {
            val = host_to_target_errno(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch (optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
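/*
 * Worked example (illustrative values): with TARGET_LONG_BITS == 32,
 * tlow = 0x89abcdef and thigh = 0x01234567 combine into
 * off = 0x0123456789abcdef; on a 64-bit host (HOST_LONG_BITS == 64) this
 * stores *hlow = 0x0123456789abcdef and *hhigh = 0, since the second
 * double-shift discards all 64 bits.
 */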
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
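/* Convert the target's SOCK_* type and type flags into host values.
 * SOCK_DGRAM/SOCK_STREAM are remapped explicitly; SOCK_CLOEXEC and
 * SOCK_NONBLOCK are translated when the host supports them, otherwise
 * -TARGET_EINVAL is returned (except that a missing SOCK_NONBLOCK can
 * still be emulated afterwards via O_NONBLOCK in sock_flags_fixup()).
 */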
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Handle an obsolete case:
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len,
                                                           len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name,
                                                  msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
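/* Wrapper around do_sendrecvmsg_locked(): locks the guest msghdr (its
 * contents are copied in for sendmsg), dispatches, then unlocks it,
 * writing the struct back to the guest only on the receive path.
 */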
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
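/* Bookkeeping for SysV shared memory: each attached segment's guest start
 * address and size is recorded in the table below so that the later
 * shmdt() emulation can locate and undo the mapping.
 */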
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
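/* Convert the sem_perm member of a target semid64_ds to and from the
 * host's struct ipc_perm. mode and __seq are 16-bit on most targets but
 * 32-bit on Alpha, MIPS and PPC, hence the tswap16/tswap32 split below.
 */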
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
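/* The two helpers below shuttle the array of semaphore values used by
 * semctl(GETALL/SETALL): an IPC_STAT is issued first to learn sem_nsems,
 * then each 16-bit value is byte-swapped individually while copying.
 */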
static inline abi_long target_to_host_semarray(int semid,
                                               unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid,
                                               abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for (i = 0; i < nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which pass the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops,
                                                     (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
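/* msgsnd(2): copy the guest msgbuf (mtype plus msgsz bytes of mtext) into
 * host memory, byte-swapping mtype, then try the native syscall and fall
 * back to the multiplexed ipc(2) entry point where necessary.
 */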
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
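/* msgrcv(2): receive into a host buffer, then copy mtext back into the
 * guest msgbuf and byte-swap mtype; like do_msgsnd() this prefers the
 * native syscall and falls back to ipc(2) using MSGRCV_ARGS() above.
 */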
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
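/* Converters for struct shmid_ds, shminfo and shm_info as used by
 * shmctl(IPC_STAT/IPC_SET/IPC_INFO/SHM_INFO); the permission block of
 * shmid_ds is delegated to the ipc_perm helpers above.
 */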
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
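/*
 * Override sketch (illustrative only, not code in this file): a target
 * whose kernel demands stricter SHM alignment would define
 * TARGET_FORCE_SHMLBA in its own target headers and supply its own
 * target_shmlba().  The alignment value below is a made-up example,
 * not any particular architecture's real requirement:
 *
 *     #define TARGET_FORCE_SHMLBA 1
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 16 * TARGET_PAGE_SIZE;   // hypothetical alignment
 *     }
 */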
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
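/*
 * Worked example of the alignment check in do_shmat() (illustrative
 * numbers): with shmlba = 0x1000, an shmaddr of 0x40001234 fails the
 * (shmaddr & (shmlba - 1)) test; with SHM_RND set it is rounded down
 * to 0x40001000 by shmaddr &= ~(shmlba - 1), otherwise the call fails
 * with EINVAL, matching the host kernel's shmat() semantics.
 */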
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
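/*
 * For reference, a hypothetical guest-side call (not code in this file):
 * a 32-bit guest libc without a separate shmat syscall funnels the
 * request through the multiplexer handled above, roughly as
 *
 *     ipc(IPCOP_shmat, shmid, shmflg, (long)&raddr, addr, 0);
 *
 * with the attach address returned through the third argument, which is
 * what the put_user_ual(raddr, third) in the IPCOP_shmat case services.
 */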
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096
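/*
 * Expansion sketch for the X-macro pattern above: for a (hypothetical)
 * syscall_types.h entry such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * the first inclusion contributes "STRUCT_winsize," to the enum, and the
 * second inclusion defines
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 *
 * so each structure is described exactly once and both views stay in sync.
 */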
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
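/*
 * Sizing example for the overflow check above (illustrative numbers,
 * assuming a 32-byte struct fiemap and 56-byte struct fiemap_extent):
 * a request for 100 extents needs outbufsz = 32 + 100 * 56 = 5632 bytes,
 * which exceeds MAX_STRUCT_SIZE (4096) and therefore takes the
 * g_try_malloc path rather than using the fixed-size buf_temp.
 */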
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = g_try_malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}

static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}

static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}

static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
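/*
 * The URB recovery in do_ioctl_usbdevfs_reapurb() is the classic
 * container_of trick: the kernel returns the host URB pointer that was
 * submitted, and subtracting offsetof(struct live_urb, host_urb) recovers
 * the wrapper that also carries the guest addresses.  In its generic form:
 *
 *     #define container_of(ptr, type, member) \
 *         ((type *)((char *)(ptr) - offsetof(type, member)))
 *
 *     struct live_urb *lurb =
 *         container_of(hurb_ptr, struct live_urb, host_urb);
 */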
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}

static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
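/*
 * Both handlers above exist because SIOCGSTAMP/SIOCGSTAMPNS come in two
 * guest flavours: the legacy *_OLD commands use the target's native
 * (possibly 32-bit) timeval/timespec layout, while the newer commands
 * use a 64-bit layout so 32-bit guests survive y2038.  The cmd value
 * selects which conversion helper writes the result back to the guest.
 */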
#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif
#ifdef HAVE_DRM_H

static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}

static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}

static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}

static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}

#endif /* HAVE_DRM_H */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
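/*
 * Expansion sketch (illustrative): an ioctls.h line such as
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * becomes the table entry
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 *
 * i.e. target command, host command, name for logging, access mode,
 * optional special handler, and the thunk argument description that
 * drives the generic conversion in do_ioctl() below.
 */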
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
    { 0, 0, 0, 0 }
};
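/*
 * Each entry is { target_mask, target_bits, host_mask, host_bits }:
 * target_to_host_bitmask() sets host_bits wherever (value & target_mask)
 * == target_bits, and host_to_target_bitmask() performs the inverse
 * mapping.  For the 1:1 flags above all four fields coincide; multi-bit
 * fields such as the CBAUD/CSIZE groups in cflag_tbl below enumerate
 * one entry per legal value instead.
 */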
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
    { 0, 0, 0, 0 }
};
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0          &&
             read_exec_only == 1    &&
             seg_32bit == 0         &&
             limit_in_pages == 0    &&
             seg_not_present == 1   &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
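/*
 * Encoding example for the descriptor math above (illustrative): a flat
 * 32-bit data segment with base 0, limit 0xfffff, seg_32bit=1,
 * limit_in_pages=1, present, writable and contents=0 yields
 * entry_1 = 0x0000ffff and entry_2 = 0x00cff200: limit[19:16] land in
 * bits 16-19, the type/present bits in bits 9-15 plus the fixed 0x7000
 * (S=1, DPL=3), and the D/B and granularity flags in bits 22-23 --
 * the same layout the Linux kernel uses for LDT/GDT entries.
 */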
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */
#endif /* defined(TARGET_I386) */
/*
 * These constants are generic.  Supply any that are missing from the host.
 */
#ifndef PR_SET_NAME
# define PR_SET_NAME    15
# define PR_GET_NAME    16
#endif
#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR   (1 << 0)
# define PR_FP_MODE_FRE  (1 << 1)
#endif
#ifndef PR_SVE_SET_VL
# define PR_SVE_SET_VL  50
# define PR_SVE_GET_VL  51
# define PR_SVE_VL_LEN_MASK  0xffff
# define PR_SVE_VL_INHERIT   (1 << 17)
#endif
#ifndef PR_PAC_RESET_KEYS
# define PR_PAC_RESET_KEYS  54
# define PR_PAC_APIAKEY   (1 << 0)
# define PR_PAC_APIBKEY   (1 << 1)
# define PR_PAC_APDAKEY   (1 << 2)
# define PR_PAC_APDBKEY   (1 << 3)
# define PR_PAC_APGAKEY   (1 << 4)
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
# define PR_MTE_TCF_SHIFT       1
# define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT       3
# define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
#endif
#ifndef PR_SET_IO_FLUSHER
# define PR_SET_IO_FLUSHER 57
# define PR_GET_IO_FLUSHER 58
#endif
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
#endif
#ifndef PR_SME_SET_VL
# define PR_SME_SET_VL  63
# define PR_SME_GET_VL  64
# define PR_SME_VL_LEN_MASK  0xffff
# define PR_SME_VL_INHERIT   (1 << 17)
#endif
#include "target_prctl.h"

static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}

static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}

#ifndef do_prctl_get_fp_mode
#define do_prctl_get_fp_mode do_prctl_inval0
#endif
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
#ifndef do_prctl_sve_get_vl
#define do_prctl_sve_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sve_set_vl
#define do_prctl_sve_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
#endif
#ifndef do_prctl_set_tagged_addr_ctrl
#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
#endif
#ifndef do_prctl_get_tagged_addr_ctrl
#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
#endif
#ifndef do_prctl_get_unalign
#define do_prctl_get_unalign do_prctl_inval1
#endif
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
#ifndef do_prctl_sme_get_vl
#define do_prctl_sme_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sme_set_vl
#define do_prctl_sme_set_vl do_prctl_inval1
#endif
do_prctl(CPUArchState
*env
, abi_long option
, abi_long arg2
,
6434 abi_long arg3
, abi_long arg4
, abi_long arg5
)
6439 case PR_GET_PDEATHSIG
:
6442 ret
= get_errno(prctl(PR_GET_PDEATHSIG
, &deathsig
,
6444 if (!is_error(ret
) &&
6445 put_user_s32(host_to_target_signal(deathsig
), arg2
)) {
6446 return -TARGET_EFAULT
;
6450 case PR_SET_PDEATHSIG
:
6451 return get_errno(prctl(PR_SET_PDEATHSIG
, target_to_host_signal(arg2
),
6455 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
6457 return -TARGET_EFAULT
;
6459 ret
= get_errno(prctl(PR_GET_NAME
, (uintptr_t)name
,
6461 unlock_user(name
, arg2
, 16);
6466 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
6468 return -TARGET_EFAULT
;
6470 ret
= get_errno(prctl(PR_SET_NAME
, (uintptr_t)name
,
6472 unlock_user(name
, arg2
, 0);
6475 case PR_GET_FP_MODE
:
6476 return do_prctl_get_fp_mode(env
);
6477 case PR_SET_FP_MODE
:
6478 return do_prctl_set_fp_mode(env
, arg2
);
6480 return do_prctl_sve_get_vl(env
);
6482 return do_prctl_sve_set_vl(env
, arg2
);
6484 return do_prctl_sme_get_vl(env
);
6486 return do_prctl_sme_set_vl(env
, arg2
);
6487 case PR_PAC_RESET_KEYS
:
6488 if (arg3
|| arg4
|| arg5
) {
6489 return -TARGET_EINVAL
;
6491 return do_prctl_reset_keys(env
, arg2
);
6492 case PR_SET_TAGGED_ADDR_CTRL
:
6493 if (arg3
|| arg4
|| arg5
) {
6494 return -TARGET_EINVAL
;
6496 return do_prctl_set_tagged_addr_ctrl(env
, arg2
);
6497 case PR_GET_TAGGED_ADDR_CTRL
:
6498 if (arg2
|| arg3
|| arg4
|| arg5
) {
6499 return -TARGET_EINVAL
;
6501 return do_prctl_get_tagged_addr_ctrl(env
);
6503 case PR_GET_UNALIGN
:
6504 return do_prctl_get_unalign(env
, arg2
);
6505 case PR_SET_UNALIGN
:
6506 return do_prctl_set_unalign(env
, arg2
);
6508 case PR_CAP_AMBIENT
:
6509 case PR_CAPBSET_READ
:
6510 case PR_CAPBSET_DROP
:
6511 case PR_GET_DUMPABLE
:
6512 case PR_SET_DUMPABLE
:
6513 case PR_GET_KEEPCAPS
:
6514 case PR_SET_KEEPCAPS
:
6515 case PR_GET_SECUREBITS
:
6516 case PR_SET_SECUREBITS
:
6519 case PR_GET_TIMERSLACK
:
6520 case PR_SET_TIMERSLACK
:
6522 case PR_MCE_KILL_GET
:
6523 case PR_GET_NO_NEW_PRIVS
:
6524 case PR_SET_NO_NEW_PRIVS
:
6525 case PR_GET_IO_FLUSHER
:
6526 case PR_SET_IO_FLUSHER
:
6527 /* Some prctl options have no pointer arguments and we can pass on. */
6528 return get_errno(prctl(option
, arg2
, arg3
, arg4
, arg5
));
6530 case PR_GET_CHILD_SUBREAPER
:
6531 case PR_SET_CHILD_SUBREAPER
:
6532 case PR_GET_SPECULATION_CTRL
:
6533 case PR_SET_SPECULATION_CTRL
:
6534 case PR_GET_TID_ADDRESS
:
6536 return -TARGET_EINVAL
;
6540 /* Was used for SPE on PowerPC. */
6541 return -TARGET_EINVAL
;
6548 case PR_GET_SECCOMP
:
6549 case PR_SET_SECCOMP
:
6550 case PR_SET_SYSCALL_USER_DISPATCH
:
6551 case PR_GET_THP_DISABLE
:
6552 case PR_SET_THP_DISABLE
:
6555 /* Disable to prevent the target disabling stuff we need. */
6556 return -TARGET_EINVAL
;
6559 qemu_log_mask(LOG_UNIMP
, "Unsupported prctl: " TARGET_ABI_FMT_ld
"\n",
6561 return -TARGET_EINVAL
;
#define NEW_STACK_SIZE 0x40000

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;

static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
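/*
 * In do_futex below, only the FUTEX_WAIT-style operations interpret
 * the 4th argument as a timespec; for REQUEUE/WAKE_OP it is really
 * VAL2, so it is smuggled through the timeout pointer parameter
 * without conversion.
 */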
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

#ifdef TARGET_HPPA
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                           const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}

void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i, num_cpus;

    num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
        dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
        dprintf(fd, "capabilities\t: os32\n");
        dprintf(fd, "model\t\t: 9000/778/B160L - "
                    "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
    }
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        return safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
static int do_execveat(CPUArchState *cpu_env, int dirfd,
                       abi_long pathname, abi_long guest_argp,
                       abi_long guest_envp, int flags)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    if (is_proc_myself(p, "exe")) {
        ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
    } else {
        ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
    }

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
#ifdef TARGET_NR_getdents
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more then one thread.  */

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = cpu->opaque;

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(cpu, ts->child_tidptr),
                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
            }

            object_unparent(OBJECT(cpu));
            object_unref(OBJECT(cpu));
            /*
             * At this point the CPU should be unrealized and removed
             * from cpu lists. We can clean-up the rest of the thread
             * data without the lock held.
             */

            pthread_mutex_unlock(&clone_lock);

            thread_cpu = NULL;
            g_free(ts);
            rcu_unregister_thread();
            pthread_exit(NULL);
        }

        pthread_mutex_unlock(&clone_lock);
        preexit_cleanup(cpu_env, arg1);
        _exit(arg1);
        return 0; /* avoid warning */
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        } else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(safe_read(arg1, p, arg3));
            if (ret >= 0 &&
                fd_trans_host_to_target_data(arg1)) {
                ret = fd_trans_host_to_target_data(arg1)(p, ret);
            }
            unlock_user(p, arg2, ret);
        }
        return ret;
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
            if (ret >= 0) {
                ret = get_errno(safe_write(arg1, copy, ret));
            }
            g_free(copy);
        } else {
            ret = get_errno(safe_write(arg1, p, arg3));
        }
        unlock_user(p, arg2, 0);
        return ret;

#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
    case TARGET_NR_pidfd_open:
        return get_errno(pidfd_open(arg1, arg2));
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
    case TARGET_NR_pidfd_send_signal:
        {
            siginfo_t uinfo, *puinfo;

            if (arg3) {
                p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_siginfo(&uinfo, p);
                unlock_user(p, arg3, 0);
                puinfo = &uinfo;
            } else {
                puinfo = NULL;
            }
            ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
                                              puinfo, arg4));
        }
        return ret;
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
    case TARGET_NR_pidfd_getfd:
        return get_errno(pidfd_getfd(arg1, arg2, arg3));
#endif
    case TARGET_NR_close:
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
    case TARGET_NR_close_range:
        ret = get_errno(sys_close_range(arg1, arg2, arg3));
        if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
            abi_long fd, maxfd;
            maxfd = MIN(arg2, target_fd_max);
            for (fd = arg1; fd < maxfd; fd++) {
                fd_trans_unregister(fd);
            }
        }
        return ret;
#endif

    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            info.si_pid = 0;
            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
8900 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8901 return -TARGET_EFAULT
;
8902 host_to_target_siginfo(p
, &info
);
8903 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8908 #ifdef TARGET_NR_creat /* not on alpha */
8909 case TARGET_NR_creat
:
8910 if (!(p
= lock_user_string(arg1
)))
8911 return -TARGET_EFAULT
;
8912 ret
= get_errno(creat(p
, arg2
));
8913 fd_trans_unregister(ret
);
8914 unlock_user(p
, arg1
, 0);
8917 #ifdef TARGET_NR_link
8918 case TARGET_NR_link
:
8921 p
= lock_user_string(arg1
);
8922 p2
= lock_user_string(arg2
);
8924 ret
= -TARGET_EFAULT
;
8926 ret
= get_errno(link(p
, p2
));
8927 unlock_user(p2
, arg2
, 0);
8928 unlock_user(p
, arg1
, 0);
8932 #if defined(TARGET_NR_linkat)
8933 case TARGET_NR_linkat
:
8937 return -TARGET_EFAULT
;
8938 p
= lock_user_string(arg2
);
8939 p2
= lock_user_string(arg4
);
8941 ret
= -TARGET_EFAULT
;
8943 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8944 unlock_user(p
, arg2
, 0);
8945 unlock_user(p2
, arg4
, 0);
8949 #ifdef TARGET_NR_unlink
8950 case TARGET_NR_unlink
:
8951 if (!(p
= lock_user_string(arg1
)))
8952 return -TARGET_EFAULT
;
8953 ret
= get_errno(unlink(p
));
8954 unlock_user(p
, arg1
, 0);
8957 #if defined(TARGET_NR_unlinkat)
8958 case TARGET_NR_unlinkat
:
8959 if (!(p
= lock_user_string(arg2
)))
8960 return -TARGET_EFAULT
;
8961 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8962 unlock_user(p
, arg2
, 0);
8965 case TARGET_NR_execveat
:
8966 return do_execveat(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
8967 case TARGET_NR_execve
:
8968 return do_execveat(cpu_env
, AT_FDCWD
, arg1
, arg2
, arg3
, 0);
8969 case TARGET_NR_chdir
:
8970 if (!(p
= lock_user_string(arg1
)))
8971 return -TARGET_EFAULT
;
8972 ret
= get_errno(chdir(p
));
8973 unlock_user(p
, arg1
, 0);
8975 #ifdef TARGET_NR_time
8976 case TARGET_NR_time
:
8979 ret
= get_errno(time(&host_time
));
8982 && put_user_sal(host_time
, arg1
))
8983 return -TARGET_EFAULT
;
8987 #ifdef TARGET_NR_mknod
8988 case TARGET_NR_mknod
:
8989 if (!(p
= lock_user_string(arg1
)))
8990 return -TARGET_EFAULT
;
8991 ret
= get_errno(mknod(p
, arg2
, arg3
));
8992 unlock_user(p
, arg1
, 0);
8995 #if defined(TARGET_NR_mknodat)
8996 case TARGET_NR_mknodat
:
8997 if (!(p
= lock_user_string(arg2
)))
8998 return -TARGET_EFAULT
;
8999 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
9000 unlock_user(p
, arg2
, 0);
9003 #ifdef TARGET_NR_chmod
9004 case TARGET_NR_chmod
:
9005 if (!(p
= lock_user_string(arg1
)))
9006 return -TARGET_EFAULT
;
9007 ret
= get_errno(chmod(p
, arg2
));
9008 unlock_user(p
, arg1
, 0);
9011 #ifdef TARGET_NR_lseek
9012 case TARGET_NR_lseek
:
9013 return get_errno(lseek(arg1
, arg2
, arg3
));
9015 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9016 /* Alpha specific */
9017 case TARGET_NR_getxpid
:
9018 cpu_env
->ir
[IR_A4
] = getppid();
9019 return get_errno(getpid());
9021 #ifdef TARGET_NR_getpid
9022 case TARGET_NR_getpid
:
9023 return get_errno(getpid());
9025 case TARGET_NR_mount
:
9027 /* need to look at the data field */
9031 p
= lock_user_string(arg1
);
9033 return -TARGET_EFAULT
;
9039 p2
= lock_user_string(arg2
);
9042 unlock_user(p
, arg1
, 0);
9044 return -TARGET_EFAULT
;
9048 p3
= lock_user_string(arg3
);
9051 unlock_user(p
, arg1
, 0);
9053 unlock_user(p2
, arg2
, 0);
9054 return -TARGET_EFAULT
;
9060 /* FIXME - arg5 should be locked, but it isn't clear how to
9061 * do that since it's not guaranteed to be a NULL-terminated
9065 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
9067 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
9069 ret
= get_errno(ret
);
9072 unlock_user(p
, arg1
, 0);
9074 unlock_user(p2
, arg2
, 0);
9076 unlock_user(p3
, arg3
, 0);
9080 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9081 #if defined(TARGET_NR_umount)
9082 case TARGET_NR_umount
:
9084 #if defined(TARGET_NR_oldumount)
9085 case TARGET_NR_oldumount
:
9087 if (!(p
= lock_user_string(arg1
)))
9088 return -TARGET_EFAULT
;
9089 ret
= get_errno(umount(p
));
9090 unlock_user(p
, arg1
, 0);
9093 #ifdef TARGET_NR_stime /* not on alpha */
9094 case TARGET_NR_stime
:
9098 if (get_user_sal(ts
.tv_sec
, arg1
)) {
9099 return -TARGET_EFAULT
;
9101 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
9104 #ifdef TARGET_NR_alarm /* not on alpha */
9105 case TARGET_NR_alarm
:
9108 #ifdef TARGET_NR_pause /* not on alpha */
9109 case TARGET_NR_pause
:
9110 if (!block_signals()) {
9111 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
9113 return -TARGET_EINTR
;
9115 #ifdef TARGET_NR_utime
9116 case TARGET_NR_utime
:
9118 struct utimbuf tbuf
, *host_tbuf
;
9119 struct target_utimbuf
*target_tbuf
;
9121 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
9122 return -TARGET_EFAULT
;
9123 tbuf
.actime
= tswapal(target_tbuf
->actime
);
9124 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
9125 unlock_user_struct(target_tbuf
, arg2
, 0);
9130 if (!(p
= lock_user_string(arg1
)))
9131 return -TARGET_EFAULT
;
9132 ret
= get_errno(utime(p
, host_tbuf
));
9133 unlock_user(p
, arg1
, 0);
9137 #ifdef TARGET_NR_utimes
9138 case TARGET_NR_utimes
:
9140 struct timeval
*tvp
, tv
[2];
9142 if (copy_from_user_timeval(&tv
[0], arg2
)
9143 || copy_from_user_timeval(&tv
[1],
9144 arg2
+ sizeof(struct target_timeval
)))
9145 return -TARGET_EFAULT
;
9150 if (!(p
= lock_user_string(arg1
)))
9151 return -TARGET_EFAULT
;
9152 ret
= get_errno(utimes(p
, tvp
));
9153 unlock_user(p
, arg1
, 0);
9157 #if defined(TARGET_NR_futimesat)
9158 case TARGET_NR_futimesat
:
9160 struct timeval
*tvp
, tv
[2];
9162 if (copy_from_user_timeval(&tv
[0], arg3
)
9163 || copy_from_user_timeval(&tv
[1],
9164 arg3
+ sizeof(struct target_timeval
)))
9165 return -TARGET_EFAULT
;
9170 if (!(p
= lock_user_string(arg2
))) {
9171 return -TARGET_EFAULT
;
9173 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
9174 unlock_user(p
, arg2
, 0);
9178 #ifdef TARGET_NR_access
9179 case TARGET_NR_access
:
9180 if (!(p
= lock_user_string(arg1
))) {
9181 return -TARGET_EFAULT
;
9183 ret
= get_errno(access(path(p
), arg2
));
9184 unlock_user(p
, arg1
, 0);
9187 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9188 case TARGET_NR_faccessat
:
9189 if (!(p
= lock_user_string(arg2
))) {
9190 return -TARGET_EFAULT
;
9192 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
9193 unlock_user(p
, arg2
, 0);
9196 #if defined(TARGET_NR_faccessat2)
9197 case TARGET_NR_faccessat2
:
9198 if (!(p
= lock_user_string(arg2
))) {
9199 return -TARGET_EFAULT
;
9201 ret
= get_errno(faccessat(arg1
, p
, arg3
, arg4
));
9202 unlock_user(p
, arg2
, 0);
9205 #ifdef TARGET_NR_nice /* not on alpha */
9206 case TARGET_NR_nice
:
9207 return get_errno(nice(arg1
));
9209 case TARGET_NR_sync
:
9212 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9213 case TARGET_NR_syncfs
:
9214 return get_errno(syncfs(arg1
));
9216 case TARGET_NR_kill
:
9217 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
9218 #ifdef TARGET_NR_rename
9219 case TARGET_NR_rename
:
9222 p
= lock_user_string(arg1
);
9223 p2
= lock_user_string(arg2
);
9225 ret
= -TARGET_EFAULT
;
9227 ret
= get_errno(rename(p
, p2
));
9228 unlock_user(p2
, arg2
, 0);
9229 unlock_user(p
, arg1
, 0);
9233 #if defined(TARGET_NR_renameat)
9234 case TARGET_NR_renameat
:
9237 p
= lock_user_string(arg2
);
9238 p2
= lock_user_string(arg4
);
9240 ret
= -TARGET_EFAULT
;
9242 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
9243 unlock_user(p2
, arg4
, 0);
9244 unlock_user(p
, arg2
, 0);
9248 #if defined(TARGET_NR_renameat2)
9249 case TARGET_NR_renameat2
:
9252 p
= lock_user_string(arg2
);
9253 p2
= lock_user_string(arg4
);
9255 ret
= -TARGET_EFAULT
;
9257 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
9259 unlock_user(p2
, arg4
, 0);
9260 unlock_user(p
, arg2
, 0);
9264 #ifdef TARGET_NR_mkdir
9265 case TARGET_NR_mkdir
:
9266 if (!(p
= lock_user_string(arg1
)))
9267 return -TARGET_EFAULT
;
9268 ret
= get_errno(mkdir(p
, arg2
));
9269 unlock_user(p
, arg1
, 0);
9272 #if defined(TARGET_NR_mkdirat)
9273 case TARGET_NR_mkdirat
:
9274 if (!(p
= lock_user_string(arg2
)))
9275 return -TARGET_EFAULT
;
9276 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
9277 unlock_user(p
, arg2
, 0);
9280 #ifdef TARGET_NR_rmdir
9281 case TARGET_NR_rmdir
:
9282 if (!(p
= lock_user_string(arg1
)))
9283 return -TARGET_EFAULT
;
9284 ret
= get_errno(rmdir(p
));
9285 unlock_user(p
, arg1
, 0);
9289 ret
= get_errno(dup(arg1
));
9291 fd_trans_dup(arg1
, ret
);
9294 #ifdef TARGET_NR_pipe
9295 case TARGET_NR_pipe
:
9296 return do_pipe(cpu_env
, arg1
, 0, 0);
9298 #ifdef TARGET_NR_pipe2
9299 case TARGET_NR_pipe2
:
9300 return do_pipe(cpu_env
, arg1
,
9301 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
9303 case TARGET_NR_times
:
9305 struct target_tms
*tmsp
;
9307 ret
= get_errno(times(&tms
));
9309 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
9311 return -TARGET_EFAULT
;
9312 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
9313 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
9314 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
9315 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
9318 ret
= host_to_target_clock_t(ret
);
9321 case TARGET_NR_acct
:
9323 ret
= get_errno(acct(NULL
));
9325 if (!(p
= lock_user_string(arg1
))) {
9326 return -TARGET_EFAULT
;
9328 ret
= get_errno(acct(path(p
)));
9329 unlock_user(p
, arg1
, 0);
9332 #ifdef TARGET_NR_umount2
9333 case TARGET_NR_umount2
:
9334 if (!(p
= lock_user_string(arg1
)))
9335 return -TARGET_EFAULT
;
9336 ret
= get_errno(umount2(p
, arg2
));
9337 unlock_user(p
, arg1
, 0);
9340 case TARGET_NR_ioctl
:
9341 return do_ioctl(arg1
, arg2
, arg3
);
9342 #ifdef TARGET_NR_fcntl
9343 case TARGET_NR_fcntl
:
9344 return do_fcntl(arg1
, arg2
, arg3
);
9346 case TARGET_NR_setpgid
:
9347 return get_errno(setpgid(arg1
, arg2
));
9348 case TARGET_NR_umask
:
9349 return get_errno(umask(arg1
));
9350 case TARGET_NR_chroot
:
9351 if (!(p
= lock_user_string(arg1
)))
9352 return -TARGET_EFAULT
;
9353 ret
= get_errno(chroot(p
));
9354 unlock_user(p
, arg1
, 0);
9356 #ifdef TARGET_NR_dup2
9357 case TARGET_NR_dup2
:
9358 ret
= get_errno(dup2(arg1
, arg2
));
9360 fd_trans_dup(arg1
, arg2
);
9364 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9365 case TARGET_NR_dup3
:
9369 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9372 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9373 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9375 fd_trans_dup(arg1
, arg2
);
9380 #ifdef TARGET_NR_getppid /* not on alpha */
9381 case TARGET_NR_getppid
:
9382 return get_errno(getppid());
9384 #ifdef TARGET_NR_getpgrp
9385 case TARGET_NR_getpgrp
:
9386 return get_errno(getpgrp());
9388 case TARGET_NR_setsid
:
9389 return get_errno(setsid());
9390 #ifdef TARGET_NR_sigaction
9391 case TARGET_NR_sigaction
:
9393 #if defined(TARGET_MIPS)
9394 struct target_sigaction act
, oact
, *pact
, *old_act
;
9397 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9398 return -TARGET_EFAULT
;
9399 act
._sa_handler
= old_act
->_sa_handler
;
9400 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9401 act
.sa_flags
= old_act
->sa_flags
;
9402 unlock_user_struct(old_act
, arg2
, 0);
9408 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9410 if (!is_error(ret
) && arg3
) {
9411 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9412 return -TARGET_EFAULT
;
9413 old_act
->_sa_handler
= oact
._sa_handler
;
9414 old_act
->sa_flags
= oact
.sa_flags
;
9415 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9416 old_act
->sa_mask
.sig
[1] = 0;
9417 old_act
->sa_mask
.sig
[2] = 0;
9418 old_act
->sa_mask
.sig
[3] = 0;
9419 unlock_user_struct(old_act
, arg3
, 1);
9422 struct target_old_sigaction
*old_act
;
9423 struct target_sigaction act
, oact
, *pact
;
9425 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9426 return -TARGET_EFAULT
;
9427 act
._sa_handler
= old_act
->_sa_handler
;
9428 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9429 act
.sa_flags
= old_act
->sa_flags
;
9430 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9431 act
.sa_restorer
= old_act
->sa_restorer
;
9433 unlock_user_struct(old_act
, arg2
, 0);
9438 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9439 if (!is_error(ret
) && arg3
) {
9440 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9441 return -TARGET_EFAULT
;
9442 old_act
->_sa_handler
= oact
._sa_handler
;
9443 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9444 old_act
->sa_flags
= oact
.sa_flags
;
9445 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9446 old_act
->sa_restorer
= oact
.sa_restorer
;
9448 unlock_user_struct(old_act
, arg3
, 1);
9454 case TARGET_NR_rt_sigaction
:
9457 * For Alpha and SPARC this is a 5 argument syscall, with
9458 * a 'restorer' parameter which must be copied into the
9459 * sa_restorer field of the sigaction struct.
9460 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9461 * and arg5 is the sigsetsize.
9463 #if defined(TARGET_ALPHA)
9464 target_ulong sigsetsize
= arg4
;
9465 target_ulong restorer
= arg5
;
9466 #elif defined(TARGET_SPARC)
9467 target_ulong restorer
= arg4
;
9468 target_ulong sigsetsize
= arg5
;
9470 target_ulong sigsetsize
= arg4
;
9471 target_ulong restorer
= 0;
9473 struct target_sigaction
*act
= NULL
;
9474 struct target_sigaction
*oact
= NULL
;
9476 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9477 return -TARGET_EINVAL
;
9479 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9480 return -TARGET_EFAULT
;
9482 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9483 ret
= -TARGET_EFAULT
;
9485 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9487 unlock_user_struct(oact
, arg3
, 1);
9491 unlock_user_struct(act
, arg2
, 0);
9495 #ifdef TARGET_NR_sgetmask /* not on alpha */
9496 case TARGET_NR_sgetmask
:
9499 abi_ulong target_set
;
9500 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9502 host_to_target_old_sigset(&target_set
, &cur_set
);
9508 #ifdef TARGET_NR_ssetmask /* not on alpha */
9509 case TARGET_NR_ssetmask
:
9512 abi_ulong target_set
= arg1
;
9513 target_to_host_old_sigset(&set
, &target_set
);
9514 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9516 host_to_target_old_sigset(&target_set
, &oset
);
9522 #ifdef TARGET_NR_sigprocmask
9523 case TARGET_NR_sigprocmask
:
9525 #if defined(TARGET_ALPHA)
9526 sigset_t set
, oldset
;
9531 case TARGET_SIG_BLOCK
:
9534 case TARGET_SIG_UNBLOCK
:
9537 case TARGET_SIG_SETMASK
:
9541 return -TARGET_EINVAL
;
9544 target_to_host_old_sigset(&set
, &mask
);
9546 ret
= do_sigprocmask(how
, &set
, &oldset
);
9547 if (!is_error(ret
)) {
9548 host_to_target_old_sigset(&mask
, &oldset
);
9550 cpu_env
->ir
[IR_V0
] = 0; /* force no error */
9553 sigset_t set
, oldset
, *set_ptr
;
9557 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9559 return -TARGET_EFAULT
;
9561 target_to_host_old_sigset(&set
, p
);
9562 unlock_user(p
, arg2
, 0);
9565 case TARGET_SIG_BLOCK
:
9568 case TARGET_SIG_UNBLOCK
:
9571 case TARGET_SIG_SETMASK
:
9575 return -TARGET_EINVAL
;
9581 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9582 if (!is_error(ret
) && arg3
) {
9583 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9584 return -TARGET_EFAULT
;
9585 host_to_target_old_sigset(p
, &oldset
);
9586 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9592 case TARGET_NR_rt_sigprocmask
:
9595 sigset_t set
, oldset
, *set_ptr
;
9597 if (arg4
!= sizeof(target_sigset_t
)) {
9598 return -TARGET_EINVAL
;
9602 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9604 return -TARGET_EFAULT
;
9606 target_to_host_sigset(&set
, p
);
9607 unlock_user(p
, arg2
, 0);
9610 case TARGET_SIG_BLOCK
:
9613 case TARGET_SIG_UNBLOCK
:
9616 case TARGET_SIG_SETMASK
:
9620 return -TARGET_EINVAL
;
9626 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9627 if (!is_error(ret
) && arg3
) {
9628 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9629 return -TARGET_EFAULT
;
9630 host_to_target_sigset(p
, &oldset
);
9631 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9635 #ifdef TARGET_NR_sigpending
9636 case TARGET_NR_sigpending
:
9639 ret
= get_errno(sigpending(&set
));
9640 if (!is_error(ret
)) {
9641 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9642 return -TARGET_EFAULT
;
9643 host_to_target_old_sigset(p
, &set
);
9644 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9649 case TARGET_NR_rt_sigpending
:
9653 /* Yes, this check is >, not != like most. We follow the kernel's
9654 * logic and it does it like this because it implements
9655 * NR_sigpending through the same code path, and in that case
9656 * the old_sigset_t is smaller in size.
9658 if (arg2
> sizeof(target_sigset_t
)) {
9659 return -TARGET_EINVAL
;
9662 ret
= get_errno(sigpending(&set
));
9663 if (!is_error(ret
)) {
9664 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9665 return -TARGET_EFAULT
;
9666 host_to_target_sigset(p
, &set
);
9667 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9671 #ifdef TARGET_NR_sigsuspend
9672 case TARGET_NR_sigsuspend
:
9676 #if defined(TARGET_ALPHA)
9677 TaskState
*ts
= cpu
->opaque
;
9678 /* target_to_host_old_sigset will bswap back */
9679 abi_ulong mask
= tswapal(arg1
);
9680 set
= &ts
->sigsuspend_mask
;
9681 target_to_host_old_sigset(set
, &mask
);
9683 ret
= process_sigsuspend_mask(&set
, arg1
, sizeof(target_sigset_t
));
9688 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9689 finish_sigsuspend_mask(ret
);
9693 case TARGET_NR_rt_sigsuspend
:
9697 ret
= process_sigsuspend_mask(&set
, arg1
, arg2
);
9701 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9702 finish_sigsuspend_mask(ret
);
9705 #ifdef TARGET_NR_rt_sigtimedwait
9706 case TARGET_NR_rt_sigtimedwait
:
9709 struct timespec uts
, *puts
;
9712 if (arg4
!= sizeof(target_sigset_t
)) {
9713 return -TARGET_EINVAL
;
9716 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9717 return -TARGET_EFAULT
;
9718 target_to_host_sigset(&set
, p
);
9719 unlock_user(p
, arg1
, 0);
9722 if (target_to_host_timespec(puts
, arg3
)) {
9723 return -TARGET_EFAULT
;
9728 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9730 if (!is_error(ret
)) {
9732 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9735 return -TARGET_EFAULT
;
9737 host_to_target_siginfo(p
, &uinfo
);
9738 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9740 ret
= host_to_target_signal(ret
);
9745 #ifdef TARGET_NR_rt_sigtimedwait_time64
9746 case TARGET_NR_rt_sigtimedwait_time64
:
9749 struct timespec uts
, *puts
;
9752 if (arg4
!= sizeof(target_sigset_t
)) {
9753 return -TARGET_EINVAL
;
9756 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9758 return -TARGET_EFAULT
;
9760 target_to_host_sigset(&set
, p
);
9761 unlock_user(p
, arg1
, 0);
9764 if (target_to_host_timespec64(puts
, arg3
)) {
9765 return -TARGET_EFAULT
;
9770 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9772 if (!is_error(ret
)) {
9774 p
= lock_user(VERIFY_WRITE
, arg2
,
9775 sizeof(target_siginfo_t
), 0);
9777 return -TARGET_EFAULT
;
9779 host_to_target_siginfo(p
, &uinfo
);
9780 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9782 ret
= host_to_target_signal(ret
);
9787 case TARGET_NR_rt_sigqueueinfo
:
9791 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9793 return -TARGET_EFAULT
;
9795 target_to_host_siginfo(&uinfo
, p
);
9796 unlock_user(p
, arg3
, 0);
9797 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, target_to_host_signal(arg2
), &uinfo
));
9800 case TARGET_NR_rt_tgsigqueueinfo
:
9804 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9806 return -TARGET_EFAULT
;
9808 target_to_host_siginfo(&uinfo
, p
);
9809 unlock_user(p
, arg4
, 0);
9810 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, target_to_host_signal(arg3
), &uinfo
));
9813 #ifdef TARGET_NR_sigreturn
9814 case TARGET_NR_sigreturn
:
9815 if (block_signals()) {
9816 return -QEMU_ERESTARTSYS
;
9818 return do_sigreturn(cpu_env
);
9820 case TARGET_NR_rt_sigreturn
:
9821 if (block_signals()) {
9822 return -QEMU_ERESTARTSYS
;
9824 return do_rt_sigreturn(cpu_env
);
9825 case TARGET_NR_sethostname
:
9826 if (!(p
= lock_user_string(arg1
)))
9827 return -TARGET_EFAULT
;
9828 ret
= get_errno(sethostname(p
, arg2
));
9829 unlock_user(p
, arg1
, 0);
9831 #ifdef TARGET_NR_setrlimit
9832 case TARGET_NR_setrlimit
:
9834 int resource
= target_to_host_resource(arg1
);
9835 struct target_rlimit
*target_rlim
;
9837 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9838 return -TARGET_EFAULT
;
9839 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9840 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9841 unlock_user_struct(target_rlim
, arg2
, 0);
9843 * If we just passed through resource limit settings for memory then
9844 * they would also apply to QEMU's own allocations, and QEMU will
9845 * crash or hang or die if its allocations fail. Ideally we would
9846 * track the guest allocations in QEMU and apply the limits ourselves.
9847 * For now, just tell the guest the call succeeded but don't actually
9850 if (resource
!= RLIMIT_AS
&&
9851 resource
!= RLIMIT_DATA
&&
9852 resource
!= RLIMIT_STACK
) {
9853 return get_errno(setrlimit(resource
, &rlim
));
9859 #ifdef TARGET_NR_getrlimit
9860 case TARGET_NR_getrlimit
:
9862 int resource
= target_to_host_resource(arg1
);
9863 struct target_rlimit
*target_rlim
;
9866 ret
= get_errno(getrlimit(resource
, &rlim
));
9867 if (!is_error(ret
)) {
9868 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9869 return -TARGET_EFAULT
;
9870 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9871 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9872 unlock_user_struct(target_rlim
, arg2
, 1);
9877 case TARGET_NR_getrusage
:
9879 struct rusage rusage
;
9880 ret
= get_errno(getrusage(arg1
, &rusage
));
9881 if (!is_error(ret
)) {
9882 ret
= host_to_target_rusage(arg2
, &rusage
);
9886 #if defined(TARGET_NR_gettimeofday)
9887 case TARGET_NR_gettimeofday
:
9892 ret
= get_errno(gettimeofday(&tv
, &tz
));
9893 if (!is_error(ret
)) {
9894 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9895 return -TARGET_EFAULT
;
9897 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9898 return -TARGET_EFAULT
;
9904 #if defined(TARGET_NR_settimeofday)
9905 case TARGET_NR_settimeofday
:
9907 struct timeval tv
, *ptv
= NULL
;
9908 struct timezone tz
, *ptz
= NULL
;
9911 if (copy_from_user_timeval(&tv
, arg1
)) {
9912 return -TARGET_EFAULT
;
9918 if (copy_from_user_timezone(&tz
, arg2
)) {
9919 return -TARGET_EFAULT
;
9924 return get_errno(settimeofday(ptv
, ptz
));
9927 #if defined(TARGET_NR_select)
9928 case TARGET_NR_select
:
9929 #if defined(TARGET_WANT_NI_OLD_SELECT)
9930 /* some architectures used to have old_select here
9931 * but now ENOSYS it.
9933 ret
= -TARGET_ENOSYS
;
9934 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9935 ret
= do_old_select(arg1
);
9937 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9941 #ifdef TARGET_NR_pselect6
9942 case TARGET_NR_pselect6
:
9943 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9945 #ifdef TARGET_NR_pselect6_time64
9946 case TARGET_NR_pselect6_time64
:
9947 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9949 #ifdef TARGET_NR_symlink
9950 case TARGET_NR_symlink
:
9953 p
= lock_user_string(arg1
);
9954 p2
= lock_user_string(arg2
);
9956 ret
= -TARGET_EFAULT
;
9958 ret
= get_errno(symlink(p
, p2
));
9959 unlock_user(p2
, arg2
, 0);
9960 unlock_user(p
, arg1
, 0);
9964 #if defined(TARGET_NR_symlinkat)
9965 case TARGET_NR_symlinkat
:
9968 p
= lock_user_string(arg1
);
9969 p2
= lock_user_string(arg3
);
9971 ret
= -TARGET_EFAULT
;
9973 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9974 unlock_user(p2
, arg3
, 0);
9975 unlock_user(p
, arg1
, 0);
9979 #ifdef TARGET_NR_readlink
9980 case TARGET_NR_readlink
:
9983 p
= lock_user_string(arg1
);
9984 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9986 ret
= -TARGET_EFAULT
;
9988 /* Short circuit this for the magic exe check. */
9989 ret
= -TARGET_EINVAL
;
9990 } else if (is_proc_myself((const char *)p
, "exe")) {
9991 char real
[PATH_MAX
], *temp
;
9992 temp
= realpath(exec_path
, real
);
9993 /* Return value is # of bytes that we wrote to the buffer. */
9995 ret
= get_errno(-1);
9997 /* Don't worry about sign mismatch as earlier mapping
9998 * logic would have thrown a bad address error. */
9999 ret
= MIN(strlen(real
), arg3
);
10000 /* We cannot NUL terminate the string. */
10001 memcpy(p2
, real
, ret
);
10004 ret
= get_errno(readlink(path(p
), p2
, arg3
));
10006 unlock_user(p2
, arg2
, ret
);
10007 unlock_user(p
, arg1
, 0);
10011 #if defined(TARGET_NR_readlinkat)
10012 case TARGET_NR_readlinkat
:
10015 p
= lock_user_string(arg2
);
10016 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10018 ret
= -TARGET_EFAULT
;
10019 } else if (!arg4
) {
10020 /* Short circuit this for the magic exe check. */
10021 ret
= -TARGET_EINVAL
;
10022 } else if (is_proc_myself((const char *)p
, "exe")) {
10023 char real
[PATH_MAX
], *temp
;
10024 temp
= realpath(exec_path
, real
);
10025 /* Return value is # of bytes that we wrote to the buffer. */
10026 if (temp
== NULL
) {
10027 ret
= get_errno(-1);
10029 /* Don't worry about sign mismatch as earlier mapping
10030 * logic would have thrown a bad address error. */
10031 ret
= MIN(strlen(real
), arg4
);
10032 /* We cannot NUL terminate the string. */
10033 memcpy(p2
, real
, ret
);
10036 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
10038 unlock_user(p2
, arg3
, ret
);
10039 unlock_user(p
, arg2
, 0);
10043 #ifdef TARGET_NR_swapon
10044 case TARGET_NR_swapon
:
10045 if (!(p
= lock_user_string(arg1
)))
10046 return -TARGET_EFAULT
;
10047 ret
= get_errno(swapon(p
, arg2
));
10048 unlock_user(p
, arg1
, 0);
10051 case TARGET_NR_reboot
:
10052 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
10053 /* arg4 must be ignored in all other cases */
10054 p
= lock_user_string(arg4
);
10056 return -TARGET_EFAULT
;
10058 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
10059 unlock_user(p
, arg4
, 0);
10061 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
10064 #ifdef TARGET_NR_mmap
10065 case TARGET_NR_mmap
:
10066 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10067 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10068 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10069 || defined(TARGET_S390X)
10072 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
10073 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
10074 return -TARGET_EFAULT
;
10075 v1
= tswapal(v
[0]);
10076 v2
= tswapal(v
[1]);
10077 v3
= tswapal(v
[2]);
10078 v4
= tswapal(v
[3]);
10079 v5
= tswapal(v
[4]);
10080 v6
= tswapal(v
[5]);
10081 unlock_user(v
, arg1
, 0);
10082 ret
= get_errno(target_mmap(v1
, v2
, v3
,
10083 target_to_host_bitmask(v4
, mmap_flags_tbl
),
10087 /* mmap pointers are always untagged */
10088 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
10089 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10095 #ifdef TARGET_NR_mmap2
10096 case TARGET_NR_mmap2
:
10098 #define MMAP_SHIFT 12
10100 ret
= target_mmap(arg1
, arg2
, arg3
,
10101 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
10102 arg5
, arg6
<< MMAP_SHIFT
);
10103 return get_errno(ret
);
10105 case TARGET_NR_munmap
:
10106 arg1
= cpu_untagged_addr(cpu
, arg1
);
10107 return get_errno(target_munmap(arg1
, arg2
));
10108 case TARGET_NR_mprotect
:
10109 arg1
= cpu_untagged_addr(cpu
, arg1
);
10111 TaskState
*ts
= cpu
->opaque
;
10112 /* Special hack to detect libc making the stack executable. */
10113 if ((arg3
& PROT_GROWSDOWN
)
10114 && arg1
>= ts
->info
->stack_limit
10115 && arg1
<= ts
->info
->start_stack
) {
10116 arg3
&= ~PROT_GROWSDOWN
;
10117 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
10118 arg1
= ts
->info
->stack_limit
;
10121 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
10122 #ifdef TARGET_NR_mremap
10123 case TARGET_NR_mremap
:
10124 arg1
= cpu_untagged_addr(cpu
, arg1
);
10125 /* mremap new_addr (arg5) is always untagged */
10126 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
10128 /* ??? msync/mlock/munlock are broken for softmmu. */
10129 #ifdef TARGET_NR_msync
10130 case TARGET_NR_msync
:
10131 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
10133 #ifdef TARGET_NR_mlock
10134 case TARGET_NR_mlock
:
10135 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
10137 #ifdef TARGET_NR_munlock
10138 case TARGET_NR_munlock
:
10139 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
10141 #ifdef TARGET_NR_mlockall
10142 case TARGET_NR_mlockall
:
10143 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
10145 #ifdef TARGET_NR_munlockall
10146 case TARGET_NR_munlockall
:
10147 return get_errno(munlockall());
10149 #ifdef TARGET_NR_truncate
10150 case TARGET_NR_truncate
:
10151 if (!(p
= lock_user_string(arg1
)))
10152 return -TARGET_EFAULT
;
10153 ret
= get_errno(truncate(p
, arg2
));
10154 unlock_user(p
, arg1
, 0);
10157 #ifdef TARGET_NR_ftruncate
10158 case TARGET_NR_ftruncate
:
10159 return get_errno(ftruncate(arg1
, arg2
));
10161 case TARGET_NR_fchmod
:
10162 return get_errno(fchmod(arg1
, arg2
));
10163 #if defined(TARGET_NR_fchmodat)
10164 case TARGET_NR_fchmodat
:
10165 if (!(p
= lock_user_string(arg2
)))
10166 return -TARGET_EFAULT
;
10167 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
10168 unlock_user(p
, arg2
, 0);
10171 case TARGET_NR_getpriority
:
10172 /* Note that negative values are valid for getpriority, so we must
10173 differentiate based on errno settings. */
10175 ret
= getpriority(arg1
, arg2
);
10176 if (ret
== -1 && errno
!= 0) {
10177 return -host_to_target_errno(errno
);
10179 #ifdef TARGET_ALPHA
10180 /* Return value is the unbiased priority. Signal no error. */
10181 cpu_env
->ir
[IR_V0
] = 0;
10183 /* Return value is a biased priority to avoid negative numbers. */
10187 case TARGET_NR_setpriority
:
10188 return get_errno(setpriority(arg1
, arg2
, arg3
));
10189 #ifdef TARGET_NR_statfs
10190 case TARGET_NR_statfs
:
10191 if (!(p
= lock_user_string(arg1
))) {
10192 return -TARGET_EFAULT
;
10194 ret
= get_errno(statfs(path(p
), &stfs
));
10195 unlock_user(p
, arg1
, 0);
10197 if (!is_error(ret
)) {
10198 struct target_statfs
*target_stfs
;
10200 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
10201 return -TARGET_EFAULT
;
10202 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10203 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10204 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10205 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10206 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10207 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10208 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10209 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10210 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10211 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10212 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10213 #ifdef _STATFS_F_FLAGS
10214 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10216 __put_user(0, &target_stfs
->f_flags
);
10218 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10219 unlock_user_struct(target_stfs
, arg2
, 1);
10223 #ifdef TARGET_NR_fstatfs
10224 case TARGET_NR_fstatfs
:
10225 ret
= get_errno(fstatfs(arg1
, &stfs
));
10226 goto convert_statfs
;
10228 #ifdef TARGET_NR_statfs64
10229 case TARGET_NR_statfs64
:
10230 if (!(p
= lock_user_string(arg1
))) {
10231 return -TARGET_EFAULT
;
10233 ret
= get_errno(statfs(path(p
), &stfs
));
10234 unlock_user(p
, arg1
, 0);
10236 if (!is_error(ret
)) {
10237 struct target_statfs64
*target_stfs
;
10239 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
10240 return -TARGET_EFAULT
;
10241 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10242 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10243 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10244 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10245 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10246 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10247 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10248 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10249 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10250 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10251 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10252 #ifdef _STATFS_F_FLAGS
10253 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10255 __put_user(0, &target_stfs
->f_flags
);
10257 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10258 unlock_user_struct(target_stfs
, arg3
, 1);
10261 case TARGET_NR_fstatfs64
:
10262 ret
= get_errno(fstatfs(arg1
, &stfs
));
10263 goto convert_statfs64
;
10265 #ifdef TARGET_NR_socketcall
10266 case TARGET_NR_socketcall
:
10267 return do_socketcall(arg1
, arg2
);
10269 #ifdef TARGET_NR_accept
10270 case TARGET_NR_accept
:
10271 return do_accept4(arg1
, arg2
, arg3
, 0);
10273 #ifdef TARGET_NR_accept4
10274 case TARGET_NR_accept4
:
10275 return do_accept4(arg1
, arg2
, arg3
, arg4
);
10277 #ifdef TARGET_NR_bind
10278 case TARGET_NR_bind
:
10279 return do_bind(arg1
, arg2
, arg3
);
10281 #ifdef TARGET_NR_connect
10282 case TARGET_NR_connect
:
10283 return do_connect(arg1
, arg2
, arg3
);
10285 #ifdef TARGET_NR_getpeername
10286 case TARGET_NR_getpeername
:
10287 return do_getpeername(arg1
, arg2
, arg3
);
10289 #ifdef TARGET_NR_getsockname
10290 case TARGET_NR_getsockname
:
10291 return do_getsockname(arg1
, arg2
, arg3
);
10293 #ifdef TARGET_NR_getsockopt
10294 case TARGET_NR_getsockopt
:
10295 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
10297 #ifdef TARGET_NR_listen
10298 case TARGET_NR_listen
:
10299 return get_errno(listen(arg1
, arg2
));
10301 #ifdef TARGET_NR_recv
10302 case TARGET_NR_recv
:
10303 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
10305 #ifdef TARGET_NR_recvfrom
10306 case TARGET_NR_recvfrom
:
10307 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10309 #ifdef TARGET_NR_recvmsg
10310 case TARGET_NR_recvmsg
:
10311 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
10313 #ifdef TARGET_NR_send
10314 case TARGET_NR_send
:
10315 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
10317 #ifdef TARGET_NR_sendmsg
10318 case TARGET_NR_sendmsg
:
10319 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
10321 #ifdef TARGET_NR_sendmmsg
10322 case TARGET_NR_sendmmsg
:
10323 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
10325 #ifdef TARGET_NR_recvmmsg
10326 case TARGET_NR_recvmmsg
:
10327 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
10329 #ifdef TARGET_NR_sendto
10330 case TARGET_NR_sendto
:
10331 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10333 #ifdef TARGET_NR_shutdown
10334 case TARGET_NR_shutdown
:
10335 return get_errno(shutdown(arg1
, arg2
));
10337 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10338 case TARGET_NR_getrandom
:
10339 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10341 return -TARGET_EFAULT
;
10343 ret
= get_errno(getrandom(p
, arg2
, arg3
));
10344 unlock_user(p
, arg1
, ret
);
10347 #ifdef TARGET_NR_socket
10348 case TARGET_NR_socket
:
10349 return do_socket(arg1
, arg2
, arg3
);
10351 #ifdef TARGET_NR_socketpair
10352 case TARGET_NR_socketpair
:
10353 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
10355 #ifdef TARGET_NR_setsockopt
10356 case TARGET_NR_setsockopt
:
10357 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10359 #if defined(TARGET_NR_syslog)
10360 case TARGET_NR_syslog
:
10365 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10366 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10367 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10368 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10369 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10370 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10371 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10372 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10373 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10374 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10375 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10376 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10379 return -TARGET_EINVAL
;
10384 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10386 return -TARGET_EFAULT
;
10388 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10389 unlock_user(p
, arg2
, arg3
);
10393 return -TARGET_EINVAL
;
10398 case TARGET_NR_setitimer
:
10400 struct itimerval value
, ovalue
, *pvalue
;
10404 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10405 || copy_from_user_timeval(&pvalue
->it_value
,
10406 arg2
+ sizeof(struct target_timeval
)))
10407 return -TARGET_EFAULT
;
10411 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10412 if (!is_error(ret
) && arg3
) {
10413 if (copy_to_user_timeval(arg3
,
10414 &ovalue
.it_interval
)
10415 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10417 return -TARGET_EFAULT
;
10421 case TARGET_NR_getitimer
:
10423 struct itimerval value
;
10425 ret
= get_errno(getitimer(arg1
, &value
));
10426 if (!is_error(ret
) && arg2
) {
10427 if (copy_to_user_timeval(arg2
,
10428 &value
.it_interval
)
10429 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10431 return -TARGET_EFAULT
;
10435 #ifdef TARGET_NR_stat
10436 case TARGET_NR_stat
:
10437 if (!(p
= lock_user_string(arg1
))) {
10438 return -TARGET_EFAULT
;
10440 ret
= get_errno(stat(path(p
), &st
));
10441 unlock_user(p
, arg1
, 0);
10444 #ifdef TARGET_NR_lstat
10445 case TARGET_NR_lstat
:
10446 if (!(p
= lock_user_string(arg1
))) {
10447 return -TARGET_EFAULT
;
10449 ret
= get_errno(lstat(path(p
), &st
));
10450 unlock_user(p
, arg1
, 0);
10453 #ifdef TARGET_NR_fstat
10454 case TARGET_NR_fstat
:
10456 ret
= get_errno(fstat(arg1
, &st
));
10457 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10460 if (!is_error(ret
)) {
10461 struct target_stat
*target_st
;
10463 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10464 return -TARGET_EFAULT
;
10465 memset(target_st
, 0, sizeof(*target_st
));
10466 __put_user(st
.st_dev
, &target_st
->st_dev
);
10467 __put_user(st
.st_ino
, &target_st
->st_ino
);
10468 __put_user(st
.st_mode
, &target_st
->st_mode
);
10469 __put_user(st
.st_uid
, &target_st
->st_uid
);
10470 __put_user(st
.st_gid
, &target_st
->st_gid
);
10471 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10472 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10473 __put_user(st
.st_size
, &target_st
->st_size
);
10474 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10475 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10476 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10477 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10478 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10479 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10480 __put_user(st
.st_atim
.tv_nsec
,
10481 &target_st
->target_st_atime_nsec
);
10482 __put_user(st
.st_mtim
.tv_nsec
,
10483 &target_st
->target_st_mtime_nsec
);
10484 __put_user(st
.st_ctim
.tv_nsec
,
10485 &target_st
->target_st_ctime_nsec
);
10487 unlock_user_struct(target_st
, arg2
, 1);
10492 case TARGET_NR_vhangup
:
10493 return get_errno(vhangup());
10494 #ifdef TARGET_NR_syscall
10495 case TARGET_NR_syscall
:
10496 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10497 arg6
, arg7
, arg8
, 0);
10499 #if defined(TARGET_NR_wait4)
10500 case TARGET_NR_wait4
:
10503 abi_long status_ptr
= arg2
;
10504 struct rusage rusage
, *rusage_ptr
;
10505 abi_ulong target_rusage
= arg4
;
10506 abi_long rusage_err
;
10508 rusage_ptr
= &rusage
;
10511 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10512 if (!is_error(ret
)) {
10513 if (status_ptr
&& ret
) {
10514 status
= host_to_target_waitstatus(status
);
10515 if (put_user_s32(status
, status_ptr
))
10516 return -TARGET_EFAULT
;
10518 if (target_rusage
) {
10519 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10528 #ifdef TARGET_NR_swapoff
10529 case TARGET_NR_swapoff
:
10530 if (!(p
= lock_user_string(arg1
)))
10531 return -TARGET_EFAULT
;
10532 ret
= get_errno(swapoff(p
));
10533 unlock_user(p
, arg1
, 0);
10536 case TARGET_NR_sysinfo
:
10538 struct target_sysinfo
*target_value
;
10539 struct sysinfo value
;
10540 ret
= get_errno(sysinfo(&value
));
10541 if (!is_error(ret
) && arg1
)
10543 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10544 return -TARGET_EFAULT
;
10545 __put_user(value
.uptime
, &target_value
->uptime
);
10546 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10547 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10548 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10549 __put_user(value
.totalram
, &target_value
->totalram
);
10550 __put_user(value
.freeram
, &target_value
->freeram
);
10551 __put_user(value
.sharedram
, &target_value
->sharedram
);
10552 __put_user(value
.bufferram
, &target_value
->bufferram
);
10553 __put_user(value
.totalswap
, &target_value
->totalswap
);
10554 __put_user(value
.freeswap
, &target_value
->freeswap
);
10555 __put_user(value
.procs
, &target_value
->procs
);
10556 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10557 __put_user(value
.freehigh
, &target_value
->freehigh
);
10558 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10559 unlock_user_struct(target_value
, arg1
, 1);
10563 #ifdef TARGET_NR_ipc
10564 case TARGET_NR_ipc
:
10565 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10567 #ifdef TARGET_NR_semget
10568 case TARGET_NR_semget
:
10569 return get_errno(semget(arg1
, arg2
, arg3
));
10571 #ifdef TARGET_NR_semop
10572 case TARGET_NR_semop
:
10573 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10575 #ifdef TARGET_NR_semtimedop
10576 case TARGET_NR_semtimedop
:
10577 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10579 #ifdef TARGET_NR_semtimedop_time64
10580 case TARGET_NR_semtimedop_time64
:
10581 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10583 #ifdef TARGET_NR_semctl
10584 case TARGET_NR_semctl
:
10585 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10587 #ifdef TARGET_NR_msgctl
10588 case TARGET_NR_msgctl
:
10589 return do_msgctl(arg1
, arg2
, arg3
);
10591 #ifdef TARGET_NR_msgget
10592 case TARGET_NR_msgget
:
10593 return get_errno(msgget(arg1
, arg2
));
10595 #ifdef TARGET_NR_msgrcv
10596 case TARGET_NR_msgrcv
:
10597 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10599 #ifdef TARGET_NR_msgsnd
10600 case TARGET_NR_msgsnd
:
10601 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10603 #ifdef TARGET_NR_shmget
10604 case TARGET_NR_shmget
:
10605 return get_errno(shmget(arg1
, arg2
, arg3
));
10607 #ifdef TARGET_NR_shmctl
10608 case TARGET_NR_shmctl
:
10609 return do_shmctl(arg1
, arg2
, arg3
);
10611 #ifdef TARGET_NR_shmat
10612 case TARGET_NR_shmat
:
10613 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10615 #ifdef TARGET_NR_shmdt
10616 case TARGET_NR_shmdt
:
10617 return do_shmdt(arg1
);
10619 case TARGET_NR_fsync
:
10620 return get_errno(fsync(arg1
));
10621 case TARGET_NR_clone
:
10622 /* Linux manages to have three different orderings for its
10623 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10624 * match the kernel's CONFIG_CLONE_* settings.
10625 * Microblaze is further special in that it uses a sixth
10626 * implicit argument to clone for the TLS pointer.
10628 #if defined(TARGET_MICROBLAZE)
10629 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10630 #elif defined(TARGET_CLONE_BACKWARDS)
10631 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10632 #elif defined(TARGET_CLONE_BACKWARDS2)
10633 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10635 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10638 #ifdef __NR_exit_group
10639 /* new thread calls */
10640 case TARGET_NR_exit_group
:
10641 preexit_cleanup(cpu_env
, arg1
);
10642 return get_errno(exit_group(arg1
));
10644 case TARGET_NR_setdomainname
:
10645 if (!(p
= lock_user_string(arg1
)))
10646 return -TARGET_EFAULT
;
10647 ret
= get_errno(setdomainname(p
, arg2
));
10648 unlock_user(p
, arg1
, 0);
10650 case TARGET_NR_uname
:
10651 /* no need to transcode because we use the linux syscall */
10653 struct new_utsname
* buf
;
10655 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10656 return -TARGET_EFAULT
;
10657 ret
= get_errno(sys_uname(buf
));
10658 if (!is_error(ret
)) {
10659 /* Overwrite the native machine name with whatever is being
10661 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10662 sizeof(buf
->machine
));
10663 /* Allow the user to override the reported release. */
10664 if (qemu_uname_release
&& *qemu_uname_release
) {
10665 g_strlcpy(buf
->release
, qemu_uname_release
,
10666 sizeof(buf
->release
));
10669 unlock_user_struct(buf
, arg1
, 1);
10673 case TARGET_NR_modify_ldt
:
10674 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10675 #if !defined(TARGET_X86_64)
10676 case TARGET_NR_vm86
:
10677 return do_vm86(cpu_env
, arg1
, arg2
);
10680 #if defined(TARGET_NR_adjtimex)
10681 case TARGET_NR_adjtimex
:
10683 struct timex host_buf
;
10685 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10686 return -TARGET_EFAULT
;
10688 ret
= get_errno(adjtimex(&host_buf
));
10689 if (!is_error(ret
)) {
10690 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10691 return -TARGET_EFAULT
;
10697 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10698 case TARGET_NR_clock_adjtime
:
10700 struct timex htx
, *phtx
= &htx
;
10702 if (target_to_host_timex(phtx
, arg2
) != 0) {
10703 return -TARGET_EFAULT
;
10705 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10706 if (!is_error(ret
) && phtx
) {
10707 if (host_to_target_timex(arg2
, phtx
) != 0) {
10708 return -TARGET_EFAULT
;
10714 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10715 case TARGET_NR_clock_adjtime64
:
10719 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10720 return -TARGET_EFAULT
;
10722 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10723 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10724 return -TARGET_EFAULT
;
10729 case TARGET_NR_getpgid
:
10730 return get_errno(getpgid(arg1
));
10731 case TARGET_NR_fchdir
:
10732 return get_errno(fchdir(arg1
));
10733 case TARGET_NR_personality
:
10734 return get_errno(personality(arg1
));
10735 #ifdef TARGET_NR__llseek /* Not on alpha */
10736 case TARGET_NR__llseek
:
10739 #if !defined(__NR_llseek)
10740 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10742 ret
= get_errno(res
);
10747 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10749 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10750 return -TARGET_EFAULT
;
10755 #ifdef TARGET_NR_getdents
10756 case TARGET_NR_getdents
:
10757 return do_getdents(arg1
, arg2
, arg3
);
10758 #endif /* TARGET_NR_getdents */
10759 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10760 case TARGET_NR_getdents64
:
10761 return do_getdents64(arg1
, arg2
, arg3
);
10762 #endif /* TARGET_NR_getdents64 */
10763 #if defined(TARGET_NR__newselect)
10764 case TARGET_NR__newselect
:
10765 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10767 #ifdef TARGET_NR_poll
10768 case TARGET_NR_poll
:
10769 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10771 #ifdef TARGET_NR_ppoll
10772 case TARGET_NR_ppoll
:
10773 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10775 #ifdef TARGET_NR_ppoll_time64
10776 case TARGET_NR_ppoll_time64
:
10777 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10779 case TARGET_NR_flock
:
10780 /* NOTE: the flock constant seems to be the same for every
10782 return get_errno(safe_flock(arg1
, arg2
));
10783 case TARGET_NR_readv
:
10785 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10787 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10788 unlock_iovec(vec
, arg2
, arg3
, 1);
10790 ret
= -host_to_target_errno(errno
);
10794 case TARGET_NR_writev
:
10796 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10798 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10799 unlock_iovec(vec
, arg2
, arg3
, 0);
10801 ret
= -host_to_target_errno(errno
);
10805 #if defined(TARGET_NR_preadv)
10806 case TARGET_NR_preadv
:
10808 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10810 unsigned long low
, high
;
10812 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10813 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10814 unlock_iovec(vec
, arg2
, arg3
, 1);
10816 ret
= -host_to_target_errno(errno
);
10821 #if defined(TARGET_NR_pwritev)
10822 case TARGET_NR_pwritev
:
10824 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10826 unsigned long low
, high
;
10828 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10829 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10830 unlock_iovec(vec
, arg2
, arg3
, 0);
10832 ret
= -host_to_target_errno(errno
);
10837 case TARGET_NR_getsid
:
10838 return get_errno(getsid(arg1
));
10839 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10840 case TARGET_NR_fdatasync
:
10841 return get_errno(fdatasync(arg1
));
10843 case TARGET_NR_sched_getaffinity
:
10845 unsigned int mask_size
;
10846 unsigned long *mask
;
10849 * sched_getaffinity needs multiples of ulong, so need to take
10850 * care of mismatches between target ulong and host ulong sizes.
10852 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10853 return -TARGET_EINVAL
;
10855 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10857 mask
= alloca(mask_size
);
10858 memset(mask
, 0, mask_size
);
10859 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10861 if (!is_error(ret
)) {
10863 /* More data returned than the caller's buffer will fit.
10864 * This only happens if sizeof(abi_long) < sizeof(long)
10865 * and the caller passed us a buffer holding an odd number
10866 * of abi_longs. If the host kernel is actually using the
10867 * extra 4 bytes then fail EINVAL; otherwise we can just
10868 * ignore them and only copy the interesting part.
10870 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10871 if (numcpus
> arg2
* 8) {
10872 return -TARGET_EINVAL
;
10877 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10878 return -TARGET_EFAULT
;
10883 case TARGET_NR_sched_setaffinity
:
10885 unsigned int mask_size
;
10886 unsigned long *mask
;
10889 * sched_setaffinity needs multiples of ulong, so need to take
10890 * care of mismatches between target ulong and host ulong sizes.
10892 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10893 return -TARGET_EINVAL
;
10895 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10896 mask
= alloca(mask_size
);
10898 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10903 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10905 case TARGET_NR_getcpu
:
10907 unsigned cpu
, node
;
10908 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10909 arg2
? &node
: NULL
,
10911 if (is_error(ret
)) {
10914 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10915 return -TARGET_EFAULT
;
10917 if (arg2
&& put_user_u32(node
, arg2
)) {
10918 return -TARGET_EFAULT
;
10922 case TARGET_NR_sched_setparam
:
10924 struct target_sched_param
*target_schp
;
10925 struct sched_param schp
;
10928 return -TARGET_EINVAL
;
10930 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
10931 return -TARGET_EFAULT
;
10933 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10934 unlock_user_struct(target_schp
, arg2
, 0);
10935 return get_errno(sys_sched_setparam(arg1
, &schp
));
10937 case TARGET_NR_sched_getparam
:
10939 struct target_sched_param
*target_schp
;
10940 struct sched_param schp
;
10943 return -TARGET_EINVAL
;
10945 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
10946 if (!is_error(ret
)) {
10947 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
10948 return -TARGET_EFAULT
;
10950 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10951 unlock_user_struct(target_schp
, arg2
, 1);
10955 case TARGET_NR_sched_setscheduler
:
10957 struct target_sched_param
*target_schp
;
10958 struct sched_param schp
;
10960 return -TARGET_EINVAL
;
10962 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
10963 return -TARGET_EFAULT
;
10965 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10966 unlock_user_struct(target_schp
, arg3
, 0);
10967 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
10969 case TARGET_NR_sched_getscheduler
:
10970 return get_errno(sys_sched_getscheduler(arg1
));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
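            /*
             * sched_attr is an extensible struct: any bytes the caller
             * supplies beyond the fields known here must be zero.
             * check_zeroed_user() returns a negative errno on fault,
             * 0 if it finds a non-zero byte, and a positive value when
             * the tail is clean.
             */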
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr),
                                       size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;
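        /* Two data items give the v2/v3 capability ABI its 64 bits:
         * one 32-bit __user_cap_data_struct covers each half of the
         * effective/permitted/inheritable sets. */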
        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                                 CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                                 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
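            /*
             * The host kernel has no statx(); fall back to fstatat() and
             * fill in the statx fields that struct stat can supply.
             */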
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
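    /* Alpha's getx* syscalls return a pair of values: the real id comes
     * back in the normal return register and the effective id in a4,
     * which is why the cases below write cpu_env->ir[IR_A4]. */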
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            cpu_env->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
11775 case TARGET_NR_setgroups32
:
11777 int gidsetsize
= arg1
;
11778 uint32_t *target_grouplist
;
11782 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11783 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11784 if (!target_grouplist
) {
11785 return -TARGET_EFAULT
;
11787 for(i
= 0;i
< gidsetsize
; i
++)
11788 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11789 unlock_user(target_grouplist
, arg2
, 0);
11790 return get_errno(setgroups(gidsetsize
, grouplist
));
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
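    /* On 32-bit ABIs a 64-bit argument occupies a pair of registers, and
     * some ABIs require that pair to start on an even register number;
     * regpairs_aligned() detects that case so the argument shuffling
     * below can skip the padding slot. */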
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif
#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
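    /* For the *xattr family a NULL or zero-length value buffer is a size
     * probe: the syscall then returns the space required, which is why
     * the lock_user() calls below are skipped when no buffer was given. */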
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        cpu_env->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            cpu_env->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        TaskState *ts = cpu->opaque;
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif
    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                                     target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                                             fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3,
                                   ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1,
                                                              fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
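    /* maxevents is validated against TARGET_EP_MAX_EVENTS below so that
     * the g_try_new() allocation for the host-side event array, and the
     * matching guest-side lock_user() region, stay bounded. */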
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
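        /* New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
         * deliberately not forwarded: applying them to the host process
         * would constrain QEMU itself, so rnewp stays NULL for those. */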
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
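        /* Without start_exclusive, the read-compare-write sequence below
         * is not atomic against other guest threads touching the word. */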
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
         * qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
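    /* Guest-visible timer ids are TIMER_MAGIC | index; get_timer_id(),
     * used by the timer_* cases below, validates the magic and recovers
     * the index into g_posix_timers, or yields a negative errno. */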
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13086 case TARGET_NR_timer_getoverrun
:
13088 /* args: timer_t timerid */
13089 target_timer_t timerid
= get_timer_id(arg1
);
13094 timer_t htimer
= g_posix_timers
[timerid
];
13095 ret
= get_errno(timer_getoverrun(htimer
));
13101 #ifdef TARGET_NR_timer_delete
13102 case TARGET_NR_timer_delete
:
13104 /* args: timer_t timerid */
13105 target_timer_t timerid
= get_timer_id(arg1
);
13110 timer_t htimer
= g_posix_timers
[timerid
];
13111 ret
= get_errno(timer_delete(htimer
));
13112 free_host_timer_slot(timerid
);
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                             target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);
    return ret;
}