4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
120 #include <linux/btrfs.h>
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
126 #include "linux_loop.h"
130 #include "user-internals.h"
132 #include "signal-common.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
145 #define CLONE_IO 0x80000000 /* Clone io context */
148 /* We can't directly call the host clone syscall, because this will
149 * badly confuse libc (breaking mutexes, for example). So we must
150 * divide clone flags into:
151 * * flag combinations that look like pthread_create()
152 * * flag combinations that look like fork()
153 * * flags we can implement within QEMU itself
154 * * flags we can't support and will return an error for
156 /* For thread creation, all these flags must be present; for
157 * fork, none must be present.
159 #define CLONE_THREAD_FLAGS \
160 (CLONE_VM | CLONE_FS | CLONE_FILES | \
161 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 /* These flags are ignored:
164 * CLONE_DETACHED is now ignored by the kernel;
165 * CLONE_IO is just an optimisation hint to the I/O scheduler
167 #define CLONE_IGNORED_FLAGS \
168 (CLONE_DETACHED | CLONE_IO)
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS \
172 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
173 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS \
177 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
178 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 #define CLONE_INVALID_FORK_FLAGS \
181 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 #define CLONE_INVALID_THREAD_FLAGS \
184 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
185 CLONE_IGNORED_FLAGS))
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188 * have almost all been allocated. We cannot support any of
189 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191 * The checks against the invalid thread masks above will catch these.
192 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196 * once. This exercises the codepaths for restart.
198 //#define DEBUG_ERESTARTSYS
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
214 #define _syscall0(type,name) \
215 static type name (void) \
217 return syscall(__NR_##name); \
220 #define _syscall1(type,name,type1,arg1) \
221 static type name (type1 arg1) \
223 return syscall(__NR_##name, arg1); \
226 #define _syscall2(type,name,type1,arg1,type2,arg2) \
227 static type name (type1 arg1,type2 arg2) \
229 return syscall(__NR_##name, arg1, arg2); \
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
233 static type name (type1 arg1,type2 arg2,type3 arg3) \
235 return syscall(__NR_##name, arg1, arg2, arg3); \
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
241 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
248 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
253 type5,arg5,type6,arg6) \
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
257 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
275 #define __NR_sys_statx __NR_statx
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid
)
294 /* For the 64-bit guest on 32-bit host case we must emulate
295 * getdents using getdents64, because otherwise the host
296 * might hand us back more dirent records than we can fit
297 * into the guest buffer after structure format conversion.
298 * Otherwise we emulate getdents with getdents if the host has it.
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
307 #if (defined(TARGET_NR_getdents) && \
308 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
314 loff_t
*, res
, uint
, wh
);
316 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
317 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
319 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group
,int,error_code
)
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address
,int *,tidptr
)
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
328 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
332 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
336 unsigned long *, user_mask_ptr
);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
339 unsigned long *, user_mask_ptr
);
/* sched_attr is not defined in glibc */
/*
 * Mirror of the kernel's struct sched_attr (uapi/linux/sched/types.h),
 * needed because glibc does not expose it.  Field order and widths must
 * match the kernel ABI exactly; sched_util_min/max exist only on newer
 * kernels but the syscall's size argument makes the tail optional.
 */
struct sched_attr {
    uint32_t size;              /* size of this structure, for ABI extension */
    uint32_t sched_policy;      /* SCHED_NORMAL, SCHED_FIFO, ... */
    uint64_t sched_flags;
    int32_t sched_nice;         /* nice value, for SCHED_NORMAL/BATCH */
    uint32_t sched_priority;    /* static priority, for SCHED_FIFO/RR */
    uint64_t sched_runtime;     /* remaining fields are for SCHED_DEADLINE */
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;    /* utilization clamps (newer kernels) */
    uint32_t sched_util_max;
};
353 #define __NR_sys_sched_getattr __NR_sched_getattr
354 _syscall4(int, sys_sched_getattr
, pid_t
, pid
, struct sched_attr
*, attr
,
355 unsigned int, size
, unsigned int, flags
);
356 #define __NR_sys_sched_setattr __NR_sched_setattr
357 _syscall3(int, sys_sched_setattr
, pid_t
, pid
, struct sched_attr
*, attr
,
358 unsigned int, flags
);
359 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
360 _syscall1(int, sys_sched_getscheduler
, pid_t
, pid
);
361 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
362 _syscall3(int, sys_sched_setscheduler
, pid_t
, pid
, int, policy
,
363 const struct sched_param
*, param
);
364 #define __NR_sys_sched_getparam __NR_sched_getparam
365 _syscall2(int, sys_sched_getparam
, pid_t
, pid
,
366 struct sched_param
*, param
);
367 #define __NR_sys_sched_setparam __NR_sched_setparam
368 _syscall2(int, sys_sched_setparam
, pid_t
, pid
,
369 const struct sched_param
*, param
);
370 #define __NR_sys_getcpu __NR_getcpu
371 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
372 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
374 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
375 struct __user_cap_data_struct
*, data
);
376 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
377 struct __user_cap_data_struct
*, data
);
378 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
379 _syscall2(int, ioprio_get
, int, which
, int, who
)
381 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
382 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
384 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
385 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
388 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
389 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
390 unsigned long, idx1
, unsigned long, idx2
)
394 * It is assumed that struct statx is architecture independent.
396 #if defined(TARGET_NR_statx) && defined(__NR_statx)
397 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
398 unsigned int, mask
, struct target_statx
*, statxbuf
)
400 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
401 _syscall2(int, membarrier
, int, cmd
, int, flags
)
404 static const bitmask_transtbl fcntl_flags_tbl
[] = {
405 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
406 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
407 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
408 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
409 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
410 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
411 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
412 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
413 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
414 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
415 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
416 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
417 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
418 #if defined(O_DIRECT)
419 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
421 #if defined(O_NOATIME)
422 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
424 #if defined(O_CLOEXEC)
425 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
428 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
430 #if defined(O_TMPFILE)
431 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
433 /* Don't terminate the list prematurely on 64-bit host+guest. */
434 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
435 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
440 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
442 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
443 #if defined(__NR_utimensat)
444 #define __NR_sys_utimensat __NR_utimensat
445 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
446 const struct timespec
*,tsp
,int,flags
)
448 static int sys_utimensat(int dirfd
, const char *pathname
,
449 const struct timespec times
[2], int flags
)
455 #endif /* TARGET_NR_utimensat */
457 #ifdef TARGET_NR_renameat2
458 #if defined(__NR_renameat2)
459 #define __NR_sys_renameat2 __NR_renameat2
460 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
461 const char *, new, unsigned int, flags
)
463 static int sys_renameat2(int oldfd
, const char *old
,
464 int newfd
, const char *new, int flags
)
467 return renameat(oldfd
, old
, newfd
, new);
473 #endif /* TARGET_NR_renameat2 */
475 #ifdef CONFIG_INOTIFY
476 #include <sys/inotify.h>
478 /* Userspace can usually survive runtime without inotify */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY */
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not be that used by the underlying syscall */
/*
 * Layout of the kernel's prlimit64 rlimit structure: always two 64-bit
 * values, regardless of what the host glibc's struct rlimit looks like.
 */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
495 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
496 const struct host_rlimit64
*, new_limit
,
497 struct host_rlimit64
*, old_limit
)
501 #if defined(TARGET_NR_timer_create)
502 /* Maximum of 32 active POSIX timers allowed at any one time. */
503 static timer_t g_posix_timers
[32] = { 0, } ;
505 static inline int next_free_host_timer(void)
508 /* FIXME: Does finding the next free slot require a lock? */
509 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
510 if (g_posix_timers
[k
] == 0) {
511 g_posix_timers
[k
] = (timer_t
) 1;
519 static inline int host_to_target_errno(int host_errno
)
521 switch (host_errno
) {
522 #define E(X) case X: return TARGET_##X;
523 #include "errnos.c.inc"
530 static inline int target_to_host_errno(int target_errno
)
532 switch (target_errno
) {
533 #define E(X) case TARGET_##X: return X;
534 #include "errnos.c.inc"
541 static inline abi_long
get_errno(abi_long ret
)
544 return -host_to_target_errno(errno
);
549 const char *target_strerror(int err
)
551 if (err
== QEMU_ERESTARTSYS
) {
552 return "To be restarted";
554 if (err
== QEMU_ESIGRETURN
) {
555 return "Successful exit from sigreturn";
558 return strerror(target_to_host_errno(err
));
561 static int check_zeroed_user(abi_long addr
, size_t ksize
, size_t usize
)
565 if (usize
<= ksize
) {
568 for (i
= ksize
; i
< usize
; i
++) {
569 if (get_user_u8(b
, addr
+ i
)) {
570 return -TARGET_EFAULT
;
579 #define safe_syscall0(type, name) \
580 static type safe_##name(void) \
582 return safe_syscall(__NR_##name); \
585 #define safe_syscall1(type, name, type1, arg1) \
586 static type safe_##name(type1 arg1) \
588 return safe_syscall(__NR_##name, arg1); \
591 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
592 static type safe_##name(type1 arg1, type2 arg2) \
594 return safe_syscall(__NR_##name, arg1, arg2); \
597 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
598 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
600 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
603 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
605 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
607 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
610 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
611 type4, arg4, type5, arg5) \
612 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
615 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
618 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
619 type4, arg4, type5, arg5, type6, arg6) \
620 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
621 type5 arg5, type6 arg6) \
623 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
626 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
627 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
628 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
629 int, flags
, mode_t
, mode
)
630 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
631 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
632 struct rusage
*, rusage
)
634 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
635 int, options
, struct rusage
*, rusage
)
636 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
637 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
638 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
639 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
640 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
642 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
643 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
644 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
647 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
648 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
650 #if defined(__NR_futex)
651 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
652 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
654 #if defined(__NR_futex_time64)
655 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
656 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
658 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
659 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
660 safe_syscall2(int, tkill
, int, tid
, int, sig
)
661 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
662 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
663 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
664 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
665 unsigned long, pos_l
, unsigned long, pos_h
)
666 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
667 unsigned long, pos_l
, unsigned long, pos_h
)
668 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
670 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
671 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
672 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
673 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
674 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
675 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
676 safe_syscall2(int, flock
, int, fd
, int, operation
)
677 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
678 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
679 const struct timespec
*, uts
, size_t, sigsetsize
)
681 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
683 #if defined(TARGET_NR_nanosleep)
684 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
685 struct timespec
*, rem
)
687 #if defined(TARGET_NR_clock_nanosleep) || \
688 defined(TARGET_NR_clock_nanosleep_time64)
689 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
690 const struct timespec
*, req
, struct timespec
*, rem
)
694 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
697 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
698 void *, ptr
, long, fifth
)
702 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
706 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
707 long, msgtype
, int, flags
)
709 #ifdef __NR_semtimedop
710 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
711 unsigned, nsops
, const struct timespec
*, timeout
)
713 #if defined(TARGET_NR_mq_timedsend) || \
714 defined(TARGET_NR_mq_timedsend_time64)
715 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
716 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
718 #if defined(TARGET_NR_mq_timedreceive) || \
719 defined(TARGET_NR_mq_timedreceive_time64)
720 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
721 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
723 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
724 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
725 int, outfd
, loff_t
*, poutoff
, size_t, length
,
729 /* We do ioctl like this rather than via safe_syscall3 to preserve the
730 * "third argument might be integer or pointer or not present" behaviour of
733 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
734 /* Similarly for fcntl. Note that callers must always:
735 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
736 * use the flock64 struct rather than unsuffixed flock
737 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
740 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
742 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
745 static inline int host_to_target_sock_type(int host_type
)
749 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
751 target_type
= TARGET_SOCK_DGRAM
;
754 target_type
= TARGET_SOCK_STREAM
;
757 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
761 #if defined(SOCK_CLOEXEC)
762 if (host_type
& SOCK_CLOEXEC
) {
763 target_type
|= TARGET_SOCK_CLOEXEC
;
767 #if defined(SOCK_NONBLOCK)
768 if (host_type
& SOCK_NONBLOCK
) {
769 target_type
|= TARGET_SOCK_NONBLOCK
;
776 static abi_ulong target_brk
;
777 static abi_ulong target_original_brk
;
778 static abi_ulong brk_page
;
780 void target_set_brk(abi_ulong new_brk
)
782 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
783 brk_page
= HOST_PAGE_ALIGN(target_brk
);
786 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
787 #define DEBUGF_BRK(message, args...)
789 /* do_brk() must return target values and target errnos. */
790 abi_long
do_brk(abi_ulong new_brk
)
792 abi_long mapped_addr
;
793 abi_ulong new_alloc_size
;
795 /* brk pointers are always untagged */
797 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
800 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
803 if (new_brk
< target_original_brk
) {
804 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
809 /* If the new brk is less than the highest page reserved to the
810 * target heap allocation, set it and we're almost done... */
811 if (new_brk
<= brk_page
) {
812 /* Heap contents are initialized to zero, as for anonymous
814 if (new_brk
> target_brk
) {
815 memset(g2h_untagged(target_brk
), 0, new_brk
- target_brk
);
817 target_brk
= new_brk
;
818 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
822 /* We need to allocate more memory after the brk... Note that
823 * we don't use MAP_FIXED because that will map over the top of
824 * any existing mapping (like the one with the host libc or qemu
825 * itself); instead we treat "mapped but at wrong address" as
826 * a failure and unmap again.
828 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
829 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
830 PROT_READ
|PROT_WRITE
,
831 MAP_ANON
|MAP_PRIVATE
, 0, 0));
833 if (mapped_addr
== brk_page
) {
834 /* Heap contents are initialized to zero, as for anonymous
835 * mapped pages. Technically the new pages are already
836 * initialized to zero since they *are* anonymous mapped
837 * pages, however we have to take care with the contents that
838 * come from the remaining part of the previous page: it may
839 * contains garbage data due to a previous heap usage (grown
841 memset(g2h_untagged(target_brk
), 0, brk_page
- target_brk
);
843 target_brk
= new_brk
;
844 brk_page
= HOST_PAGE_ALIGN(target_brk
);
845 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
848 } else if (mapped_addr
!= -1) {
849 /* Mapped but at wrong address, meaning there wasn't actually
850 * enough space for this brk.
852 target_munmap(mapped_addr
, new_alloc_size
);
854 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
857 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
860 #if defined(TARGET_ALPHA)
861 /* We (partially) emulate OSF/1 on Alpha, which requires we
862 return a proper errno, not an unchanged brk value. */
863 return -TARGET_ENOMEM
;
865 /* For everything else, return the previous break. */
869 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
870 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
871 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
872 abi_ulong target_fds_addr
,
876 abi_ulong b
, *target_fds
;
878 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
879 if (!(target_fds
= lock_user(VERIFY_READ
,
881 sizeof(abi_ulong
) * nw
,
883 return -TARGET_EFAULT
;
887 for (i
= 0; i
< nw
; i
++) {
888 /* grab the abi_ulong */
889 __get_user(b
, &target_fds
[i
]);
890 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
891 /* check the bit inside the abi_ulong */
898 unlock_user(target_fds
, target_fds_addr
, 0);
903 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
904 abi_ulong target_fds_addr
,
907 if (target_fds_addr
) {
908 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
909 return -TARGET_EFAULT
;
917 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
923 abi_ulong
*target_fds
;
925 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
926 if (!(target_fds
= lock_user(VERIFY_WRITE
,
928 sizeof(abi_ulong
) * nw
,
930 return -TARGET_EFAULT
;
933 for (i
= 0; i
< nw
; i
++) {
935 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
936 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
939 __put_user(v
, &target_fds
[i
]);
942 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
948 #if defined(__alpha__)
954 static inline abi_long
host_to_target_clock_t(long ticks
)
956 #if HOST_HZ == TARGET_HZ
959 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
963 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
964 const struct rusage
*rusage
)
966 struct target_rusage
*target_rusage
;
968 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
969 return -TARGET_EFAULT
;
970 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
971 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
972 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
973 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
974 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
975 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
976 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
977 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
978 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
979 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
980 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
981 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
982 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
983 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
984 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
985 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
986 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
987 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
988 unlock_user_struct(target_rusage
, target_addr
, 1);
993 #ifdef TARGET_NR_setrlimit
994 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
996 abi_ulong target_rlim_swap
;
999 target_rlim_swap
= tswapal(target_rlim
);
1000 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1001 return RLIM_INFINITY
;
1003 result
= target_rlim_swap
;
1004 if (target_rlim_swap
!= (rlim_t
)result
)
1005 return RLIM_INFINITY
;
1011 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1012 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1014 abi_ulong target_rlim_swap
;
1017 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1018 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1020 target_rlim_swap
= rlim
;
1021 result
= tswapal(target_rlim_swap
);
1027 static inline int target_to_host_resource(int code
)
1030 case TARGET_RLIMIT_AS
:
1032 case TARGET_RLIMIT_CORE
:
1034 case TARGET_RLIMIT_CPU
:
1036 case TARGET_RLIMIT_DATA
:
1038 case TARGET_RLIMIT_FSIZE
:
1039 return RLIMIT_FSIZE
;
1040 case TARGET_RLIMIT_LOCKS
:
1041 return RLIMIT_LOCKS
;
1042 case TARGET_RLIMIT_MEMLOCK
:
1043 return RLIMIT_MEMLOCK
;
1044 case TARGET_RLIMIT_MSGQUEUE
:
1045 return RLIMIT_MSGQUEUE
;
1046 case TARGET_RLIMIT_NICE
:
1048 case TARGET_RLIMIT_NOFILE
:
1049 return RLIMIT_NOFILE
;
1050 case TARGET_RLIMIT_NPROC
:
1051 return RLIMIT_NPROC
;
1052 case TARGET_RLIMIT_RSS
:
1054 case TARGET_RLIMIT_RTPRIO
:
1055 return RLIMIT_RTPRIO
;
1056 case TARGET_RLIMIT_RTTIME
:
1057 return RLIMIT_RTTIME
;
1058 case TARGET_RLIMIT_SIGPENDING
:
1059 return RLIMIT_SIGPENDING
;
1060 case TARGET_RLIMIT_STACK
:
1061 return RLIMIT_STACK
;
1067 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1068 abi_ulong target_tv_addr
)
1070 struct target_timeval
*target_tv
;
1072 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1073 return -TARGET_EFAULT
;
1076 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1077 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1079 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1084 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1085 const struct timeval
*tv
)
1087 struct target_timeval
*target_tv
;
1089 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1090 return -TARGET_EFAULT
;
1093 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1094 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1096 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1101 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1102 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1103 abi_ulong target_tv_addr
)
1105 struct target__kernel_sock_timeval
*target_tv
;
1107 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1108 return -TARGET_EFAULT
;
1111 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1112 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1114 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1120 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1121 const struct timeval
*tv
)
1123 struct target__kernel_sock_timeval
*target_tv
;
1125 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1126 return -TARGET_EFAULT
;
1129 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1130 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1132 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1137 #if defined(TARGET_NR_futex) || \
1138 defined(TARGET_NR_rt_sigtimedwait) || \
1139 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1140 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1141 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1142 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1143 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1144 defined(TARGET_NR_timer_settime) || \
1145 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1146 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1147 abi_ulong target_addr
)
1149 struct target_timespec
*target_ts
;
1151 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1152 return -TARGET_EFAULT
;
1154 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1155 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1156 unlock_user_struct(target_ts
, target_addr
, 0);
1161 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1162 defined(TARGET_NR_timer_settime64) || \
1163 defined(TARGET_NR_mq_timedsend_time64) || \
1164 defined(TARGET_NR_mq_timedreceive_time64) || \
1165 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1166 defined(TARGET_NR_clock_nanosleep_time64) || \
1167 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1168 defined(TARGET_NR_utimensat) || \
1169 defined(TARGET_NR_utimensat_time64) || \
1170 defined(TARGET_NR_semtimedop_time64) || \
1171 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1172 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1173 abi_ulong target_addr
)
1175 struct target__kernel_timespec
*target_ts
;
1177 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1178 return -TARGET_EFAULT
;
1180 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1181 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1182 /* in 32bit mode, this drops the padding */
1183 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1184 unlock_user_struct(target_ts
, target_addr
, 0);
1189 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1190 struct timespec
*host_ts
)
1192 struct target_timespec
*target_ts
;
1194 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1195 return -TARGET_EFAULT
;
1197 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1198 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1199 unlock_user_struct(target_ts
, target_addr
, 1);
1203 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1204 struct timespec
*host_ts
)
1206 struct target__kernel_timespec
*target_ts
;
1208 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1209 return -TARGET_EFAULT
;
1211 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1212 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1213 unlock_user_struct(target_ts
, target_addr
, 1);
1217 #if defined(TARGET_NR_gettimeofday)
1218 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1219 struct timezone
*tz
)
1221 struct target_timezone
*target_tz
;
1223 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1224 return -TARGET_EFAULT
;
1227 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1228 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1230 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1236 #if defined(TARGET_NR_settimeofday)
1237 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1238 abi_ulong target_tz_addr
)
1240 struct target_timezone
*target_tz
;
1242 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1243 return -TARGET_EFAULT
;
1246 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1247 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1249 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1255 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1258 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1259 abi_ulong target_mq_attr_addr
)
1261 struct target_mq_attr
*target_mq_attr
;
1263 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1264 target_mq_attr_addr
, 1))
1265 return -TARGET_EFAULT
;
1267 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1268 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1269 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1270 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1272 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1277 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1278 const struct mq_attr
*attr
)
1280 struct target_mq_attr
*target_mq_attr
;
1282 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1283 target_mq_attr_addr
, 0))
1284 return -TARGET_EFAULT
;
1286 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1287 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1288 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1289 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1291 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1297 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1298 /* do_select() must return target values and target errnos. */
1299 static abi_long
do_select(int n
,
1300 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1301 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1303 fd_set rfds
, wfds
, efds
;
1304 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1306 struct timespec ts
, *ts_ptr
;
1309 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1313 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1317 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1322 if (target_tv_addr
) {
1323 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1324 return -TARGET_EFAULT
;
1325 ts
.tv_sec
= tv
.tv_sec
;
1326 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1332 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1335 if (!is_error(ret
)) {
1336 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1337 return -TARGET_EFAULT
;
1338 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1339 return -TARGET_EFAULT
;
1340 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1341 return -TARGET_EFAULT
;
1343 if (target_tv_addr
) {
1344 tv
.tv_sec
= ts
.tv_sec
;
1345 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1346 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1347 return -TARGET_EFAULT
;
1355 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1356 static abi_long
do_old_select(abi_ulong arg1
)
1358 struct target_sel_arg_struct
*sel
;
1359 abi_ulong inp
, outp
, exp
, tvp
;
1362 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1363 return -TARGET_EFAULT
;
1366 nsel
= tswapal(sel
->n
);
1367 inp
= tswapal(sel
->inp
);
1368 outp
= tswapal(sel
->outp
);
1369 exp
= tswapal(sel
->exp
);
1370 tvp
= tswapal(sel
->tvp
);
1372 unlock_user_struct(sel
, arg1
, 0);
1374 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1379 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1380 static abi_long
do_pselect6(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1381 abi_long arg4
, abi_long arg5
, abi_long arg6
,
1384 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
1385 fd_set rfds
, wfds
, efds
;
1386 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1387 struct timespec ts
, *ts_ptr
;
1391 * The 6th arg is actually two args smashed together,
1392 * so we cannot use the C library.
1400 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
1401 target_sigset_t
*target_sigset
;
1409 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1413 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1417 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1423 * This takes a timespec, and not a timeval, so we cannot
1424 * use the do_select() helper ...
1428 if (target_to_host_timespec64(&ts
, ts_addr
)) {
1429 return -TARGET_EFAULT
;
1432 if (target_to_host_timespec(&ts
, ts_addr
)) {
1433 return -TARGET_EFAULT
;
1441 /* Extract the two packed args for the sigset */
1444 sig
.size
= SIGSET_T_SIZE
;
1446 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
1448 return -TARGET_EFAULT
;
1450 arg_sigset
= tswapal(arg7
[0]);
1451 arg_sigsize
= tswapal(arg7
[1]);
1452 unlock_user(arg7
, arg6
, 0);
1456 if (arg_sigsize
!= sizeof(*target_sigset
)) {
1457 /* Like the kernel, we enforce correct size sigsets */
1458 return -TARGET_EINVAL
;
1460 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
1461 sizeof(*target_sigset
), 1);
1462 if (!target_sigset
) {
1463 return -TARGET_EFAULT
;
1465 target_to_host_sigset(&set
, target_sigset
);
1466 unlock_user(target_sigset
, arg_sigset
, 0);
1474 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1477 if (!is_error(ret
)) {
1478 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
)) {
1479 return -TARGET_EFAULT
;
1481 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
)) {
1482 return -TARGET_EFAULT
;
1484 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
)) {
1485 return -TARGET_EFAULT
;
1488 if (ts_addr
&& host_to_target_timespec64(ts_addr
, &ts
)) {
1489 return -TARGET_EFAULT
;
1492 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
)) {
1493 return -TARGET_EFAULT
;
1501 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1502 defined(TARGET_NR_ppoll_time64)
1503 static abi_long
do_ppoll(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1504 abi_long arg4
, abi_long arg5
, bool ppoll
, bool time64
)
1506 struct target_pollfd
*target_pfd
;
1507 unsigned int nfds
= arg2
;
1515 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
1516 return -TARGET_EINVAL
;
1518 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
1519 sizeof(struct target_pollfd
) * nfds
, 1);
1521 return -TARGET_EFAULT
;
1524 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
1525 for (i
= 0; i
< nfds
; i
++) {
1526 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
1527 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
1531 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
1532 target_sigset_t
*target_set
;
1533 sigset_t _set
, *set
= &_set
;
1537 if (target_to_host_timespec64(timeout_ts
, arg3
)) {
1538 unlock_user(target_pfd
, arg1
, 0);
1539 return -TARGET_EFAULT
;
1542 if (target_to_host_timespec(timeout_ts
, arg3
)) {
1543 unlock_user(target_pfd
, arg1
, 0);
1544 return -TARGET_EFAULT
;
1552 if (arg5
!= sizeof(target_sigset_t
)) {
1553 unlock_user(target_pfd
, arg1
, 0);
1554 return -TARGET_EINVAL
;
1557 target_set
= lock_user(VERIFY_READ
, arg4
,
1558 sizeof(target_sigset_t
), 1);
1560 unlock_user(target_pfd
, arg1
, 0);
1561 return -TARGET_EFAULT
;
1563 target_to_host_sigset(set
, target_set
);
1568 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
1569 set
, SIGSET_T_SIZE
));
1571 if (!is_error(ret
) && arg3
) {
1573 if (host_to_target_timespec64(arg3
, timeout_ts
)) {
1574 return -TARGET_EFAULT
;
1577 if (host_to_target_timespec(arg3
, timeout_ts
)) {
1578 return -TARGET_EFAULT
;
1583 unlock_user(target_set
, arg4
, 0);
1586 struct timespec ts
, *pts
;
1589 /* Convert ms to secs, ns */
1590 ts
.tv_sec
= arg3
/ 1000;
1591 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
1594 /* -ve poll() timeout means "infinite" */
1597 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
1600 if (!is_error(ret
)) {
1601 for (i
= 0; i
< nfds
; i
++) {
1602 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
1605 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
1610 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1613 return pipe2(host_pipe
, flags
);
1619 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1620 int flags
, int is_pipe2
)
1624 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1627 return get_errno(ret
);
1629 /* Several targets have special calling conventions for the original
1630 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1632 #if defined(TARGET_ALPHA)
1633 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1634 return host_pipe
[0];
1635 #elif defined(TARGET_MIPS)
1636 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1637 return host_pipe
[0];
1638 #elif defined(TARGET_SH4)
1639 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1640 return host_pipe
[0];
1641 #elif defined(TARGET_SPARC)
1642 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1643 return host_pipe
[0];
1647 if (put_user_s32(host_pipe
[0], pipedes
)
1648 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1649 return -TARGET_EFAULT
;
1650 return get_errno(ret
);
1653 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1654 abi_ulong target_addr
,
1657 struct target_ip_mreqn
*target_smreqn
;
1659 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1661 return -TARGET_EFAULT
;
1662 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1663 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1664 if (len
== sizeof(struct target_ip_mreqn
))
1665 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1666 unlock_user(target_smreqn
, target_addr
, 0);
1671 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1672 abi_ulong target_addr
,
1675 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1676 sa_family_t sa_family
;
1677 struct target_sockaddr
*target_saddr
;
1679 if (fd_trans_target_to_host_addr(fd
)) {
1680 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1683 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1685 return -TARGET_EFAULT
;
1687 sa_family
= tswap16(target_saddr
->sa_family
);
1689 /* Oops. The caller might send a incomplete sun_path; sun_path
1690 * must be terminated by \0 (see the manual page), but
1691 * unfortunately it is quite common to specify sockaddr_un
1692 * length as "strlen(x->sun_path)" while it should be
1693 * "strlen(...) + 1". We'll fix that here if needed.
1694 * Linux kernel has a similar feature.
1697 if (sa_family
== AF_UNIX
) {
1698 if (len
< unix_maxlen
&& len
> 0) {
1699 char *cp
= (char*)target_saddr
;
1701 if ( cp
[len
-1] && !cp
[len
] )
1704 if (len
> unix_maxlen
)
1708 memcpy(addr
, target_saddr
, len
);
1709 addr
->sa_family
= sa_family
;
1710 if (sa_family
== AF_NETLINK
) {
1711 struct sockaddr_nl
*nladdr
;
1713 nladdr
= (struct sockaddr_nl
*)addr
;
1714 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1715 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1716 } else if (sa_family
== AF_PACKET
) {
1717 struct target_sockaddr_ll
*lladdr
;
1719 lladdr
= (struct target_sockaddr_ll
*)addr
;
1720 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1721 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1723 unlock_user(target_saddr
, target_addr
, 0);
1728 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1729 struct sockaddr
*addr
,
1732 struct target_sockaddr
*target_saddr
;
1739 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1741 return -TARGET_EFAULT
;
1742 memcpy(target_saddr
, addr
, len
);
1743 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1744 sizeof(target_saddr
->sa_family
)) {
1745 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1747 if (addr
->sa_family
== AF_NETLINK
&&
1748 len
>= sizeof(struct target_sockaddr_nl
)) {
1749 struct target_sockaddr_nl
*target_nl
=
1750 (struct target_sockaddr_nl
*)target_saddr
;
1751 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1752 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1753 } else if (addr
->sa_family
== AF_PACKET
) {
1754 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1755 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1756 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1757 } else if (addr
->sa_family
== AF_INET6
&&
1758 len
>= sizeof(struct target_sockaddr_in6
)) {
1759 struct target_sockaddr_in6
*target_in6
=
1760 (struct target_sockaddr_in6
*)target_saddr
;
1761 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1763 unlock_user(target_saddr
, target_addr
, len
);
1768 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1769 struct target_msghdr
*target_msgh
)
1771 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1772 abi_long msg_controllen
;
1773 abi_ulong target_cmsg_addr
;
1774 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1775 socklen_t space
= 0;
1777 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1778 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1780 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1781 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1782 target_cmsg_start
= target_cmsg
;
1784 return -TARGET_EFAULT
;
1786 while (cmsg
&& target_cmsg
) {
1787 void *data
= CMSG_DATA(cmsg
);
1788 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1790 int len
= tswapal(target_cmsg
->cmsg_len
)
1791 - sizeof(struct target_cmsghdr
);
1793 space
+= CMSG_SPACE(len
);
1794 if (space
> msgh
->msg_controllen
) {
1795 space
-= CMSG_SPACE(len
);
1796 /* This is a QEMU bug, since we allocated the payload
1797 * area ourselves (unlike overflow in host-to-target
1798 * conversion, which is just the guest giving us a buffer
1799 * that's too small). It can't happen for the payload types
1800 * we currently support; if it becomes an issue in future
1801 * we would need to improve our allocation strategy to
1802 * something more intelligent than "twice the size of the
1803 * target buffer we're reading from".
1805 qemu_log_mask(LOG_UNIMP
,
1806 ("Unsupported ancillary data %d/%d: "
1807 "unhandled msg size\n"),
1808 tswap32(target_cmsg
->cmsg_level
),
1809 tswap32(target_cmsg
->cmsg_type
));
1813 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1814 cmsg
->cmsg_level
= SOL_SOCKET
;
1816 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1818 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1819 cmsg
->cmsg_len
= CMSG_LEN(len
);
1821 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1822 int *fd
= (int *)data
;
1823 int *target_fd
= (int *)target_data
;
1824 int i
, numfds
= len
/ sizeof(int);
1826 for (i
= 0; i
< numfds
; i
++) {
1827 __get_user(fd
[i
], target_fd
+ i
);
1829 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1830 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1831 struct ucred
*cred
= (struct ucred
*)data
;
1832 struct target_ucred
*target_cred
=
1833 (struct target_ucred
*)target_data
;
1835 __get_user(cred
->pid
, &target_cred
->pid
);
1836 __get_user(cred
->uid
, &target_cred
->uid
);
1837 __get_user(cred
->gid
, &target_cred
->gid
);
1839 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1840 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1841 memcpy(data
, target_data
, len
);
1844 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1845 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1848 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1850 msgh
->msg_controllen
= space
;
1854 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1855 struct msghdr
*msgh
)
1857 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1858 abi_long msg_controllen
;
1859 abi_ulong target_cmsg_addr
;
1860 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1861 socklen_t space
= 0;
1863 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1864 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1866 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1867 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1868 target_cmsg_start
= target_cmsg
;
1870 return -TARGET_EFAULT
;
1872 while (cmsg
&& target_cmsg
) {
1873 void *data
= CMSG_DATA(cmsg
);
1874 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1876 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1877 int tgt_len
, tgt_space
;
1879 /* We never copy a half-header but may copy half-data;
1880 * this is Linux's behaviour in put_cmsg(). Note that
1881 * truncation here is a guest problem (which we report
1882 * to the guest via the CTRUNC bit), unlike truncation
1883 * in target_to_host_cmsg, which is a QEMU bug.
1885 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1886 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1890 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1891 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1893 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1895 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1897 /* Payload types which need a different size of payload on
1898 * the target must adjust tgt_len here.
1901 switch (cmsg
->cmsg_level
) {
1903 switch (cmsg
->cmsg_type
) {
1905 tgt_len
= sizeof(struct target_timeval
);
1915 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1916 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1917 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1920 /* We must now copy-and-convert len bytes of payload
1921 * into tgt_len bytes of destination space. Bear in mind
1922 * that in both source and destination we may be dealing
1923 * with a truncated value!
1925 switch (cmsg
->cmsg_level
) {
1927 switch (cmsg
->cmsg_type
) {
1930 int *fd
= (int *)data
;
1931 int *target_fd
= (int *)target_data
;
1932 int i
, numfds
= tgt_len
/ sizeof(int);
1934 for (i
= 0; i
< numfds
; i
++) {
1935 __put_user(fd
[i
], target_fd
+ i
);
1941 struct timeval
*tv
= (struct timeval
*)data
;
1942 struct target_timeval
*target_tv
=
1943 (struct target_timeval
*)target_data
;
1945 if (len
!= sizeof(struct timeval
) ||
1946 tgt_len
!= sizeof(struct target_timeval
)) {
1950 /* copy struct timeval to target */
1951 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1952 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1955 case SCM_CREDENTIALS
:
1957 struct ucred
*cred
= (struct ucred
*)data
;
1958 struct target_ucred
*target_cred
=
1959 (struct target_ucred
*)target_data
;
1961 __put_user(cred
->pid
, &target_cred
->pid
);
1962 __put_user(cred
->uid
, &target_cred
->uid
);
1963 __put_user(cred
->gid
, &target_cred
->gid
);
1972 switch (cmsg
->cmsg_type
) {
1975 uint32_t *v
= (uint32_t *)data
;
1976 uint32_t *t_int
= (uint32_t *)target_data
;
1978 if (len
!= sizeof(uint32_t) ||
1979 tgt_len
!= sizeof(uint32_t)) {
1982 __put_user(*v
, t_int
);
1988 struct sock_extended_err ee
;
1989 struct sockaddr_in offender
;
1991 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1992 struct errhdr_t
*target_errh
=
1993 (struct errhdr_t
*)target_data
;
1995 if (len
!= sizeof(struct errhdr_t
) ||
1996 tgt_len
!= sizeof(struct errhdr_t
)) {
1999 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2000 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2001 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2002 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2003 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2004 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2005 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2006 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2007 (void *) &errh
->offender
, sizeof(errh
->offender
));
2016 switch (cmsg
->cmsg_type
) {
2019 uint32_t *v
= (uint32_t *)data
;
2020 uint32_t *t_int
= (uint32_t *)target_data
;
2022 if (len
!= sizeof(uint32_t) ||
2023 tgt_len
!= sizeof(uint32_t)) {
2026 __put_user(*v
, t_int
);
2032 struct sock_extended_err ee
;
2033 struct sockaddr_in6 offender
;
2035 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2036 struct errhdr6_t
*target_errh
=
2037 (struct errhdr6_t
*)target_data
;
2039 if (len
!= sizeof(struct errhdr6_t
) ||
2040 tgt_len
!= sizeof(struct errhdr6_t
)) {
2043 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2044 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2045 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2046 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2047 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2048 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2049 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2050 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2051 (void *) &errh
->offender
, sizeof(errh
->offender
));
2061 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2062 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2063 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2064 if (tgt_len
> len
) {
2065 memset(target_data
+ len
, 0, tgt_len
- len
);
2069 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2070 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2071 if (msg_controllen
< tgt_space
) {
2072 tgt_space
= msg_controllen
;
2074 msg_controllen
-= tgt_space
;
2076 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2077 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2080 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2082 target_msgh
->msg_controllen
= tswapal(space
);
2086 /* do_setsockopt() Must return target values and target errnos. */
2087 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2088 abi_ulong optval_addr
, socklen_t optlen
)
2092 struct ip_mreqn
*ip_mreq
;
2093 struct ip_mreq_source
*ip_mreq_source
;
2098 /* TCP and UDP options all take an 'int' value. */
2099 if (optlen
< sizeof(uint32_t))
2100 return -TARGET_EINVAL
;
2102 if (get_user_u32(val
, optval_addr
))
2103 return -TARGET_EFAULT
;
2104 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2111 case IP_ROUTER_ALERT
:
2115 case IP_MTU_DISCOVER
:
2122 case IP_MULTICAST_TTL
:
2123 case IP_MULTICAST_LOOP
:
2125 if (optlen
>= sizeof(uint32_t)) {
2126 if (get_user_u32(val
, optval_addr
))
2127 return -TARGET_EFAULT
;
2128 } else if (optlen
>= 1) {
2129 if (get_user_u8(val
, optval_addr
))
2130 return -TARGET_EFAULT
;
2132 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2134 case IP_ADD_MEMBERSHIP
:
2135 case IP_DROP_MEMBERSHIP
:
2136 if (optlen
< sizeof (struct target_ip_mreq
) ||
2137 optlen
> sizeof (struct target_ip_mreqn
))
2138 return -TARGET_EINVAL
;
2140 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2141 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2142 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2145 case IP_BLOCK_SOURCE
:
2146 case IP_UNBLOCK_SOURCE
:
2147 case IP_ADD_SOURCE_MEMBERSHIP
:
2148 case IP_DROP_SOURCE_MEMBERSHIP
:
2149 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2150 return -TARGET_EINVAL
;
2152 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2153 if (!ip_mreq_source
) {
2154 return -TARGET_EFAULT
;
2156 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2157 unlock_user (ip_mreq_source
, optval_addr
, 0);
2166 case IPV6_MTU_DISCOVER
:
2169 case IPV6_RECVPKTINFO
:
2170 case IPV6_UNICAST_HOPS
:
2171 case IPV6_MULTICAST_HOPS
:
2172 case IPV6_MULTICAST_LOOP
:
2174 case IPV6_RECVHOPLIMIT
:
2175 case IPV6_2292HOPLIMIT
:
2178 case IPV6_2292PKTINFO
:
2179 case IPV6_RECVTCLASS
:
2180 case IPV6_RECVRTHDR
:
2181 case IPV6_2292RTHDR
:
2182 case IPV6_RECVHOPOPTS
:
2183 case IPV6_2292HOPOPTS
:
2184 case IPV6_RECVDSTOPTS
:
2185 case IPV6_2292DSTOPTS
:
2187 case IPV6_ADDR_PREFERENCES
:
2188 #ifdef IPV6_RECVPATHMTU
2189 case IPV6_RECVPATHMTU
:
2191 #ifdef IPV6_TRANSPARENT
2192 case IPV6_TRANSPARENT
:
2194 #ifdef IPV6_FREEBIND
2197 #ifdef IPV6_RECVORIGDSTADDR
2198 case IPV6_RECVORIGDSTADDR
:
2201 if (optlen
< sizeof(uint32_t)) {
2202 return -TARGET_EINVAL
;
2204 if (get_user_u32(val
, optval_addr
)) {
2205 return -TARGET_EFAULT
;
2207 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2208 &val
, sizeof(val
)));
2212 struct in6_pktinfo pki
;
2214 if (optlen
< sizeof(pki
)) {
2215 return -TARGET_EINVAL
;
2218 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2219 return -TARGET_EFAULT
;
2222 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2224 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2225 &pki
, sizeof(pki
)));
2228 case IPV6_ADD_MEMBERSHIP
:
2229 case IPV6_DROP_MEMBERSHIP
:
2231 struct ipv6_mreq ipv6mreq
;
2233 if (optlen
< sizeof(ipv6mreq
)) {
2234 return -TARGET_EINVAL
;
2237 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2238 return -TARGET_EFAULT
;
2241 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2243 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2244 &ipv6mreq
, sizeof(ipv6mreq
)));
2255 struct icmp6_filter icmp6f
;
2257 if (optlen
> sizeof(icmp6f
)) {
2258 optlen
= sizeof(icmp6f
);
2261 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2262 return -TARGET_EFAULT
;
2265 for (val
= 0; val
< 8; val
++) {
2266 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2269 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2281 /* those take an u32 value */
2282 if (optlen
< sizeof(uint32_t)) {
2283 return -TARGET_EINVAL
;
2286 if (get_user_u32(val
, optval_addr
)) {
2287 return -TARGET_EFAULT
;
2289 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2290 &val
, sizeof(val
)));
2297 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2302 char *alg_key
= g_malloc(optlen
);
2305 return -TARGET_ENOMEM
;
2307 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2309 return -TARGET_EFAULT
;
2311 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2316 case ALG_SET_AEAD_AUTHSIZE
:
2318 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2327 case TARGET_SOL_SOCKET
:
2329 case TARGET_SO_RCVTIMEO
:
2333 optname
= SO_RCVTIMEO
;
2336 if (optlen
!= sizeof(struct target_timeval
)) {
2337 return -TARGET_EINVAL
;
2340 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2341 return -TARGET_EFAULT
;
2344 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2348 case TARGET_SO_SNDTIMEO
:
2349 optname
= SO_SNDTIMEO
;
2351 case TARGET_SO_ATTACH_FILTER
:
2353 struct target_sock_fprog
*tfprog
;
2354 struct target_sock_filter
*tfilter
;
2355 struct sock_fprog fprog
;
2356 struct sock_filter
*filter
;
2359 if (optlen
!= sizeof(*tfprog
)) {
2360 return -TARGET_EINVAL
;
2362 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2363 return -TARGET_EFAULT
;
2365 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2366 tswapal(tfprog
->filter
), 0)) {
2367 unlock_user_struct(tfprog
, optval_addr
, 1);
2368 return -TARGET_EFAULT
;
2371 fprog
.len
= tswap16(tfprog
->len
);
2372 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2373 if (filter
== NULL
) {
2374 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2375 unlock_user_struct(tfprog
, optval_addr
, 1);
2376 return -TARGET_ENOMEM
;
2378 for (i
= 0; i
< fprog
.len
; i
++) {
2379 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2380 filter
[i
].jt
= tfilter
[i
].jt
;
2381 filter
[i
].jf
= tfilter
[i
].jf
;
2382 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2384 fprog
.filter
= filter
;
2386 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2387 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2390 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2391 unlock_user_struct(tfprog
, optval_addr
, 1);
2394 case TARGET_SO_BINDTODEVICE
:
2396 char *dev_ifname
, *addr_ifname
;
2398 if (optlen
> IFNAMSIZ
- 1) {
2399 optlen
= IFNAMSIZ
- 1;
2401 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2403 return -TARGET_EFAULT
;
2405 optname
= SO_BINDTODEVICE
;
2406 addr_ifname
= alloca(IFNAMSIZ
);
2407 memcpy(addr_ifname
, dev_ifname
, optlen
);
2408 addr_ifname
[optlen
] = 0;
2409 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2410 addr_ifname
, optlen
));
2411 unlock_user (dev_ifname
, optval_addr
, 0);
2414 case TARGET_SO_LINGER
:
2417 struct target_linger
*tlg
;
2419 if (optlen
!= sizeof(struct target_linger
)) {
2420 return -TARGET_EINVAL
;
2422 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2423 return -TARGET_EFAULT
;
2425 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2426 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2427 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2429 unlock_user_struct(tlg
, optval_addr
, 0);
2432 /* Options with 'int' argument. */
2433 case TARGET_SO_DEBUG
:
2436 case TARGET_SO_REUSEADDR
:
2437 optname
= SO_REUSEADDR
;
2440 case TARGET_SO_REUSEPORT
:
2441 optname
= SO_REUSEPORT
;
2444 case TARGET_SO_TYPE
:
2447 case TARGET_SO_ERROR
:
2450 case TARGET_SO_DONTROUTE
:
2451 optname
= SO_DONTROUTE
;
2453 case TARGET_SO_BROADCAST
:
2454 optname
= SO_BROADCAST
;
2456 case TARGET_SO_SNDBUF
:
2457 optname
= SO_SNDBUF
;
2459 case TARGET_SO_SNDBUFFORCE
:
2460 optname
= SO_SNDBUFFORCE
;
2462 case TARGET_SO_RCVBUF
:
2463 optname
= SO_RCVBUF
;
2465 case TARGET_SO_RCVBUFFORCE
:
2466 optname
= SO_RCVBUFFORCE
;
2468 case TARGET_SO_KEEPALIVE
:
2469 optname
= SO_KEEPALIVE
;
2471 case TARGET_SO_OOBINLINE
:
2472 optname
= SO_OOBINLINE
;
2474 case TARGET_SO_NO_CHECK
:
2475 optname
= SO_NO_CHECK
;
2477 case TARGET_SO_PRIORITY
:
2478 optname
= SO_PRIORITY
;
2481 case TARGET_SO_BSDCOMPAT
:
2482 optname
= SO_BSDCOMPAT
;
2485 case TARGET_SO_PASSCRED
:
2486 optname
= SO_PASSCRED
;
2488 case TARGET_SO_PASSSEC
:
2489 optname
= SO_PASSSEC
;
2491 case TARGET_SO_TIMESTAMP
:
2492 optname
= SO_TIMESTAMP
;
2494 case TARGET_SO_RCVLOWAT
:
2495 optname
= SO_RCVLOWAT
;
2500 if (optlen
< sizeof(uint32_t))
2501 return -TARGET_EINVAL
;
2503 if (get_user_u32(val
, optval_addr
))
2504 return -TARGET_EFAULT
;
2505 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2510 case NETLINK_PKTINFO
:
2511 case NETLINK_ADD_MEMBERSHIP
:
2512 case NETLINK_DROP_MEMBERSHIP
:
2513 case NETLINK_BROADCAST_ERROR
:
2514 case NETLINK_NO_ENOBUFS
:
2515 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2516 case NETLINK_LISTEN_ALL_NSID
:
2517 case NETLINK_CAP_ACK
:
2518 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2519 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2520 case NETLINK_EXT_ACK
:
2521 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2522 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2523 case NETLINK_GET_STRICT_CHK
:
2524 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2530 if (optlen
< sizeof(uint32_t)) {
2531 return -TARGET_EINVAL
;
2533 if (get_user_u32(val
, optval_addr
)) {
2534 return -TARGET_EFAULT
;
2536 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2539 #endif /* SOL_NETLINK */
2542 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2544 ret
= -TARGET_ENOPROTOOPT
;
2549 /* do_getsockopt() Must return target values and target errnos. */
2550 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2551 abi_ulong optval_addr
, abi_ulong optlen
)
2558 case TARGET_SOL_SOCKET
:
2561 /* These don't just return a single integer */
2562 case TARGET_SO_PEERNAME
:
2564 case TARGET_SO_RCVTIMEO
: {
2568 optname
= SO_RCVTIMEO
;
2571 if (get_user_u32(len
, optlen
)) {
2572 return -TARGET_EFAULT
;
2575 return -TARGET_EINVAL
;
2579 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2584 if (len
> sizeof(struct target_timeval
)) {
2585 len
= sizeof(struct target_timeval
);
2587 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2588 return -TARGET_EFAULT
;
2590 if (put_user_u32(len
, optlen
)) {
2591 return -TARGET_EFAULT
;
2595 case TARGET_SO_SNDTIMEO
:
2596 optname
= SO_SNDTIMEO
;
2598 case TARGET_SO_PEERCRED
: {
2601 struct target_ucred
*tcr
;
2603 if (get_user_u32(len
, optlen
)) {
2604 return -TARGET_EFAULT
;
2607 return -TARGET_EINVAL
;
2611 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2619 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2620 return -TARGET_EFAULT
;
2622 __put_user(cr
.pid
, &tcr
->pid
);
2623 __put_user(cr
.uid
, &tcr
->uid
);
2624 __put_user(cr
.gid
, &tcr
->gid
);
2625 unlock_user_struct(tcr
, optval_addr
, 1);
2626 if (put_user_u32(len
, optlen
)) {
2627 return -TARGET_EFAULT
;
2631 case TARGET_SO_PEERSEC
: {
2634 if (get_user_u32(len
, optlen
)) {
2635 return -TARGET_EFAULT
;
2638 return -TARGET_EINVAL
;
2640 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2642 return -TARGET_EFAULT
;
2645 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2647 if (put_user_u32(lv
, optlen
)) {
2648 ret
= -TARGET_EFAULT
;
2650 unlock_user(name
, optval_addr
, lv
);
2653 case TARGET_SO_LINGER
:
2657 struct target_linger
*tlg
;
2659 if (get_user_u32(len
, optlen
)) {
2660 return -TARGET_EFAULT
;
2663 return -TARGET_EINVAL
;
2667 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2675 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2676 return -TARGET_EFAULT
;
2678 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2679 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2680 unlock_user_struct(tlg
, optval_addr
, 1);
2681 if (put_user_u32(len
, optlen
)) {
2682 return -TARGET_EFAULT
;
2686 /* Options with 'int' argument. */
2687 case TARGET_SO_DEBUG
:
2690 case TARGET_SO_REUSEADDR
:
2691 optname
= SO_REUSEADDR
;
2694 case TARGET_SO_REUSEPORT
:
2695 optname
= SO_REUSEPORT
;
2698 case TARGET_SO_TYPE
:
2701 case TARGET_SO_ERROR
:
2704 case TARGET_SO_DONTROUTE
:
2705 optname
= SO_DONTROUTE
;
2707 case TARGET_SO_BROADCAST
:
2708 optname
= SO_BROADCAST
;
2710 case TARGET_SO_SNDBUF
:
2711 optname
= SO_SNDBUF
;
2713 case TARGET_SO_RCVBUF
:
2714 optname
= SO_RCVBUF
;
2716 case TARGET_SO_KEEPALIVE
:
2717 optname
= SO_KEEPALIVE
;
2719 case TARGET_SO_OOBINLINE
:
2720 optname
= SO_OOBINLINE
;
2722 case TARGET_SO_NO_CHECK
:
2723 optname
= SO_NO_CHECK
;
2725 case TARGET_SO_PRIORITY
:
2726 optname
= SO_PRIORITY
;
2729 case TARGET_SO_BSDCOMPAT
:
2730 optname
= SO_BSDCOMPAT
;
2733 case TARGET_SO_PASSCRED
:
2734 optname
= SO_PASSCRED
;
2736 case TARGET_SO_TIMESTAMP
:
2737 optname
= SO_TIMESTAMP
;
2739 case TARGET_SO_RCVLOWAT
:
2740 optname
= SO_RCVLOWAT
;
2742 case TARGET_SO_ACCEPTCONN
:
2743 optname
= SO_ACCEPTCONN
;
2745 case TARGET_SO_PROTOCOL
:
2746 optname
= SO_PROTOCOL
;
2748 case TARGET_SO_DOMAIN
:
2749 optname
= SO_DOMAIN
;
2757 /* TCP and UDP options all take an 'int' value. */
2759 if (get_user_u32(len
, optlen
))
2760 return -TARGET_EFAULT
;
2762 return -TARGET_EINVAL
;
2764 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2767 if (optname
== SO_TYPE
) {
2768 val
= host_to_target_sock_type(val
);
2773 if (put_user_u32(val
, optval_addr
))
2774 return -TARGET_EFAULT
;
2776 if (put_user_u8(val
, optval_addr
))
2777 return -TARGET_EFAULT
;
2779 if (put_user_u32(len
, optlen
))
2780 return -TARGET_EFAULT
;
2787 case IP_ROUTER_ALERT
:
2791 case IP_MTU_DISCOVER
:
2797 case IP_MULTICAST_TTL
:
2798 case IP_MULTICAST_LOOP
:
2799 if (get_user_u32(len
, optlen
))
2800 return -TARGET_EFAULT
;
2802 return -TARGET_EINVAL
;
2804 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2807 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2809 if (put_user_u32(len
, optlen
)
2810 || put_user_u8(val
, optval_addr
))
2811 return -TARGET_EFAULT
;
2813 if (len
> sizeof(int))
2815 if (put_user_u32(len
, optlen
)
2816 || put_user_u32(val
, optval_addr
))
2817 return -TARGET_EFAULT
;
2821 ret
= -TARGET_ENOPROTOOPT
;
2827 case IPV6_MTU_DISCOVER
:
2830 case IPV6_RECVPKTINFO
:
2831 case IPV6_UNICAST_HOPS
:
2832 case IPV6_MULTICAST_HOPS
:
2833 case IPV6_MULTICAST_LOOP
:
2835 case IPV6_RECVHOPLIMIT
:
2836 case IPV6_2292HOPLIMIT
:
2839 case IPV6_2292PKTINFO
:
2840 case IPV6_RECVTCLASS
:
2841 case IPV6_RECVRTHDR
:
2842 case IPV6_2292RTHDR
:
2843 case IPV6_RECVHOPOPTS
:
2844 case IPV6_2292HOPOPTS
:
2845 case IPV6_RECVDSTOPTS
:
2846 case IPV6_2292DSTOPTS
:
2848 case IPV6_ADDR_PREFERENCES
:
2849 #ifdef IPV6_RECVPATHMTU
2850 case IPV6_RECVPATHMTU
:
2852 #ifdef IPV6_TRANSPARENT
2853 case IPV6_TRANSPARENT
:
2855 #ifdef IPV6_FREEBIND
2858 #ifdef IPV6_RECVORIGDSTADDR
2859 case IPV6_RECVORIGDSTADDR
:
2861 if (get_user_u32(len
, optlen
))
2862 return -TARGET_EFAULT
;
2864 return -TARGET_EINVAL
;
2866 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2869 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2871 if (put_user_u32(len
, optlen
)
2872 || put_user_u8(val
, optval_addr
))
2873 return -TARGET_EFAULT
;
2875 if (len
> sizeof(int))
2877 if (put_user_u32(len
, optlen
)
2878 || put_user_u32(val
, optval_addr
))
2879 return -TARGET_EFAULT
;
2883 ret
= -TARGET_ENOPROTOOPT
;
2890 case NETLINK_PKTINFO
:
2891 case NETLINK_BROADCAST_ERROR
:
2892 case NETLINK_NO_ENOBUFS
:
2893 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2894 case NETLINK_LISTEN_ALL_NSID
:
2895 case NETLINK_CAP_ACK
:
2896 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2897 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2898 case NETLINK_EXT_ACK
:
2899 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2901 case NETLINK_GET_STRICT_CHK
:
2902 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2903 if (get_user_u32(len
, optlen
)) {
2904 return -TARGET_EFAULT
;
2906 if (len
!= sizeof(val
)) {
2907 return -TARGET_EINVAL
;
2910 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2914 if (put_user_u32(lv
, optlen
)
2915 || put_user_u32(val
, optval_addr
)) {
2916 return -TARGET_EFAULT
;
2919 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2920 case NETLINK_LIST_MEMBERSHIPS
:
2924 if (get_user_u32(len
, optlen
)) {
2925 return -TARGET_EFAULT
;
2928 return -TARGET_EINVAL
;
2930 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2931 if (!results
&& len
> 0) {
2932 return -TARGET_EFAULT
;
2935 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2937 unlock_user(results
, optval_addr
, 0);
2940 /* swap host endianess to target endianess. */
2941 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2942 results
[i
] = tswap32(results
[i
]);
2944 if (put_user_u32(lv
, optlen
)) {
2945 return -TARGET_EFAULT
;
2947 unlock_user(results
, optval_addr
, 0);
2950 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2955 #endif /* SOL_NETLINK */
2958 qemu_log_mask(LOG_UNIMP
,
2959 "getsockopt level=%d optname=%d not yet supported\n",
2961 ret
= -TARGET_EOPNOTSUPP
;
2967 /* Convert target low/high pair representing file offset into the host
2968 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2969 * as the kernel doesn't handle them either.
/*
 * Combines the target's (tlow, thigh) halves into one 64-bit offset, then
 * splits that offset into the host's unsigned long (hlow, hhigh) halves.
 * NOTE(review): the 'thigh' parameter declaration (orig line 2972) and the
 * '*hlow = off;' store (orig lines ~2979-2980) are missing from this
 * extraction — confirm against the full file.
 */
2971 static void target_to_host_low_high(abi_ulong tlow
,
2973 unsigned long *hlow
,
2974 unsigned long *hhigh
)
2976 uint64_t off
= tlow
|
/* Two shifts of half the width each: a single full-width shift would be UB
 * when TARGET_LONG_BITS equals the operand width. */
2977 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2978 TARGET_LONG_BITS
/ 2;
/* Same double-half-shift trick for extracting the host high word. */
2981 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2984 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2985 abi_ulong count
, int copy
)
2987 struct target_iovec
*target_vec
;
2989 abi_ulong total_len
, max_len
;
2992 bool bad_address
= false;
2998 if (count
> IOV_MAX
) {
3003 vec
= g_try_new0(struct iovec
, count
);
3009 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3010 count
* sizeof(struct target_iovec
), 1);
3011 if (target_vec
== NULL
) {
3016 /* ??? If host page size > target page size, this will result in a
3017 value larger than what we can actually support. */
3018 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3021 for (i
= 0; i
< count
; i
++) {
3022 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3023 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3028 } else if (len
== 0) {
3029 /* Zero length pointer is ignored. */
3030 vec
[i
].iov_base
= 0;
3032 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3033 /* If the first buffer pointer is bad, this is a fault. But
3034 * subsequent bad buffers will result in a partial write; this
3035 * is realized by filling the vector with null pointers and
3037 if (!vec
[i
].iov_base
) {
3048 if (len
> max_len
- total_len
) {
3049 len
= max_len
- total_len
;
3052 vec
[i
].iov_len
= len
;
3056 unlock_user(target_vec
, target_addr
, 0);
3061 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3062 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3065 unlock_user(target_vec
, target_addr
, 0);
/*
 * Release a host iovec array previously built by lock_iovec(): re-reads the
 * guest target_iovec array to recover each (base, len) pair, unlocks every
 * host buffer (copying data back to the guest when 'copy' is nonzero), then
 * unlocks the guest vector itself. NOTE(review): several original lines
 * (declarations, braces, the final free of 'vec') were dropped by this
 * extraction — confirm against the full file.
 */
3072 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3073 abi_ulong count
, int copy
)
3075 struct target_iovec
*target_vec
;
3078 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3079 count
* sizeof(struct target_iovec
), 1);
3081 for (i
= 0; i
< count
; i
++) {
3082 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3083 abi_long len
= tswapal(target_vec
[i
].iov_len
);
/* copy != 0 => write vec[i].iov_len bytes back to guest memory; 0 => discard. */
3087 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3089 unlock_user(target_vec
, target_addr
, 0);
/*
 * Translate a guest socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flag bits) into the host's encoding, in place via
 * '*type'. Returns -TARGET_EINVAL when a requested flag has no host
 * equivalent. NOTE(review): 'break;' lines, closing braces and #endif lines
 * were dropped by this extraction.
 */
3095 static inline int target_to_host_sock_type(int *type
)
3098 int target_type
= *type
;
3100 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3101 case TARGET_SOCK_DGRAM
:
3102 host_type
= SOCK_DGRAM
;
3104 case TARGET_SOCK_STREAM
:
3105 host_type
= SOCK_STREAM
;
/* Default: other type values are assumed numerically identical on the host. */
3108 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3111 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3112 #if defined(SOCK_CLOEXEC)
3113 host_type
|= SOCK_CLOEXEC
;
/* Host lacks SOCK_CLOEXEC entirely: cannot emulate, reject. */
3115 return -TARGET_EINVAL
;
3118 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3119 #if defined(SOCK_NONBLOCK)
3120 host_type
|= SOCK_NONBLOCK
;
/* No SOCK_NONBLOCK and no O_NONBLOCK fallback (see sock_flags_fixup): reject. */
3121 #elif !defined(O_NONBLOCK)
3122 return -TARGET_EINVAL
;
3129 /* Try to emulate socket type flags after socket creation. */
/*
 * Fallback for hosts without SOCK_NONBLOCK: if the guest asked for
 * TARGET_SOCK_NONBLOCK, set O_NONBLOCK on the new fd via fcntl().
 * Returns -TARGET_EINVAL on fcntl failure; presumably returns 'fd'
 * otherwise (the tail of the function was dropped by this extraction).
 */
3130 static int sock_flags_fixup(int fd
, int target_type
)
3132 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3133 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3134 int flags
= fcntl(fd
, F_GETFL
);
3135 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3137 return -TARGET_EINVAL
;
3144 /* do_socket() Must return target values and target errnos. */
/*
 * Emulate socket(2): translate the guest type flags, restrict PF_NETLINK to
 * the protocols QEMU can translate, byte-swap the protocol for packet
 * sockets, create the host socket, then register per-fd data translators
 * where the wire format is target-endian. NOTE(review): interior lines
 * (error checks, braces, the NETLINK_ROUTE case label) were dropped by this
 * extraction.
 */
3145 static abi_long
do_socket(int domain
, int type
, int protocol
)
3147 int target_type
= type
;
3150 ret
= target_to_host_sock_type(&type
);
/* Only these netlink protocols have message translators; refuse the rest. */
3155 if (domain
== PF_NETLINK
&& !(
3156 #ifdef CONFIG_RTNETLINK
3157 protocol
== NETLINK_ROUTE
||
3159 protocol
== NETLINK_KOBJECT_UEVENT
||
3160 protocol
== NETLINK_AUDIT
)) {
3161 return -TARGET_EPROTONOSUPPORT
;
/* Packet-socket protocol is an on-the-wire u16 in network order on the
 * guest; swap it into host representation. */
3164 if (domain
== AF_PACKET
||
3165 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3166 protocol
= tswap16(protocol
);
3169 ret
= get_errno(socket(domain
, type
, protocol
));
3171 ret
= sock_flags_fixup(ret
, target_type
);
3172 if (type
== SOCK_PACKET
) {
3173 /* Manage an obsolete case :
3174 * if socket type is SOCK_PACKET, bind by name
3176 fd_trans_register(ret
, &target_packet_trans
);
3177 } else if (domain
== PF_NETLINK
) {
3179 #ifdef CONFIG_RTNETLINK
3181 fd_trans_register(ret
, &target_netlink_route_trans
);
3184 case NETLINK_KOBJECT_UEVENT
:
3185 /* nothing to do: messages are strings */
3188 fd_trans_register(ret
, &target_netlink_audit_trans
);
/* Unreachable: protocol was validated above. */
3191 g_assert_not_reached();
3198 /* do_bind() Must return target values and target errnos. */
/*
 * Emulate bind(2): validate addrlen, convert the guest sockaddr into a
 * host sockaddr on the stack, then bind. NOTE(review): the 'addrlen'
 * parameter line, local declarations and the error check after the
 * conversion were dropped by this extraction.
 */
3199 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
/* Negative lengths (as signed int) mirror the kernel's EINVAL behaviour. */
3205 if ((int)addrlen
< 0) {
3206 return -TARGET_EINVAL
;
/* +1 gives room for a NUL when translating AF_UNIX paths — TODO confirm. */
3209 addr
= alloca(addrlen
+1);
3211 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3215 return get_errno(bind(sockfd
, addr
, addrlen
));
3218 /* do_connect() Must return target values and target errnos. */
/*
 * Emulate connect(2): same shape as do_bind() but uses the signal-safe
 * safe_connect() wrapper since connect can block. NOTE(review): the
 * 'addrlen' parameter line and the post-conversion error check were
 * dropped by this extraction.
 */
3219 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3225 if ((int)addrlen
< 0) {
3226 return -TARGET_EINVAL
;
3229 addr
= alloca(addrlen
+1);
3231 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3235 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3238 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3239 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3240 int flags
, int send
)
3246 abi_ulong target_vec
;
3248 if (msgp
->msg_name
) {
3249 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3250 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3251 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3252 tswapal(msgp
->msg_name
),
3254 if (ret
== -TARGET_EFAULT
) {
3255 /* For connected sockets msg_name and msg_namelen must
3256 * be ignored, so returning EFAULT immediately is wrong.
3257 * Instead, pass a bad msg_name to the host kernel, and
3258 * let it decide whether to return EFAULT or not.
3260 msg
.msg_name
= (void *)-1;
3265 msg
.msg_name
= NULL
;
3266 msg
.msg_namelen
= 0;
3268 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3269 msg
.msg_control
= alloca(msg
.msg_controllen
);
3270 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3272 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3274 count
= tswapal(msgp
->msg_iovlen
);
3275 target_vec
= tswapal(msgp
->msg_iov
);
3277 if (count
> IOV_MAX
) {
3278 /* sendrcvmsg returns a different errno for this condition than
3279 * readv/writev, so we must catch it here before lock_iovec() does.
3281 ret
= -TARGET_EMSGSIZE
;
3285 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3286 target_vec
, count
, send
);
3288 ret
= -host_to_target_errno(errno
);
3291 msg
.msg_iovlen
= count
;
3295 if (fd_trans_target_to_host_data(fd
)) {
3298 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3299 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3300 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3301 msg
.msg_iov
->iov_len
);
3303 msg
.msg_iov
->iov_base
= host_msg
;
3304 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3308 ret
= target_to_host_cmsg(&msg
, msgp
);
3310 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3314 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3315 if (!is_error(ret
)) {
3317 if (fd_trans_host_to_target_data(fd
)) {
3318 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3319 MIN(msg
.msg_iov
->iov_len
, len
));
3321 ret
= host_to_target_cmsg(msgp
, &msg
);
3323 if (!is_error(ret
)) {
3324 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3325 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3326 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3327 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3328 msg
.msg_name
, msg
.msg_namelen
);
3340 unlock_iovec(vec
, target_vec
, count
, !send
);
/*
 * Common wrapper for sendmsg(2)/recvmsg(2) ('send' selects direction):
 * locks the guest target_msghdr, delegates to do_sendrecvmsg_locked(),
 * then unlocks it — copying the header back to the guest only for the
 * receive path (send ? 0 : 1).
 */
3345 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3346 int flags
, int send
)
3349 struct target_msghdr
*msgp
;
/* Sending only reads the guest header; receiving must be able to write it. */
3351 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3355 return -TARGET_EFAULT
;
3357 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3358 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3362 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3363 * so it might not have this *mmsg-specific flag either.
3365 #ifndef MSG_WAITFORONE
3366 #define MSG_WAITFORONE 0x10000
/*
 * Emulate sendmmsg(2)/recvmmsg(2) by looping do_sendrecvmsg_locked() over a
 * locked array of target_mmsghdr, recording each per-message byte count in
 * msg_len. Returns the number of messages processed when any succeeded
 * (partial success), otherwise the error — matching kernel semantics.
 */
3369 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3370 unsigned int vlen
, unsigned int flags
,
3373 struct target_mmsghdr
*mmsgp
;
/* Kernel caps the vector length at UIO_MAXIOV. */
3377 if (vlen
> UIO_MAXIOV
) {
3381 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3383 return -TARGET_EFAULT
;
3386 for (i
= 0; i
< vlen
; i
++) {
3387 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
/* Stop at the first failing message; earlier successes still count. */
3388 if (is_error(ret
)) {
3391 mmsgp
[i
].msg_len
= tswap32(ret
);
3392 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3393 if (flags
& MSG_WAITFORONE
) {
3394 flags
|= MSG_DONTWAIT
;
/* Copy back only the 'i' entries actually touched. */
3398 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3400 /* Return number of datagrams sent if we sent any at all;
3401 * otherwise return the error.
3409 /* do_accept4() Must return target values and target errnos. */
/*
 * Emulate accept4(2): translate the flag bits, handle the NULL-address
 * fast path, validate the guest addrlen pointer/value, accept on a
 * stack sockaddr, then convert the peer address back to the guest and
 * store the (possibly truncated) result length.
 */
3410 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3411 abi_ulong target_addrlen_addr
, int flags
)
3413 socklen_t addrlen
, ret_addrlen
;
/* SOCK_CLOEXEC/SOCK_NONBLOCK share the fcntl flag encoding. */
3418 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
/* Caller doesn't want the peer address: pass NULL straight through. */
3420 if (target_addr
== 0) {
3421 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3424 /* linux returns EFAULT if addrlen pointer is invalid */
3425 if (get_user_u32(addrlen
, target_addrlen_addr
))
3426 return -TARGET_EFAULT
;
3428 if ((int)addrlen
< 0) {
3429 return -TARGET_EINVAL
;
/* Check writability up front so we fault before touching the socket. */
3432 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3433 return -TARGET_EFAULT
;
3436 addr
= alloca(addrlen
);
3438 ret_addrlen
= addrlen
;
3439 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3440 if (!is_error(ret
)) {
/* Never copy more than the buffer the guest provided. */
3441 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3442 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3443 ret
= -TARGET_EFAULT
;
3449 /* do_getpeername() Must return target values and target errnos. */
/*
 * Emulate getpeername(2): read and validate the guest addrlen, fetch the
 * peer address into a stack buffer, convert it back to the guest format
 * (truncated to the guest buffer size) and write back the result length.
 */
3450 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3451 abi_ulong target_addrlen_addr
)
3453 socklen_t addrlen
, ret_addrlen
;
3457 if (get_user_u32(addrlen
, target_addrlen_addr
))
3458 return -TARGET_EFAULT
;
3460 if ((int)addrlen
< 0) {
3461 return -TARGET_EINVAL
;
/* Fault early if the guest output buffer is not writable. */
3464 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3465 return -TARGET_EFAULT
;
3468 addr
= alloca(addrlen
);
3470 ret_addrlen
= addrlen
;
3471 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3472 if (!is_error(ret
)) {
3473 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3474 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3475 ret
= -TARGET_EFAULT
;
3481 /* do_getsockname() Must return target values and target errnos. */
/*
 * Emulate getsockname(2): identical structure to do_getpeername() but
 * queries the socket's local address instead of the peer's.
 */
3482 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3483 abi_ulong target_addrlen_addr
)
3485 socklen_t addrlen
, ret_addrlen
;
3489 if (get_user_u32(addrlen
, target_addrlen_addr
))
3490 return -TARGET_EFAULT
;
3492 if ((int)addrlen
< 0) {
3493 return -TARGET_EINVAL
;
3496 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3497 return -TARGET_EFAULT
;
3500 addr
= alloca(addrlen
);
3502 ret_addrlen
= addrlen
;
3503 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3504 if (!is_error(ret
)) {
3505 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3506 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3507 ret
= -TARGET_EFAULT
;
3513 /* do_socketpair() Must return target values and target errnos. */
/*
 * Emulate socketpair(2): translate the guest type flags, create the host
 * pair, then store both fds as consecutive s32 values at the guest address.
 */
3514 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3515 abi_ulong target_tab_addr
)
3520 target_to_host_sock_type(&type
);
3522 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3523 if (!is_error(ret
)) {
3524 if (put_user_s32(tab
[0], target_tab_addr
)
3525 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3526 ret
= -TARGET_EFAULT
;
3531 /* do_sendto() Must return target values and target errnos. */
/*
 * Emulate sendto(2)/send(2) (target_addr == 0 selects plain send). Locks
 * the guest payload; if the fd has a registered target-to-host data
 * translator (e.g. netlink), the payload is copied into a g_malloc'd
 * scratch buffer and translated there so the guest's original data is
 * never modified. NOTE(review): several interior lines (error checks,
 * the g_free of the scratch buffer, braces) were dropped by this
 * extraction.
 */
3532 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3533 abi_ulong target_addr
, socklen_t addrlen
)
3537 void *copy_msg
= NULL
;
3540 if ((int)addrlen
< 0) {
3541 return -TARGET_EINVAL
;
3544 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3546 return -TARGET_EFAULT
;
3547 if (fd_trans_target_to_host_data(fd
)) {
/* Translate in a private copy; keep the locked guest buffer pristine. */
3548 copy_msg
= host_msg
;
3549 host_msg
= g_malloc(len
);
3550 memcpy(host_msg
, copy_msg
, len
);
3551 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3557 addr
= alloca(addrlen
+1);
3558 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3562 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
/* No destination address supplied: behave like send(2). */
3564 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
/* Restore the locked pointer before unlocking (scratch copy path). */
3569 host_msg
= copy_msg
;
3571 unlock_user(host_msg
, msg
, 0);
3575 /* do_recvfrom() Must return target values and target errnos. */
/*
 * Emulate recvfrom(2)/recv(2) (target_addr == 0 selects plain recv).
 * Locks the guest buffer for writing, receives, optionally runs the fd's
 * host-to-target data translator on the received bytes, then converts the
 * source address back to the guest and writes the result length.
 * NOTE(review): interior lines (goto fail labels, braces, declarations)
 * were dropped by this extraction.
 */
3576 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3577 abi_ulong target_addr
,
3578 abi_ulong target_addrlen
)
3580 socklen_t addrlen
, ret_addrlen
;
3588 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3590 return -TARGET_EFAULT
;
3594 if (get_user_u32(addrlen
, target_addrlen
)) {
3595 ret
= -TARGET_EFAULT
;
3598 if ((int)addrlen
< 0) {
3599 ret
= -TARGET_EINVAL
;
3602 addr
= alloca(addrlen
);
3603 ret_addrlen
= addrlen
;
3604 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3605 addr
, &ret_addrlen
));
3607 addr
= NULL
; /* To keep compiler quiet. */
3608 addrlen
= 0; /* To keep compiler quiet. */
3609 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3611 if (!is_error(ret
)) {
3612 if (fd_trans_host_to_target_data(fd
)) {
/* Translator sees only the bytes actually received, capped at 'len'. */
3614 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3615 if (is_error(trans
)) {
3621 host_to_target_sockaddr(target_addr
, addr
,
3622 MIN(addrlen
, ret_addrlen
));
3623 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3624 ret
= -TARGET_EFAULT
;
/* Success path: copy the received bytes back to the guest. */
3628 unlock_user(host_msg
, msg
, len
);
/* Failure path: release the buffer without copying anything back. */
3631 unlock_user(host_msg
, msg
, 0);
3636 #ifdef TARGET_NR_socketcall
3637 /* do_socketcall() must return target values and target errnos. */
3638 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3640 static const unsigned nargs
[] = { /* number of arguments per operation */
3641 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3642 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3643 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3644 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3645 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3646 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3647 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3648 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3649 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3650 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3651 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3652 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3653 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3654 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3655 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3656 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3657 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3658 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3659 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3660 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3662 abi_long a
[6]; /* max 6 args */
3665 /* check the range of the first argument num */
3666 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3667 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3668 return -TARGET_EINVAL
;
3670 /* ensure we have space for args */
3671 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3672 return -TARGET_EINVAL
;
3674 /* collect the arguments in a[] according to nargs[] */
3675 for (i
= 0; i
< nargs
[num
]; ++i
) {
3676 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3677 return -TARGET_EFAULT
;
3680 /* now when we have the args, invoke the appropriate underlying function */
3682 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3683 return do_socket(a
[0], a
[1], a
[2]);
3684 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3685 return do_bind(a
[0], a
[1], a
[2]);
3686 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3687 return do_connect(a
[0], a
[1], a
[2]);
3688 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3689 return get_errno(listen(a
[0], a
[1]));
3690 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3691 return do_accept4(a
[0], a
[1], a
[2], 0);
3692 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3693 return do_getsockname(a
[0], a
[1], a
[2]);
3694 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3695 return do_getpeername(a
[0], a
[1], a
[2]);
3696 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3697 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3698 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3699 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3700 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3701 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3702 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3703 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3704 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3705 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3706 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3707 return get_errno(shutdown(a
[0], a
[1]));
3708 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3709 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3710 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3711 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3712 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3713 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3714 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3715 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3716 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3717 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3718 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3719 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3720 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3721 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3723 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3724 return -TARGET_EINVAL
;
3729 #define N_SHM_REGIONS 32
/* Fixed-size table tracking guest shmat() mappings (fields lost in this
 * extraction — presumably guest address and size; confirm in full file). */
3731 static struct shm_region
{
3735 } shm_regions
[N_SHM_REGIONS
];
3737 #ifndef TARGET_SEMID64_DS
3738 /* asm-generic version of this struct */
/* Guest-layout semid64_ds used when the target doesn't define its own:
 * on 32-bit ABIs each time field is padded to 64 bits with an unused slot. */
3739 struct target_semid64_ds
3741 struct target_ipc_perm sem_perm
;
3742 abi_ulong sem_otime
;
3743 #if TARGET_ABI_BITS == 32
3744 abi_ulong __unused1
;
3746 abi_ulong sem_ctime
;
3747 #if TARGET_ABI_BITS == 32
3748 abi_ulong __unused2
;
3750 abi_ulong sem_nsems
;
3751 abi_ulong __unused3
;
3752 abi_ulong __unused4
;
/*
 * Copy the sem_perm member of a guest target_semid64_ds at 'target_addr'
 * into the host 'host_ip', byte-swapping each field. 'mode' and '__seq'
 * are 16-bit on most targets but 32-bit on Alpha/MIPS/PPC (and PPC widens
 * __seq too), hence the per-target tswap width selection.
 */
3756 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3757 abi_ulong target_addr
)
3759 struct target_ipc_perm
*target_ip
;
3760 struct target_semid64_ds
*target_sd
;
3762 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3763 return -TARGET_EFAULT
;
3764 target_ip
= &(target_sd
->sem_perm
);
3765 host_ip
->__key
= tswap32(target_ip
->__key
);
3766 host_ip
->uid
= tswap32(target_ip
->uid
);
3767 host_ip
->gid
= tswap32(target_ip
->gid
);
3768 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3769 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3770 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3771 host_ip
->mode
= tswap32(target_ip
->mode
);
3773 host_ip
->mode
= tswap16(target_ip
->mode
);
3775 #if defined(TARGET_PPC)
3776 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3778 host_ip
->__seq
= tswap16(target_ip
->__seq
);
/* Read-only access: nothing is copied back to the guest. */
3780 unlock_user_struct(target_sd
, target_addr
, 0);
3784 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3785 struct ipc_perm
*host_ip
)
3787 struct target_ipc_perm
*target_ip
;
3788 struct target_semid64_ds
*target_sd
;
3790 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3791 return -TARGET_EFAULT
;
3792 target_ip
= &(target_sd
->sem_perm
);
3793 target_ip
->__key
= tswap32(host_ip
->__key
);
3794 target_ip
->uid
= tswap32(host_ip
->uid
);
3795 target_ip
->gid
= tswap32(host_ip
->gid
);
3796 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3797 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3798 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3799 target_ip
->mode
= tswap32(host_ip
->mode
);
3801 target_ip
->mode
= tswap16(host_ip
->mode
);
3803 #if defined(TARGET_PPC)
3804 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3806 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3808 unlock_user_struct(target_sd
, target_addr
, 1);
3812 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3813 abi_ulong target_addr
)
3815 struct target_semid64_ds
*target_sd
;
3817 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3818 return -TARGET_EFAULT
;
3819 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3820 return -TARGET_EFAULT
;
3821 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3822 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3823 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3824 unlock_user_struct(target_sd
, target_addr
, 0);
3828 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3829 struct semid_ds
*host_sd
)
3831 struct target_semid64_ds
*target_sd
;
3833 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3834 return -TARGET_EFAULT
;
3835 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3836 return -TARGET_EFAULT
;
3837 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3838 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3839 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3840 unlock_user_struct(target_sd
, target_addr
, 1);
3844 struct target_seminfo
{
3857 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3858 struct seminfo
*host_seminfo
)
3860 struct target_seminfo
*target_seminfo
;
3861 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3862 return -TARGET_EFAULT
;
3863 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3864 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3865 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3866 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3867 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3868 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3869 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3870 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3871 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3872 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3873 unlock_user_struct(target_seminfo
, target_addr
, 1);
3879 struct semid_ds
*buf
;
3880 unsigned short *array
;
3881 struct seminfo
*__buf
;
3884 union target_semun
{
3891 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3892 abi_ulong target_addr
)
3895 unsigned short *array
;
3897 struct semid_ds semid_ds
;
3900 semun
.buf
= &semid_ds
;
3902 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3904 return get_errno(ret
);
3906 nsems
= semid_ds
.sem_nsems
;
3908 *host_array
= g_try_new(unsigned short, nsems
);
3910 return -TARGET_ENOMEM
;
3912 array
= lock_user(VERIFY_READ
, target_addr
,
3913 nsems
*sizeof(unsigned short), 1);
3915 g_free(*host_array
);
3916 return -TARGET_EFAULT
;
3919 for(i
=0; i
<nsems
; i
++) {
3920 __get_user((*host_array
)[i
], &array
[i
]);
3922 unlock_user(array
, target_addr
, 0);
3927 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3928 unsigned short **host_array
)
3931 unsigned short *array
;
3933 struct semid_ds semid_ds
;
3936 semun
.buf
= &semid_ds
;
3938 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3940 return get_errno(ret
);
3942 nsems
= semid_ds
.sem_nsems
;
3944 array
= lock_user(VERIFY_WRITE
, target_addr
,
3945 nsems
*sizeof(unsigned short), 0);
3947 return -TARGET_EFAULT
;
3949 for(i
=0; i
<nsems
; i
++) {
3950 __put_user((*host_array
)[i
], &array
[i
]);
3952 g_free(*host_array
);
3953 unlock_user(array
, target_addr
, 1);
3958 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3959 abi_ulong target_arg
)
3961 union target_semun target_su
= { .buf
= target_arg
};
3963 struct semid_ds dsarg
;
3964 unsigned short *array
= NULL
;
3965 struct seminfo seminfo
;
3966 abi_long ret
= -TARGET_EINVAL
;
3973 /* In 64 bit cross-endian situations, we will erroneously pick up
3974 * the wrong half of the union for the "val" element. To rectify
3975 * this, the entire 8-byte structure is byteswapped, followed by
3976 * a swap of the 4 byte val field. In other cases, the data is
3977 * already in proper host byte order. */
3978 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3979 target_su
.buf
= tswapal(target_su
.buf
);
3980 arg
.val
= tswap32(target_su
.val
);
3982 arg
.val
= target_su
.val
;
3984 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3988 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3992 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3993 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4000 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4004 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4005 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4011 arg
.__buf
= &seminfo
;
4012 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4013 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4021 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4028 struct target_sembuf
{
4029 unsigned short sem_num
;
4034 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4035 abi_ulong target_addr
,
4038 struct target_sembuf
*target_sembuf
;
4041 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4042 nsops
*sizeof(struct target_sembuf
), 1);
4044 return -TARGET_EFAULT
;
4046 for(i
=0; i
<nsops
; i
++) {
4047 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4048 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4049 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4052 unlock_user(target_sembuf
, target_addr
, 0);
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop()/semtimedop().  timeout is a guest address of a
 * timespec (0 means block forever); time64 selects the 64-bit timespec
 * layout.  Falls back to the ipc(2) multiplexer when the host lacks a
 * direct semtimedop syscall.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4120 struct target_msqid_ds
4122 struct target_ipc_perm msg_perm
;
4123 abi_ulong msg_stime
;
4124 #if TARGET_ABI_BITS == 32
4125 abi_ulong __unused1
;
4127 abi_ulong msg_rtime
;
4128 #if TARGET_ABI_BITS == 32
4129 abi_ulong __unused2
;
4131 abi_ulong msg_ctime
;
4132 #if TARGET_ABI_BITS == 32
4133 abi_ulong __unused3
;
4135 abi_ulong __msg_cbytes
;
4137 abi_ulong msg_qbytes
;
4138 abi_ulong msg_lspid
;
4139 abi_ulong msg_lrpid
;
4140 abi_ulong __unused4
;
4141 abi_ulong __unused5
;
4144 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4145 abi_ulong target_addr
)
4147 struct target_msqid_ds
*target_md
;
4149 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4150 return -TARGET_EFAULT
;
4151 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4152 return -TARGET_EFAULT
;
4153 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4154 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4155 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4156 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4157 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4158 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4159 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4160 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4161 unlock_user_struct(target_md
, target_addr
, 0);
4165 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4166 struct msqid_ds
*host_md
)
4168 struct target_msqid_ds
*target_md
;
4170 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4171 return -TARGET_EFAULT
;
4172 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4173 return -TARGET_EFAULT
;
4174 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4175 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4176 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4177 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4178 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4179 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4180 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4181 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4182 unlock_user_struct(target_md
, target_addr
, 1);
4186 struct target_msginfo
{
4194 unsigned short int msgseg
;
4197 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4198 struct msginfo
*host_msginfo
)
4200 struct target_msginfo
*target_msginfo
;
4201 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4202 return -TARGET_EFAULT
;
4203 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4204 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4205 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4206 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4207 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4208 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4209 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4210 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4211 unlock_user_struct(target_msginfo
, target_addr
, 1);
4215 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4217 struct msqid_ds dsarg
;
4218 struct msginfo msginfo
;
4219 abi_long ret
= -TARGET_EINVAL
;
4227 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4228 return -TARGET_EFAULT
;
4229 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4230 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4231 return -TARGET_EFAULT
;
4234 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4238 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4239 if (host_to_target_msginfo(ptr
, &msginfo
))
4240 return -TARGET_EFAULT
;
4247 struct target_msgbuf
{
4252 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4253 ssize_t msgsz
, int msgflg
)
4255 struct target_msgbuf
*target_mb
;
4256 struct msgbuf
*host_mb
;
4260 return -TARGET_EINVAL
;
4263 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4264 return -TARGET_EFAULT
;
4265 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4267 unlock_user_struct(target_mb
, msgp
, 0);
4268 return -TARGET_ENOMEM
;
4270 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4271 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4272 ret
= -TARGET_ENOSYS
;
4274 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4277 if (ret
== -TARGET_ENOSYS
) {
4279 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4282 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4288 unlock_user_struct(target_mb
, msgp
, 0);
4294 #if defined(__sparc__)
4295 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4296 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4297 #elif defined(__s390x__)
4298 /* The s390 sys_ipc variant has only five parameters. */
4299 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4300 ((long int[]){(long int)__msgp, __msgtyp})
4302 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4303 ((long int[]){(long int)__msgp, __msgtyp}), 0
4307 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4308 ssize_t msgsz
, abi_long msgtyp
,
4311 struct target_msgbuf
*target_mb
;
4313 struct msgbuf
*host_mb
;
4317 return -TARGET_EINVAL
;
4320 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4321 return -TARGET_EFAULT
;
4323 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4325 ret
= -TARGET_ENOMEM
;
4328 ret
= -TARGET_ENOSYS
;
4330 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4333 if (ret
== -TARGET_ENOSYS
) {
4334 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4335 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4340 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4341 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4342 if (!target_mtext
) {
4343 ret
= -TARGET_EFAULT
;
4346 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4347 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4350 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4354 unlock_user_struct(target_mb
, msgp
, 1);
4359 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4360 abi_ulong target_addr
)
4362 struct target_shmid_ds
*target_sd
;
4364 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4365 return -TARGET_EFAULT
;
4366 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4367 return -TARGET_EFAULT
;
4368 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4369 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4370 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4371 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4372 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4373 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4374 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4375 unlock_user_struct(target_sd
, target_addr
, 0);
4379 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4380 struct shmid_ds
*host_sd
)
4382 struct target_shmid_ds
*target_sd
;
4384 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4385 return -TARGET_EFAULT
;
4386 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4387 return -TARGET_EFAULT
;
4388 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4389 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4390 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4391 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4392 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4393 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4394 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4395 unlock_user_struct(target_sd
, target_addr
, 1);
4399 struct target_shminfo
{
4407 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4408 struct shminfo
*host_shminfo
)
4410 struct target_shminfo
*target_shminfo
;
4411 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4412 return -TARGET_EFAULT
;
4413 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4414 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4415 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4416 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4417 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4418 unlock_user_struct(target_shminfo
, target_addr
, 1);
4422 struct target_shm_info
{
4427 abi_ulong swap_attempts
;
4428 abi_ulong swap_successes
;
4431 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4432 struct shm_info
*host_shm_info
)
4434 struct target_shm_info
*target_shm_info
;
4435 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4436 return -TARGET_EFAULT
;
4437 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4438 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4439 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4440 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4441 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4442 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4443 unlock_user_struct(target_shm_info
, target_addr
, 1);
4447 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4449 struct shmid_ds dsarg
;
4450 struct shminfo shminfo
;
4451 struct shm_info shm_info
;
4452 abi_long ret
= -TARGET_EINVAL
;
4460 if (target_to_host_shmid_ds(&dsarg
, buf
))
4461 return -TARGET_EFAULT
;
4462 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4463 if (host_to_target_shmid_ds(buf
, &dsarg
))
4464 return -TARGET_EFAULT
;
4467 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4468 if (host_to_target_shminfo(buf
, &shminfo
))
4469 return -TARGET_EFAULT
;
4472 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4473 if (host_to_target_shm_info(buf
, &shm_info
))
4474 return -TARGET_EFAULT
;
4479 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4486 #ifndef TARGET_FORCE_SHMLBA
4487 /* For most architectures, SHMLBA is the same as the page size;
4488 * some architectures have larger values, in which case they should
4489 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4490 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4491 * and defining its own value for SHMLBA.
4493 * The kernel also permits SHMLBA to be set by the architecture to a
4494 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4495 * this means that addresses are rounded to the large size if
4496 * SHM_RND is set but addresses not aligned to that size are not rejected
4497 * as long as they are at least page-aligned. Since the only architecture
4498 * which uses this is ia64 this code doesn't provide for that oddity.
4500 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4502 return TARGET_PAGE_SIZE
;
4506 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4507 int shmid
, abi_ulong shmaddr
, int shmflg
)
4509 CPUState
*cpu
= env_cpu(cpu_env
);
4512 struct shmid_ds shm_info
;
4516 /* shmat pointers are always untagged */
4518 /* find out the length of the shared memory segment */
4519 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4520 if (is_error(ret
)) {
4521 /* can't get length, bail out */
4525 shmlba
= target_shmlba(cpu_env
);
4527 if (shmaddr
& (shmlba
- 1)) {
4528 if (shmflg
& SHM_RND
) {
4529 shmaddr
&= ~(shmlba
- 1);
4531 return -TARGET_EINVAL
;
4534 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4535 return -TARGET_EINVAL
;
4541 * We're mapping shared memory, so ensure we generate code for parallel
4542 * execution and flush old translations. This will work up to the level
4543 * supported by the host -- anything that requires EXCP_ATOMIC will not
4544 * be atomic with respect to an external process.
4546 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4547 cpu
->tcg_cflags
|= CF_PARALLEL
;
4552 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4554 abi_ulong mmap_start
;
4556 /* In order to use the host shmat, we need to honor host SHMLBA. */
4557 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4559 if (mmap_start
== -1) {
4561 host_raddr
= (void *)-1;
4563 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4564 shmflg
| SHM_REMAP
);
4567 if (host_raddr
== (void *)-1) {
4569 return get_errno((long)host_raddr
);
4571 raddr
=h2g((unsigned long)host_raddr
);
4573 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4574 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4575 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4577 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4578 if (!shm_regions
[i
].in_use
) {
4579 shm_regions
[i
].in_use
= true;
4580 shm_regions
[i
].start
= raddr
;
4581 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4591 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4596 /* shmdt pointers are always untagged */
4600 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4601 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4602 shm_regions
[i
].in_use
= false;
4603 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4607 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* high 16 bits of 'call' carry the kludge version number */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style msgrcv packs msgp/msgtyp into a kludge struct */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4735 /* kernel structure types definitions */
4737 #define STRUCT(name, ...) STRUCT_ ## name,
4738 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4740 #include "syscall_types.h"
4744 #undef STRUCT_SPECIAL
4746 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4747 #define STRUCT_SPECIAL(name)
4748 #include "syscall_types.h"
4750 #undef STRUCT_SPECIAL
4752 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4843 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4844 int fd
, int cmd
, abi_long arg
)
4846 const argtype
*arg_type
= ie
->arg_type
;
4850 struct ifconf
*host_ifconf
;
4852 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4853 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4854 int target_ifreq_size
;
4859 abi_long target_ifc_buf
;
4863 assert(arg_type
[0] == TYPE_PTR
);
4864 assert(ie
->access
== IOC_RW
);
4867 target_size
= thunk_type_size(arg_type
, 0);
4869 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4871 return -TARGET_EFAULT
;
4872 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4873 unlock_user(argptr
, arg
, 0);
4875 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4876 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4877 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4879 if (target_ifc_buf
!= 0) {
4880 target_ifc_len
= host_ifconf
->ifc_len
;
4881 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4882 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4884 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4885 if (outbufsz
> MAX_STRUCT_SIZE
) {
4887 * We can't fit all the extents into the fixed size buffer.
4888 * Allocate one that is large enough and use it instead.
4890 host_ifconf
= g_try_malloc(outbufsz
);
4892 return -TARGET_ENOMEM
;
4894 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4897 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4899 host_ifconf
->ifc_len
= host_ifc_len
;
4901 host_ifc_buf
= NULL
;
4903 host_ifconf
->ifc_buf
= host_ifc_buf
;
4905 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4906 if (!is_error(ret
)) {
4907 /* convert host ifc_len to target ifc_len */
4909 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4910 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4911 host_ifconf
->ifc_len
= target_ifc_len
;
4913 /* restore target ifc_buf */
4915 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4917 /* copy struct ifconf to target user */
4919 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4921 return -TARGET_EFAULT
;
4922 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4923 unlock_user(argptr
, arg
, target_size
);
4925 if (target_ifc_buf
!= 0) {
4926 /* copy ifreq[] to target user */
4927 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4928 for (i
= 0; i
< nb_ifreq
; i
++) {
4929 thunk_convert(argptr
+ i
* target_ifreq_size
,
4930 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4931 ifreq_arg_type
, THUNK_TARGET
);
4933 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4938 g_free(host_ifconf
);
4944 #if defined(CONFIG_USBFS)
4945 #if HOST_LONG_BITS > 64
4946 #error USBDEVFS thunks do not support >64 bit hosts yet.
4949 uint64_t target_urb_adr
;
4950 uint64_t target_buf_adr
;
4951 char *target_buf_ptr
;
4952 struct usbdevfs_urb host_urb
;
4955 static GHashTable
*usbdevfs_urb_hashtable(void)
4957 static GHashTable
*urb_hashtable
;
4959 if (!urb_hashtable
) {
4960 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4962 return urb_hashtable
;
4965 static void urb_hashtable_insert(struct live_urb
*urb
)
4967 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4968 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4971 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4973 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4974 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4977 static void urb_hashtable_remove(struct live_urb
*urb
)
4979 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4980 g_hash_table_remove(urb_hashtable
, urb
);
/*
 * USBDEVFS_REAPURB / REAPURBNDELAY: reap a completed URB from the kernel,
 * translate it back into the guest's usbdevfs_urb, and release the host-side
 * bookkeeping allocated at submit time.
 *
 * The kernel returns a host pointer to our embedded host_urb; we recover the
 * enclosing struct live_urb via offsetof arithmetic.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* The kernel wrote back the host URB pointer we submitted; map it back
     * to the containing live_urb bookkeeping structure. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Copy data back to the guest buffer and release the lock taken at
     * submit time. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
/*
 * USBDEVFS_DISCARDURB: cancel a previously submitted URB.
 * The guest passes its own URB address; we look up the host-side live_urb
 * that was registered at submit time and hand its host_urb to the kernel.
 * The live_urb itself stays allocated; it is reclaimed when the URB is
 * eventually reaped.
 */
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
/*
 * USBDEVFS_SUBMITURB: convert the guest's usbdevfs_urb into a host URB,
 * lock the guest data buffer for the duration of the transfer, and submit
 * it to the kernel.  On success the live_urb is registered in the hash
 * table so DISCARDURB/REAPURB can find it later; on failure everything is
 * released here.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest URB address (reap key) and the guest buffer
     * address so the buffer pointer can be restored at reap time. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: undo the buffer lock (no copy-back) and free
         * the bookkeeping; nothing will ever reap this URB. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5118 #endif /* CONFIG_USBFS */
/*
 * Device-mapper ioctls (DM_*): the dm_ioctl header is followed by a
 * variable-length, command-specific payload whose internal structures
 * contain self-relative 'next' offsets and embedded strings, so generic
 * thunk conversion cannot handle it.  This routine converts the header
 * with the thunk machinery, then hand-converts the payload in both
 * directions per command.
 *
 * buf_temp (MAX_STRUCT_SIZE) is too small for the payload, so a larger
 * scratch buffer (big_buf) sized from the guest-announced data_size is
 * allocated and used for the actual ioctl.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed dm_ioctl header from guest to host. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    /* Payload lives at data_start past the header, both guest and host. */
    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert command-specific input payload guest -> host. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* First 8 bytes are the target sector and need byte swapping. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        /* Walk the chain of dm_target_spec + parameter-string entries,
         * converting each and rebuilding the 'next' offsets for the
         * host-side layout. */
        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        /* Convert command-specific output payload host -> guest. */
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Chain of dm_name_list entries; rebuild each 'next' offset
             * for the guest layout and flag overflow via
             * DM_BUFFER_FULL_FLAG, mirroring kernel behaviour. */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Array of dm_target_spec + status-string entries. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* count (u32) at offset 0, then u64 device numbers at offset 8;
             * swap each explicitly since the layout is raw. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions entries, same pattern as
             * DM_LIST_DEVICES above. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Convert the (possibly updated) header back to the guest. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/*
 * BLKPG: struct blkpg_ioctl_arg carries a 'data' pointer to a
 * struct blkpg_partition, so the generic converter cannot follow it.
 * Convert the outer struct, validate the opcode, fetch and convert the
 * partition payload into a local copy, and point the host struct at it
 * before issuing the ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
/*
 * SIOCADDRT/SIOCDELRT: struct rtentry contains an embedded rt_dev string
 * pointer, which the generic thunk converter cannot translate.  Walk the
 * struct field-by-field using the StructEntry offset tables, converting
 * every field normally except rt_dev, which is locked as a guest string
 * and swizzled to a host pointer for the ioctl.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Non-NULL device name: lock the guest string and hand the
                 * host pointer to the kernel. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* rtentry always contains an rt_dev field, so the loop above must
     * have found it. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5469 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5470 int fd
, int cmd
, abi_long arg
)
5472 int sig
= target_to_host_signal(arg
);
5473 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
/*
 * SIOCGSTAMP: fetch the timestamp of the last received packet and copy it
 * to the guest in either the old (32-bit time) or new (64-bit time)
 * timeval layout, selected by which target command number was used.
 */
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
/*
 * SIOCGSTAMPNS: like SIOCGSTAMP but with nanosecond resolution; copy the
 * host timespec to the guest in the old or new (64-bit time) layout
 * depending on which target command number was used.
 */
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
5525 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5526 int fd
, int cmd
, abi_long arg
)
5528 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5529 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
/*
 * Release the three guest string buffers (name/date/desc) locked by
 * target_to_host_drmversion().  When 'copy' is true the kernel-filled
 * contents are copied back to the guest (length = *_len); when false the
 * locks are dropped without copying.
 */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}
/*
 * Prepare a host drm_version from the guest's target_drm_version: copy the
 * three buffer lengths and lock each non-empty guest buffer for writing so
 * the kernel can fill it.  On failure, any buffers already locked are
 * released (without copy-back) before returning.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Drop whatever was locked so far; nothing to copy back. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
/*
 * Copy the kernel-filled drm_version results back to the guest struct and
 * release the locked string buffers with copy-back enabled.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
/*
 * Generic DRM ioctl handler.  Only DRM_IOCTL_VERSION is translated here
 * (it carries string pointers that need locking); everything else reports
 * ENOSYS.
 */
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                /* ioctl failed: drop the string locks without copy-back. */
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
/*
 * DRM_IOCTL_I915_GETPARAM: the struct contains a 'value' output pointer,
 * so point it at a local int for the ioctl and write the result back to
 * the guest-supplied address afterwards.
 */
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}
/*
 * i915-specific DRM ioctls: dispatch to the per-command translator.
 * Only DRM_IOCTL_I915_GETPARAM is handled; others report ENOSYS.
 */
static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    }
    return -TARGET_ENOSYS;
}
/*
 * TUNSETTXFILTER: struct tun_filter has a variable-length MAC address
 * array (count * ETH_ALEN) after the fixed header, so it cannot be
 * converted generically.  Convert the header fields, validate that the
 * address array fits in buf_temp, and copy the addresses verbatim
 * (MAC bytes need no swapping).
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* Reject filters whose address array would overflow buf_temp. */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5698 IOCTLEntry ioctl_entries
[] = {
5699 #define IOCTL(cmd, access, ...) \
5700 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5701 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5702 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5703 #define IOCTL_IGNORE(cmd) \
5704 { TARGET_ ## cmd, 0, #cmd },
5709 /* ??? Implement proper locking for ioctls. */
5710 /* do_ioctl() Must return target values and target errnos. */
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
/*
 * Central ioctl dispatcher: look up the target command in ioctl_entries,
 * then either delegate to a special-case handler (ie->do_ioctl) or perform
 * generic argument conversion driven by the entry's argtype description.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table is terminated by a zero target_cmd. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Scalar argument: pass through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Read-only for the guest: run the ioctl into buf_temp, then
             * convert the result out to guest memory. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Write-only: convert guest data in, then run the ioctl. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read-write: convert in, run, convert back out on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag translation: guest <-> host input-mode flag bits.
 * Each entry is { target_mask, target_bits, host_mask, host_bits }. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
};
/* termios c_oflag translation: guest <-> host output-mode flag bits,
 * including the multi-bit delay fields (NLDLY/CRDLY/TABDLY/...). */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
};
/* termios c_cflag translation: guest <-> host control-mode flag bits.
 * The CBAUD entries map each baud-rate code within the multi-bit field;
 * CSIZE likewise maps the character-size codes. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
};
/* termios c_lflag translation: guest <-> host local-mode flag bits. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
};
/*
 * Convert a guest target_termios into a host termios: translate each flag
 * word through its bitmask table (with byte swapping) and remap the
 * control-character array index-by-index, since guest and host V* indices
 * may differ.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Clear first: host slots with no guest counterpart stay zero. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/*
 * Convert a host termios into a guest target_termios: the inverse of
 * target_to_host_termios() — flag words go through the same tables in
 * the other direction (then get byte-swapped to guest order), and the
 * control-character array is remapped index-by-index.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Clear first: guest slots with no host counterpart stay zero. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for termios: uses the custom converters above instead
 * of field-by-field conversion, with sizes/alignment for both ABIs. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
/* mmap(2) flags translation: guest <-> host MAP_* bits. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
};
6004 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6005 * TARGET_I386 is defined if TARGET_X86_64 is defined
6007 #if defined(TARGET_I386)
6009 /* NOTE: there is really one LDT for all the threads */
6010 static uint8_t *ldt_table
;
/*
 * modify_ldt(func=0): copy the emulated LDT into a guest buffer.
 * Returns the number of bytes copied (capped at bytecount), 0 if no LDT
 * has been allocated yet, or -TARGET_EFAULT on a bad guest pointer.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
6031 /* XXX: add locking support */
/* XXX: add locking support */
/*
 * modify_ldt(func=1 or 0x11): install one LDT descriptor from the guest's
 * modify_ldt_ldt_s request.  'oldmode' selects the legacy semantics
 * (no 'useable' bit, different clearing rules).  The descriptor words are
 * packed exactly as the Linux kernel does it.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (layout matches the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit descriptor words. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6123 /* specific and weird i386 syscalls */
/* specific and weird i386 syscalls */
/*
 * modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes an entry
 * with the legacy semantics, func 0x11 writes with the modern semantics.
 */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
6146 #if defined(TARGET_ABI32)
6147 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6149 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6150 struct target_modify_ldt_ldt_s ldt_info
;
6151 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6152 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6153 int seg_not_present
, useable
, lm
;
6154 uint32_t *lp
, entry_1
, entry_2
;
6157 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6158 if (!target_ldt_info
)
6159 return -TARGET_EFAULT
;
6160 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6161 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6162 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6163 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6164 if (ldt_info
.entry_number
== -1) {
6165 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6166 if (gdt_table
[i
] == 0) {
6167 ldt_info
.entry_number
= i
;
6168 target_ldt_info
->entry_number
= tswap32(i
);
6173 unlock_user_struct(target_ldt_info
, ptr
, 1);
6175 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6176 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6177 return -TARGET_EINVAL
;
6178 seg_32bit
= ldt_info
.flags
& 1;
6179 contents
= (ldt_info
.flags
>> 1) & 3;
6180 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6181 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6182 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6183 useable
= (ldt_info
.flags
>> 6) & 1;
6187 lm
= (ldt_info
.flags
>> 7) & 1;
6190 if (contents
== 3) {
6191 if (seg_not_present
== 0)
6192 return -TARGET_EINVAL
;
6195 /* NOTE: same code as Linux kernel */
6196 /* Allow LDTs to be cleared by the user. */
6197 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6198 if ((contents
== 0 &&
6199 read_exec_only
== 1 &&
6201 limit_in_pages
== 0 &&
6202 seg_not_present
== 1 &&
6210 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6211 (ldt_info
.limit
& 0x0ffff);
6212 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6213 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6214 (ldt_info
.limit
& 0xf0000) |
6215 ((read_exec_only
^ 1) << 9) |
6217 ((seg_not_present
^ 1) << 15) |
6219 (limit_in_pages
<< 23) |
6224 /* Install the new entry ... */
6226 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6227 lp
[0] = tswap32(entry_1
);
6228 lp
[1] = tswap32(entry_2
);
6232 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6234 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6235 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6236 uint32_t base_addr
, limit
, flags
;
6237 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6238 int seg_not_present
, useable
, lm
;
6239 uint32_t *lp
, entry_1
, entry_2
;
6241 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6242 if (!target_ldt_info
)
6243 return -TARGET_EFAULT
;
6244 idx
= tswap32(target_ldt_info
->entry_number
);
6245 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6246 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6247 unlock_user_struct(target_ldt_info
, ptr
, 1);
6248 return -TARGET_EINVAL
;
6250 lp
= (uint32_t *)(gdt_table
+ idx
);
6251 entry_1
= tswap32(lp
[0]);
6252 entry_2
= tswap32(lp
[1]);
6254 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6255 contents
= (entry_2
>> 10) & 3;
6256 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6257 seg_32bit
= (entry_2
>> 22) & 1;
6258 limit_in_pages
= (entry_2
>> 23) & 1;
6259 useable
= (entry_2
>> 20) & 1;
6263 lm
= (entry_2
>> 21) & 1;
6265 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6266 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6267 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6268 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6269 base_addr
= (entry_1
>> 16) |
6270 (entry_2
& 0xff000000) |
6271 ((entry_2
& 0xff) << 16);
6272 target_ldt_info
->base_addr
= tswapal(base_addr
);
6273 target_ldt_info
->limit
= tswap32(limit
);
6274 target_ldt_info
->flags
= tswap32(flags
);
6275 unlock_user_struct(target_ldt_info
, ptr
, 1);
6279 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6281 return -TARGET_ENOSYS
;
6284 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6291 case TARGET_ARCH_SET_GS
:
6292 case TARGET_ARCH_SET_FS
:
6293 if (code
== TARGET_ARCH_SET_GS
)
6297 cpu_x86_load_seg(env
, idx
, 0);
6298 env
->segs
[idx
].base
= addr
;
6300 case TARGET_ARCH_GET_GS
:
6301 case TARGET_ARCH_GET_FS
:
6302 if (code
== TARGET_ARCH_GET_GS
)
6306 val
= env
->segs
[idx
].base
;
6307 if (put_user(val
, addr
, abi_ulong
))
6308 ret
= -TARGET_EFAULT
;
6311 ret
= -TARGET_EINVAL
;
6316 #endif /* defined(TARGET_ABI32 */
6317 #endif /* defined(TARGET_I386) */
6320 * These constants are generic. Supply any that are missing from the host.
6323 # define PR_SET_NAME 15
6324 # define PR_GET_NAME 16
6326 #ifndef PR_SET_FP_MODE
6327 # define PR_SET_FP_MODE 45
6328 # define PR_GET_FP_MODE 46
6329 # define PR_FP_MODE_FR (1 << 0)
6330 # define PR_FP_MODE_FRE (1 << 1)
6332 #ifndef PR_SVE_SET_VL
6333 # define PR_SVE_SET_VL 50
6334 # define PR_SVE_GET_VL 51
6335 # define PR_SVE_VL_LEN_MASK 0xffff
6336 # define PR_SVE_VL_INHERIT (1 << 17)
6338 #ifndef PR_PAC_RESET_KEYS
6339 # define PR_PAC_RESET_KEYS 54
6340 # define PR_PAC_APIAKEY (1 << 0)
6341 # define PR_PAC_APIBKEY (1 << 1)
6342 # define PR_PAC_APDAKEY (1 << 2)
6343 # define PR_PAC_APDBKEY (1 << 3)
6344 # define PR_PAC_APGAKEY (1 << 4)
6346 #ifndef PR_SET_TAGGED_ADDR_CTRL
6347 # define PR_SET_TAGGED_ADDR_CTRL 55
6348 # define PR_GET_TAGGED_ADDR_CTRL 56
6349 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
6351 #ifndef PR_MTE_TCF_SHIFT
6352 # define PR_MTE_TCF_SHIFT 1
6353 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6354 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6355 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6356 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6357 # define PR_MTE_TAG_SHIFT 3
6358 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
6360 #ifndef PR_SET_IO_FLUSHER
6361 # define PR_SET_IO_FLUSHER 57
6362 # define PR_GET_IO_FLUSHER 58
6364 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6365 # define PR_SET_SYSCALL_USER_DISPATCH 59
6368 #include "target_prctl.h"
6370 static abi_long
do_prctl_inval0(CPUArchState
*env
)
6372 return -TARGET_EINVAL
;
6375 static abi_long
do_prctl_inval1(CPUArchState
*env
, abi_long arg2
)
6377 return -TARGET_EINVAL
;
6380 #ifndef do_prctl_get_fp_mode
6381 #define do_prctl_get_fp_mode do_prctl_inval0
6383 #ifndef do_prctl_set_fp_mode
6384 #define do_prctl_set_fp_mode do_prctl_inval1
6386 #ifndef do_prctl_get_vl
6387 #define do_prctl_get_vl do_prctl_inval0
6389 #ifndef do_prctl_set_vl
6390 #define do_prctl_set_vl do_prctl_inval1
6392 #ifndef do_prctl_reset_keys
6393 #define do_prctl_reset_keys do_prctl_inval1
6395 #ifndef do_prctl_set_tagged_addr_ctrl
6396 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6398 #ifndef do_prctl_get_tagged_addr_ctrl
6399 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6401 #ifndef do_prctl_get_unalign
6402 #define do_prctl_get_unalign do_prctl_inval1
6404 #ifndef do_prctl_set_unalign
6405 #define do_prctl_set_unalign do_prctl_inval1
6408 static abi_long
do_prctl(CPUArchState
*env
, abi_long option
, abi_long arg2
,
6409 abi_long arg3
, abi_long arg4
, abi_long arg5
)
6414 case PR_GET_PDEATHSIG
:
6417 ret
= get_errno(prctl(PR_GET_PDEATHSIG
, &deathsig
,
6419 if (!is_error(ret
) &&
6420 put_user_s32(host_to_target_signal(deathsig
), arg2
)) {
6421 return -TARGET_EFAULT
;
6425 case PR_SET_PDEATHSIG
:
6426 return get_errno(prctl(PR_SET_PDEATHSIG
, target_to_host_signal(arg2
),
6430 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
6432 return -TARGET_EFAULT
;
6434 ret
= get_errno(prctl(PR_GET_NAME
, (uintptr_t)name
,
6436 unlock_user(name
, arg2
, 16);
6441 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
6443 return -TARGET_EFAULT
;
6445 ret
= get_errno(prctl(PR_SET_NAME
, (uintptr_t)name
,
6447 unlock_user(name
, arg2
, 0);
6450 case PR_GET_FP_MODE
:
6451 return do_prctl_get_fp_mode(env
);
6452 case PR_SET_FP_MODE
:
6453 return do_prctl_set_fp_mode(env
, arg2
);
6455 return do_prctl_get_vl(env
);
6457 return do_prctl_set_vl(env
, arg2
);
6458 case PR_PAC_RESET_KEYS
:
6459 if (arg3
|| arg4
|| arg5
) {
6460 return -TARGET_EINVAL
;
6462 return do_prctl_reset_keys(env
, arg2
);
6463 case PR_SET_TAGGED_ADDR_CTRL
:
6464 if (arg3
|| arg4
|| arg5
) {
6465 return -TARGET_EINVAL
;
6467 return do_prctl_set_tagged_addr_ctrl(env
, arg2
);
6468 case PR_GET_TAGGED_ADDR_CTRL
:
6469 if (arg2
|| arg3
|| arg4
|| arg5
) {
6470 return -TARGET_EINVAL
;
6472 return do_prctl_get_tagged_addr_ctrl(env
);
6474 case PR_GET_UNALIGN
:
6475 return do_prctl_get_unalign(env
, arg2
);
6476 case PR_SET_UNALIGN
:
6477 return do_prctl_set_unalign(env
, arg2
);
6479 case PR_CAP_AMBIENT
:
6480 case PR_CAPBSET_READ
:
6481 case PR_CAPBSET_DROP
:
6482 case PR_GET_DUMPABLE
:
6483 case PR_SET_DUMPABLE
:
6484 case PR_GET_KEEPCAPS
:
6485 case PR_SET_KEEPCAPS
:
6486 case PR_GET_SECUREBITS
:
6487 case PR_SET_SECUREBITS
:
6490 case PR_GET_TIMERSLACK
:
6491 case PR_SET_TIMERSLACK
:
6493 case PR_MCE_KILL_GET
:
6494 case PR_GET_NO_NEW_PRIVS
:
6495 case PR_SET_NO_NEW_PRIVS
:
6496 case PR_GET_IO_FLUSHER
:
6497 case PR_SET_IO_FLUSHER
:
6498 /* Some prctl options have no pointer arguments and we can pass on. */
6499 return get_errno(prctl(option
, arg2
, arg3
, arg4
, arg5
));
6501 case PR_GET_CHILD_SUBREAPER
:
6502 case PR_SET_CHILD_SUBREAPER
:
6503 case PR_GET_SPECULATION_CTRL
:
6504 case PR_SET_SPECULATION_CTRL
:
6505 case PR_GET_TID_ADDRESS
:
6507 return -TARGET_EINVAL
;
6511 /* Was used for SPE on PowerPC. */
6512 return -TARGET_EINVAL
;
6519 case PR_GET_SECCOMP
:
6520 case PR_SET_SECCOMP
:
6521 case PR_SET_SYSCALL_USER_DISPATCH
:
6522 case PR_GET_THP_DISABLE
:
6523 case PR_SET_THP_DISABLE
:
6526 /* Disable to prevent the target disabling stuff we need. */
6527 return -TARGET_EINVAL
;
6530 qemu_log_mask(LOG_UNIMP
, "Unsupported prctl: " TARGET_ABI_FMT_ld
"\n",
6532 return -TARGET_EINVAL
;
6536 #define NEW_STACK_SIZE 0x40000
6539 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6542 pthread_mutex_t mutex
;
6543 pthread_cond_t cond
;
6546 abi_ulong child_tidptr
;
6547 abi_ulong parent_tidptr
;
6551 static void *clone_func(void *arg
)
6553 new_thread_info
*info
= arg
;
6558 rcu_register_thread();
6559 tcg_register_thread();
6563 ts
= (TaskState
*)cpu
->opaque
;
6564 info
->tid
= sys_gettid();
6566 if (info
->child_tidptr
)
6567 put_user_u32(info
->tid
, info
->child_tidptr
);
6568 if (info
->parent_tidptr
)
6569 put_user_u32(info
->tid
, info
->parent_tidptr
);
6570 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6571 /* Enable signals. */
6572 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6573 /* Signal to the parent that we're ready. */
6574 pthread_mutex_lock(&info
->mutex
);
6575 pthread_cond_broadcast(&info
->cond
);
6576 pthread_mutex_unlock(&info
->mutex
);
6577 /* Wait until the parent has finished initializing the tls state. */
6578 pthread_mutex_lock(&clone_lock
);
6579 pthread_mutex_unlock(&clone_lock
);
6585 /* do_fork() Must return host values and target errnos (unlike most
6586 do_*() functions). */
6587 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6588 abi_ulong parent_tidptr
, target_ulong newtls
,
6589 abi_ulong child_tidptr
)
6591 CPUState
*cpu
= env_cpu(env
);
6595 CPUArchState
*new_env
;
6598 flags
&= ~CLONE_IGNORED_FLAGS
;
6600 /* Emulate vfork() with fork() */
6601 if (flags
& CLONE_VFORK
)
6602 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6604 if (flags
& CLONE_VM
) {
6605 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6606 new_thread_info info
;
6607 pthread_attr_t attr
;
6609 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6610 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6611 return -TARGET_EINVAL
;
6614 ts
= g_new0(TaskState
, 1);
6615 init_task_state(ts
);
6617 /* Grab a mutex so that thread setup appears atomic. */
6618 pthread_mutex_lock(&clone_lock
);
6621 * If this is our first additional thread, we need to ensure we
6622 * generate code for parallel execution and flush old translations.
6623 * Do this now so that the copy gets CF_PARALLEL too.
6625 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6626 cpu
->tcg_cflags
|= CF_PARALLEL
;
6630 /* we create a new CPU instance. */
6631 new_env
= cpu_copy(env
);
6632 /* Init regs that differ from the parent. */
6633 cpu_clone_regs_child(new_env
, newsp
, flags
);
6634 cpu_clone_regs_parent(env
, flags
);
6635 new_cpu
= env_cpu(new_env
);
6636 new_cpu
->opaque
= ts
;
6637 ts
->bprm
= parent_ts
->bprm
;
6638 ts
->info
= parent_ts
->info
;
6639 ts
->signal_mask
= parent_ts
->signal_mask
;
6641 if (flags
& CLONE_CHILD_CLEARTID
) {
6642 ts
->child_tidptr
= child_tidptr
;
6645 if (flags
& CLONE_SETTLS
) {
6646 cpu_set_tls (new_env
, newtls
);
6649 memset(&info
, 0, sizeof(info
));
6650 pthread_mutex_init(&info
.mutex
, NULL
);
6651 pthread_mutex_lock(&info
.mutex
);
6652 pthread_cond_init(&info
.cond
, NULL
);
6654 if (flags
& CLONE_CHILD_SETTID
) {
6655 info
.child_tidptr
= child_tidptr
;
6657 if (flags
& CLONE_PARENT_SETTID
) {
6658 info
.parent_tidptr
= parent_tidptr
;
6661 ret
= pthread_attr_init(&attr
);
6662 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6663 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6664 /* It is not safe to deliver signals until the child has finished
6665 initializing, so temporarily block all signals. */
6666 sigfillset(&sigmask
);
6667 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6668 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6670 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6671 /* TODO: Free new CPU state if thread creation failed. */
6673 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6674 pthread_attr_destroy(&attr
);
6676 /* Wait for the child to initialize. */
6677 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6682 pthread_mutex_unlock(&info
.mutex
);
6683 pthread_cond_destroy(&info
.cond
);
6684 pthread_mutex_destroy(&info
.mutex
);
6685 pthread_mutex_unlock(&clone_lock
);
6687 /* if no CLONE_VM, we consider it is a fork */
6688 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6689 return -TARGET_EINVAL
;
6692 /* We can't support custom termination signals */
6693 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6694 return -TARGET_EINVAL
;
6697 if (block_signals()) {
6698 return -QEMU_ERESTARTSYS
;
6704 /* Child Process. */
6705 cpu_clone_regs_child(env
, newsp
, flags
);
6707 /* There is a race condition here. The parent process could
6708 theoretically read the TID in the child process before the child
6709 tid is set. This would require using either ptrace
6710 (not implemented) or having *_tidptr to point at a shared memory
6711 mapping. We can't repeat the spinlock hack used above because
6712 the child process gets its own copy of the lock. */
6713 if (flags
& CLONE_CHILD_SETTID
)
6714 put_user_u32(sys_gettid(), child_tidptr
);
6715 if (flags
& CLONE_PARENT_SETTID
)
6716 put_user_u32(sys_gettid(), parent_tidptr
);
6717 ts
= (TaskState
*)cpu
->opaque
;
6718 if (flags
& CLONE_SETTLS
)
6719 cpu_set_tls (env
, newtls
);
6720 if (flags
& CLONE_CHILD_CLEARTID
)
6721 ts
->child_tidptr
= child_tidptr
;
6723 cpu_clone_regs_parent(env
, flags
);
6730 /* warning : doesn't handle linux specific flags... */
6731 static int target_to_host_fcntl_cmd(int cmd
)
6736 case TARGET_F_DUPFD
:
6737 case TARGET_F_GETFD
:
6738 case TARGET_F_SETFD
:
6739 case TARGET_F_GETFL
:
6740 case TARGET_F_SETFL
:
6741 case TARGET_F_OFD_GETLK
:
6742 case TARGET_F_OFD_SETLK
:
6743 case TARGET_F_OFD_SETLKW
:
6746 case TARGET_F_GETLK
:
6749 case TARGET_F_SETLK
:
6752 case TARGET_F_SETLKW
:
6755 case TARGET_F_GETOWN
:
6758 case TARGET_F_SETOWN
:
6761 case TARGET_F_GETSIG
:
6764 case TARGET_F_SETSIG
:
6767 #if TARGET_ABI_BITS == 32
6768 case TARGET_F_GETLK64
:
6771 case TARGET_F_SETLK64
:
6774 case TARGET_F_SETLKW64
:
6778 case TARGET_F_SETLEASE
:
6781 case TARGET_F_GETLEASE
:
6784 #ifdef F_DUPFD_CLOEXEC
6785 case TARGET_F_DUPFD_CLOEXEC
:
6786 ret
= F_DUPFD_CLOEXEC
;
6789 case TARGET_F_NOTIFY
:
6793 case TARGET_F_GETOWN_EX
:
6798 case TARGET_F_SETOWN_EX
:
6803 case TARGET_F_SETPIPE_SZ
:
6806 case TARGET_F_GETPIPE_SZ
:
6811 case TARGET_F_ADD_SEALS
:
6814 case TARGET_F_GET_SEALS
:
6819 ret
= -TARGET_EINVAL
;
6823 #if defined(__powerpc64__)
6824 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6825 * is not supported by kernel. The glibc fcntl call actually adjusts
6826 * them to 5, 6 and 7 before making the syscall(). Since we make the
6827 * syscall directly, adjust to what is supported by the kernel.
6829 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6830 ret
-= F_GETLK64
- 5;
6837 #define FLOCK_TRANSTBL \
6839 TRANSTBL_CONVERT(F_RDLCK); \
6840 TRANSTBL_CONVERT(F_WRLCK); \
6841 TRANSTBL_CONVERT(F_UNLCK); \
6844 static int target_to_host_flock(int type
)
6846 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6848 #undef TRANSTBL_CONVERT
6849 return -TARGET_EINVAL
;
6852 static int host_to_target_flock(int type
)
6854 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6856 #undef TRANSTBL_CONVERT
6857 /* if we don't know how to convert the value coming
6858 * from the host we copy to the target field as-is
6863 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6864 abi_ulong target_flock_addr
)
6866 struct target_flock
*target_fl
;
6869 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6870 return -TARGET_EFAULT
;
6873 __get_user(l_type
, &target_fl
->l_type
);
6874 l_type
= target_to_host_flock(l_type
);
6878 fl
->l_type
= l_type
;
6879 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6880 __get_user(fl
->l_start
, &target_fl
->l_start
);
6881 __get_user(fl
->l_len
, &target_fl
->l_len
);
6882 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6883 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6887 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6888 const struct flock64
*fl
)
6890 struct target_flock
*target_fl
;
6893 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6894 return -TARGET_EFAULT
;
6897 l_type
= host_to_target_flock(fl
->l_type
);
6898 __put_user(l_type
, &target_fl
->l_type
);
6899 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6900 __put_user(fl
->l_start
, &target_fl
->l_start
);
6901 __put_user(fl
->l_len
, &target_fl
->l_len
);
6902 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6903 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6907 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6908 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6910 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6911 struct target_oabi_flock64
{
6919 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6920 abi_ulong target_flock_addr
)
6922 struct target_oabi_flock64
*target_fl
;
6925 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6926 return -TARGET_EFAULT
;
6929 __get_user(l_type
, &target_fl
->l_type
);
6930 l_type
= target_to_host_flock(l_type
);
6934 fl
->l_type
= l_type
;
6935 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6936 __get_user(fl
->l_start
, &target_fl
->l_start
);
6937 __get_user(fl
->l_len
, &target_fl
->l_len
);
6938 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6939 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6943 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6944 const struct flock64
*fl
)
6946 struct target_oabi_flock64
*target_fl
;
6949 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6950 return -TARGET_EFAULT
;
6953 l_type
= host_to_target_flock(fl
->l_type
);
6954 __put_user(l_type
, &target_fl
->l_type
);
6955 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6956 __put_user(fl
->l_start
, &target_fl
->l_start
);
6957 __put_user(fl
->l_len
, &target_fl
->l_len
);
6958 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6959 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6964 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6965 abi_ulong target_flock_addr
)
6967 struct target_flock64
*target_fl
;
6970 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6971 return -TARGET_EFAULT
;
6974 __get_user(l_type
, &target_fl
->l_type
);
6975 l_type
= target_to_host_flock(l_type
);
6979 fl
->l_type
= l_type
;
6980 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6981 __get_user(fl
->l_start
, &target_fl
->l_start
);
6982 __get_user(fl
->l_len
, &target_fl
->l_len
);
6983 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6984 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6988 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6989 const struct flock64
*fl
)
6991 struct target_flock64
*target_fl
;
6994 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6995 return -TARGET_EFAULT
;
6998 l_type
= host_to_target_flock(fl
->l_type
);
6999 __put_user(l_type
, &target_fl
->l_type
);
7000 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
7001 __put_user(fl
->l_start
, &target_fl
->l_start
);
7002 __put_user(fl
->l_len
, &target_fl
->l_len
);
7003 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
7004 unlock_user_struct(target_fl
, target_flock_addr
, 1);
7008 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
7010 struct flock64 fl64
;
7012 struct f_owner_ex fox
;
7013 struct target_f_owner_ex
*target_fox
;
7016 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
7018 if (host_cmd
== -TARGET_EINVAL
)
7022 case TARGET_F_GETLK
:
7023 ret
= copy_from_user_flock(&fl64
, arg
);
7027 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7029 ret
= copy_to_user_flock(arg
, &fl64
);
7033 case TARGET_F_SETLK
:
7034 case TARGET_F_SETLKW
:
7035 ret
= copy_from_user_flock(&fl64
, arg
);
7039 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7042 case TARGET_F_GETLK64
:
7043 case TARGET_F_OFD_GETLK
:
7044 ret
= copy_from_user_flock64(&fl64
, arg
);
7048 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7050 ret
= copy_to_user_flock64(arg
, &fl64
);
7053 case TARGET_F_SETLK64
:
7054 case TARGET_F_SETLKW64
:
7055 case TARGET_F_OFD_SETLK
:
7056 case TARGET_F_OFD_SETLKW
:
7057 ret
= copy_from_user_flock64(&fl64
, arg
);
7061 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
7064 case TARGET_F_GETFL
:
7065 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7067 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
7071 case TARGET_F_SETFL
:
7072 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
7073 target_to_host_bitmask(arg
,
7078 case TARGET_F_GETOWN_EX
:
7079 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7081 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
7082 return -TARGET_EFAULT
;
7083 target_fox
->type
= tswap32(fox
.type
);
7084 target_fox
->pid
= tswap32(fox
.pid
);
7085 unlock_user_struct(target_fox
, arg
, 1);
7091 case TARGET_F_SETOWN_EX
:
7092 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
7093 return -TARGET_EFAULT
;
7094 fox
.type
= tswap32(target_fox
->type
);
7095 fox
.pid
= tswap32(target_fox
->pid
);
7096 unlock_user_struct(target_fox
, arg
, 0);
7097 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7101 case TARGET_F_SETSIG
:
7102 ret
= get_errno(safe_fcntl(fd
, host_cmd
, target_to_host_signal(arg
)));
7105 case TARGET_F_GETSIG
:
7106 ret
= host_to_target_signal(get_errno(safe_fcntl(fd
, host_cmd
, arg
)));
7109 case TARGET_F_SETOWN
:
7110 case TARGET_F_GETOWN
:
7111 case TARGET_F_SETLEASE
:
7112 case TARGET_F_GETLEASE
:
7113 case TARGET_F_SETPIPE_SZ
:
7114 case TARGET_F_GETPIPE_SZ
:
7115 case TARGET_F_ADD_SEALS
:
7116 case TARGET_F_GET_SEALS
:
7117 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7121 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
7129 static inline int high2lowuid(int uid
)
7137 static inline int high2lowgid(int gid
)
7145 static inline int low2highuid(int uid
)
7147 if ((int16_t)uid
== -1)
7153 static inline int low2highgid(int gid
)
7155 if ((int16_t)gid
== -1)
7160 static inline int tswapid(int id
)
7165 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7167 #else /* !USE_UID16 */
7168 static inline int high2lowuid(int uid
)
7172 static inline int high2lowgid(int gid
)
7176 static inline int low2highuid(int uid
)
7180 static inline int low2highgid(int gid
)
7184 static inline int tswapid(int id
)
7189 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7191 #endif /* USE_UID16 */
7193 /* We must do direct syscalls for setting UID/GID, because we want to
7194 * implement the Linux system call semantics of "change only for this thread",
7195 * not the libc/POSIX semantics of "change for all threads in process".
7196 * (See http://ewontfix.com/17/ for more details.)
7197 * We use the 32-bit version of the syscalls if present; if it is not
7198 * then either the host architecture supports 32-bit UIDs natively with
7199 * the standard syscall, or the 16-bit UID is the best we can do.
7201 #ifdef __NR_setuid32
7202 #define __NR_sys_setuid __NR_setuid32
7204 #define __NR_sys_setuid __NR_setuid
7206 #ifdef __NR_setgid32
7207 #define __NR_sys_setgid __NR_setgid32
7209 #define __NR_sys_setgid __NR_setgid
7211 #ifdef __NR_setresuid32
7212 #define __NR_sys_setresuid __NR_setresuid32
7214 #define __NR_sys_setresuid __NR_setresuid
7216 #ifdef __NR_setresgid32
7217 #define __NR_sys_setresgid __NR_setresgid32
7219 #define __NR_sys_setresgid __NR_setresgid
7222 _syscall1(int, sys_setuid
, uid_t
, uid
)
7223 _syscall1(int, sys_setgid
, gid_t
, gid
)
7224 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7225 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7227 void syscall_init(void)
7230 const argtype
*arg_type
;
7233 thunk_init(STRUCT_MAX
);
7235 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7236 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7237 #include "syscall_types.h"
7239 #undef STRUCT_SPECIAL
7241 /* we patch the ioctl size if necessary. We rely on the fact that
7242 no ioctl has all the bits at '1' in the size field */
7244 while (ie
->target_cmd
!= 0) {
7245 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7246 TARGET_IOC_SIZEMASK
) {
7247 arg_type
= ie
->arg_type
;
7248 if (arg_type
[0] != TYPE_PTR
) {
7249 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7254 size
= thunk_type_size(arg_type
, 0);
7255 ie
->target_cmd
= (ie
->target_cmd
&
7256 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7257 (size
<< TARGET_IOC_SIZESHIFT
);
7260 /* automatic consistency check if same arch */
7261 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7262 (defined(__x86_64__) && defined(TARGET_X86_64))
7263 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7264 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7265 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
7272 #ifdef TARGET_NR_truncate64
7273 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
7278 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
7282 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7286 #ifdef TARGET_NR_ftruncate64
7287 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
7292 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
7296 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7300 #if defined(TARGET_NR_timer_settime) || \
7301 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7302 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_its
,
7303 abi_ulong target_addr
)
7305 if (target_to_host_timespec(&host_its
->it_interval
, target_addr
+
7306 offsetof(struct target_itimerspec
,
7308 target_to_host_timespec(&host_its
->it_value
, target_addr
+
7309 offsetof(struct target_itimerspec
,
7311 return -TARGET_EFAULT
;
7318 #if defined(TARGET_NR_timer_settime64) || \
7319 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7320 static inline abi_long
target_to_host_itimerspec64(struct itimerspec
*host_its
,
7321 abi_ulong target_addr
)
7323 if (target_to_host_timespec64(&host_its
->it_interval
, target_addr
+
7324 offsetof(struct target__kernel_itimerspec
,
7326 target_to_host_timespec64(&host_its
->it_value
, target_addr
+
7327 offsetof(struct target__kernel_itimerspec
,
7329 return -TARGET_EFAULT
;
7336 #if ((defined(TARGET_NR_timerfd_gettime) || \
7337 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7338 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7339 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7340 struct itimerspec
*host_its
)
7342 if (host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7344 &host_its
->it_interval
) ||
7345 host_to_target_timespec(target_addr
+ offsetof(struct target_itimerspec
,
7347 &host_its
->it_value
)) {
7348 return -TARGET_EFAULT
;
7354 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7355 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7356 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7357 static inline abi_long
host_to_target_itimerspec64(abi_ulong target_addr
,
7358 struct itimerspec
*host_its
)
7360 if (host_to_target_timespec64(target_addr
+
7361 offsetof(struct target__kernel_itimerspec
,
7363 &host_its
->it_interval
) ||
7364 host_to_target_timespec64(target_addr
+
7365 offsetof(struct target__kernel_itimerspec
,
7367 &host_its
->it_value
)) {
7368 return -TARGET_EFAULT
;
7374 #if defined(TARGET_NR_adjtimex) || \
7375 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7376 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7377 abi_long target_addr
)
7379 struct target_timex
*target_tx
;
7381 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7382 return -TARGET_EFAULT
;
7385 __get_user(host_tx
->modes
, &target_tx
->modes
);
7386 __get_user(host_tx
->offset
, &target_tx
->offset
);
7387 __get_user(host_tx
->freq
, &target_tx
->freq
);
7388 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7389 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7390 __get_user(host_tx
->status
, &target_tx
->status
);
7391 __get_user(host_tx
->constant
, &target_tx
->constant
);
7392 __get_user(host_tx
->precision
, &target_tx
->precision
);
7393 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7394 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7395 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7396 __get_user(host_tx
->tick
, &target_tx
->tick
);
7397 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7398 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7399 __get_user(host_tx
->shift
, &target_tx
->shift
);
7400 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7401 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7402 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7403 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7404 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7405 __get_user(host_tx
->tai
, &target_tx
->tai
);
7407 unlock_user_struct(target_tx
, target_addr
, 0);
7411 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7412 struct timex
*host_tx
)
7414 struct target_timex
*target_tx
;
7416 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7417 return -TARGET_EFAULT
;
7420 __put_user(host_tx
->modes
, &target_tx
->modes
);
7421 __put_user(host_tx
->offset
, &target_tx
->offset
);
7422 __put_user(host_tx
->freq
, &target_tx
->freq
);
7423 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7424 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7425 __put_user(host_tx
->status
, &target_tx
->status
);
7426 __put_user(host_tx
->constant
, &target_tx
->constant
);
7427 __put_user(host_tx
->precision
, &target_tx
->precision
);
7428 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7429 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7430 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7431 __put_user(host_tx
->tick
, &target_tx
->tick
);
7432 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7433 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7434 __put_user(host_tx
->shift
, &target_tx
->shift
);
7435 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7436 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7437 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7438 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7439 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7440 __put_user(host_tx
->tai
, &target_tx
->tai
);
7442 unlock_user_struct(target_tx
, target_addr
, 1);
7448 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7449 static inline abi_long
target_to_host_timex64(struct timex
*host_tx
,
7450 abi_long target_addr
)
7452 struct target__kernel_timex
*target_tx
;
7454 if (copy_from_user_timeval64(&host_tx
->time
, target_addr
+
7455 offsetof(struct target__kernel_timex
,
7457 return -TARGET_EFAULT
;
7460 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7461 return -TARGET_EFAULT
;
7464 __get_user(host_tx
->modes
, &target_tx
->modes
);
7465 __get_user(host_tx
->offset
, &target_tx
->offset
);
7466 __get_user(host_tx
->freq
, &target_tx
->freq
);
7467 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7468 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7469 __get_user(host_tx
->status
, &target_tx
->status
);
7470 __get_user(host_tx
->constant
, &target_tx
->constant
);
7471 __get_user(host_tx
->precision
, &target_tx
->precision
);
7472 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7473 __get_user(host_tx
->tick
, &target_tx
->tick
);
7474 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7475 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7476 __get_user(host_tx
->shift
, &target_tx
->shift
);
7477 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7478 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7479 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7480 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7481 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7482 __get_user(host_tx
->tai
, &target_tx
->tai
);
7484 unlock_user_struct(target_tx
, target_addr
, 0);
7488 static inline abi_long
host_to_target_timex64(abi_long target_addr
,
7489 struct timex
*host_tx
)
7491 struct target__kernel_timex
*target_tx
;
7493 if (copy_to_user_timeval64(target_addr
+
7494 offsetof(struct target__kernel_timex
, time
),
7496 return -TARGET_EFAULT
;
7499 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7500 return -TARGET_EFAULT
;
7503 __put_user(host_tx
->modes
, &target_tx
->modes
);
7504 __put_user(host_tx
->offset
, &target_tx
->offset
);
7505 __put_user(host_tx
->freq
, &target_tx
->freq
);
7506 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7507 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7508 __put_user(host_tx
->status
, &target_tx
->status
);
7509 __put_user(host_tx
->constant
, &target_tx
->constant
);
7510 __put_user(host_tx
->precision
, &target_tx
->precision
);
7511 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7512 __put_user(host_tx
->tick
, &target_tx
->tick
);
7513 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7514 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7515 __put_user(host_tx
->shift
, &target_tx
->shift
);
7516 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7517 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7518 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7519 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7520 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7521 __put_user(host_tx
->tai
, &target_tx
->tai
);
7523 unlock_user_struct(target_tx
, target_addr
, 1);
7528 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7529 #define sigev_notify_thread_id _sigev_un._tid
7532 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7533 abi_ulong target_addr
)
7535 struct target_sigevent
*target_sevp
;
7537 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7538 return -TARGET_EFAULT
;
7541 /* This union is awkward on 64 bit systems because it has a 32 bit
7542 * integer and a pointer in it; we follow the conversion approach
7543 * used for handling sigval types in signal.c so the guest should get
7544 * the correct value back even if we did a 64 bit byteswap and it's
7545 * using the 32 bit integer.
7547 host_sevp
->sigev_value
.sival_ptr
=
7548 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7549 host_sevp
->sigev_signo
=
7550 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7551 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7552 host_sevp
->sigev_notify_thread_id
= tswap32(target_sevp
->_sigev_un
._tid
);
7554 unlock_user_struct(target_sevp
, target_addr
, 1);
7558 #if defined(TARGET_NR_mlockall)
7559 static inline int target_to_host_mlockall_arg(int arg
)
7563 if (arg
& TARGET_MCL_CURRENT
) {
7564 result
|= MCL_CURRENT
;
7566 if (arg
& TARGET_MCL_FUTURE
) {
7567 result
|= MCL_FUTURE
;
7570 if (arg
& TARGET_MCL_ONFAULT
) {
7571 result
|= MCL_ONFAULT
;
7579 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7580 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7581 defined(TARGET_NR_newfstatat))
7582 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7583 abi_ulong target_addr
,
7584 struct stat
*host_st
)
7586 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7587 if (((CPUARMState
*)cpu_env
)->eabi
) {
7588 struct target_eabi_stat64
*target_st
;
7590 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7591 return -TARGET_EFAULT
;
7592 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7593 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7594 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7595 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7596 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7598 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7599 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7600 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7601 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7602 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7603 __put_user(host_st
->st_size
, &target_st
->st_size
);
7604 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7605 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7606 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7607 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7608 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7609 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7610 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7611 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7612 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7614 unlock_user_struct(target_st
, target_addr
, 1);
7618 #if defined(TARGET_HAS_STRUCT_STAT64)
7619 struct target_stat64
*target_st
;
7621 struct target_stat
*target_st
;
7624 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7625 return -TARGET_EFAULT
;
7626 memset(target_st
, 0, sizeof(*target_st
));
7627 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7628 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7629 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7630 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7632 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7633 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7634 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7635 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7636 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7637 /* XXX: better use of kernel struct */
7638 __put_user(host_st
->st_size
, &target_st
->st_size
);
7639 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7640 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7641 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7642 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7643 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7644 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7645 __put_user(host_st
->st_atim
.tv_nsec
, &target_st
->target_st_atime_nsec
);
7646 __put_user(host_st
->st_mtim
.tv_nsec
, &target_st
->target_st_mtime_nsec
);
7647 __put_user(host_st
->st_ctim
.tv_nsec
, &target_st
->target_st_ctime_nsec
);
7649 unlock_user_struct(target_st
, target_addr
, 1);
7656 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7657 static inline abi_long
host_to_target_statx(struct target_statx
*host_stx
,
7658 abi_ulong target_addr
)
7660 struct target_statx
*target_stx
;
7662 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, target_addr
, 0)) {
7663 return -TARGET_EFAULT
;
7665 memset(target_stx
, 0, sizeof(*target_stx
));
7667 __put_user(host_stx
->stx_mask
, &target_stx
->stx_mask
);
7668 __put_user(host_stx
->stx_blksize
, &target_stx
->stx_blksize
);
7669 __put_user(host_stx
->stx_attributes
, &target_stx
->stx_attributes
);
7670 __put_user(host_stx
->stx_nlink
, &target_stx
->stx_nlink
);
7671 __put_user(host_stx
->stx_uid
, &target_stx
->stx_uid
);
7672 __put_user(host_stx
->stx_gid
, &target_stx
->stx_gid
);
7673 __put_user(host_stx
->stx_mode
, &target_stx
->stx_mode
);
7674 __put_user(host_stx
->stx_ino
, &target_stx
->stx_ino
);
7675 __put_user(host_stx
->stx_size
, &target_stx
->stx_size
);
7676 __put_user(host_stx
->stx_blocks
, &target_stx
->stx_blocks
);
7677 __put_user(host_stx
->stx_attributes_mask
, &target_stx
->stx_attributes_mask
);
7678 __put_user(host_stx
->stx_atime
.tv_sec
, &target_stx
->stx_atime
.tv_sec
);
7679 __put_user(host_stx
->stx_atime
.tv_nsec
, &target_stx
->stx_atime
.tv_nsec
);
7680 __put_user(host_stx
->stx_btime
.tv_sec
, &target_stx
->stx_btime
.tv_sec
);
7681 __put_user(host_stx
->stx_btime
.tv_nsec
, &target_stx
->stx_btime
.tv_nsec
);
7682 __put_user(host_stx
->stx_ctime
.tv_sec
, &target_stx
->stx_ctime
.tv_sec
);
7683 __put_user(host_stx
->stx_ctime
.tv_nsec
, &target_stx
->stx_ctime
.tv_nsec
);
7684 __put_user(host_stx
->stx_mtime
.tv_sec
, &target_stx
->stx_mtime
.tv_sec
);
7685 __put_user(host_stx
->stx_mtime
.tv_nsec
, &target_stx
->stx_mtime
.tv_nsec
);
7686 __put_user(host_stx
->stx_rdev_major
, &target_stx
->stx_rdev_major
);
7687 __put_user(host_stx
->stx_rdev_minor
, &target_stx
->stx_rdev_minor
);
7688 __put_user(host_stx
->stx_dev_major
, &target_stx
->stx_dev_major
);
7689 __put_user(host_stx
->stx_dev_minor
, &target_stx
->stx_dev_minor
);
7691 unlock_user_struct(target_stx
, target_addr
, 1);
/*
 * Invoke the raw host futex syscall, picking __NR_futex or
 * __NR_futex_time64 according to the host's time_t width.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7722 static int do_safe_futex(int *uaddr
, int op
, int val
,
7723 const struct timespec
*timeout
, int *uaddr2
,
7726 #if HOST_LONG_BITS == 64
7727 #if defined(__NR_futex)
7728 /* always a 64-bit time_t, it doesn't define _time64 version */
7729 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7731 #else /* HOST_LONG_BITS == 64 */
7732 #if defined(__NR_futex_time64)
7733 if (sizeof(timeout
->tv_sec
) == 8) {
7734 /* _time64 function on 32bit arch */
7735 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7739 #if defined(__NR_futex)
7740 /* old function on 32bit arch */
7741 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7743 #endif /* HOST_LONG_BITS == 64 */
7744 return -TARGET_ENOSYS
;
7747 /* ??? Using host futex calls even when target atomic operations
7748 are not really atomic probably breaks things. However implementing
7749 futexes locally would make futexes shared between multiple processes
7750 tricky. However they're probably useless because guest atomic
7751 operations won't work either. */
7752 #if defined(TARGET_NR_futex)
7753 static int do_futex(CPUState
*cpu
, target_ulong uaddr
, int op
, int val
,
7754 target_ulong timeout
, target_ulong uaddr2
, int val3
)
7756 struct timespec ts
, *pts
;
7759 /* ??? We assume FUTEX_* constants are the same on both host
7761 #ifdef FUTEX_CMD_MASK
7762 base_op
= op
& FUTEX_CMD_MASK
;
7768 case FUTEX_WAIT_BITSET
:
7771 target_to_host_timespec(pts
, timeout
);
7775 return do_safe_futex(g2h(cpu
, uaddr
),
7776 op
, tswap32(val
), pts
, NULL
, val3
);
7778 return do_safe_futex(g2h(cpu
, uaddr
),
7779 op
, val
, NULL
, NULL
, 0);
7781 return do_safe_futex(g2h(cpu
, uaddr
),
7782 op
, val
, NULL
, NULL
, 0);
7784 case FUTEX_CMP_REQUEUE
:
7786 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7787 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7788 But the prototype takes a `struct timespec *'; insert casts
7789 to satisfy the compiler. We do not need to tswap TIMEOUT
7790 since it's not compared to guest memory. */
7791 pts
= (struct timespec
*)(uintptr_t) timeout
;
7792 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7793 (base_op
== FUTEX_CMP_REQUEUE
7794 ? tswap32(val3
) : val3
));
7796 return -TARGET_ENOSYS
;
7801 #if defined(TARGET_NR_futex_time64)
7802 static int do_futex_time64(CPUState
*cpu
, target_ulong uaddr
, int op
,
7803 int val
, target_ulong timeout
,
7804 target_ulong uaddr2
, int val3
)
7806 struct timespec ts
, *pts
;
7809 /* ??? We assume FUTEX_* constants are the same on both host
7811 #ifdef FUTEX_CMD_MASK
7812 base_op
= op
& FUTEX_CMD_MASK
;
7818 case FUTEX_WAIT_BITSET
:
7821 if (target_to_host_timespec64(pts
, timeout
)) {
7822 return -TARGET_EFAULT
;
7827 return do_safe_futex(g2h(cpu
, uaddr
), op
,
7828 tswap32(val
), pts
, NULL
, val3
);
7830 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7832 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, NULL
, NULL
, 0);
7834 case FUTEX_CMP_REQUEUE
:
7836 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7837 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7838 But the prototype takes a `struct timespec *'; insert casts
7839 to satisfy the compiler. We do not need to tswap TIMEOUT
7840 since it's not compared to guest memory. */
7841 pts
= (struct timespec
*)(uintptr_t) timeout
;
7842 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7843 (base_op
== FUTEX_CMP_REQUEUE
7844 ? tswap32(val3
) : val3
));
7846 return -TARGET_ENOSYS
;
7851 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7852 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7853 abi_long handle
, abi_long mount_id
,
7856 struct file_handle
*target_fh
;
7857 struct file_handle
*fh
;
7861 unsigned int size
, total_size
;
7863 if (get_user_s32(size
, handle
)) {
7864 return -TARGET_EFAULT
;
7867 name
= lock_user_string(pathname
);
7869 return -TARGET_EFAULT
;
7872 total_size
= sizeof(struct file_handle
) + size
;
7873 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7875 unlock_user(name
, pathname
, 0);
7876 return -TARGET_EFAULT
;
7879 fh
= g_malloc0(total_size
);
7880 fh
->handle_bytes
= size
;
7882 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7883 unlock_user(name
, pathname
, 0);
7885 /* man name_to_handle_at(2):
7886 * Other than the use of the handle_bytes field, the caller should treat
7887 * the file_handle structure as an opaque data type
7890 memcpy(target_fh
, fh
, total_size
);
7891 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7892 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7894 unlock_user(target_fh
, handle
, total_size
);
7896 if (put_user_s32(mid
, mount_id
)) {
7897 return -TARGET_EFAULT
;
7905 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7906 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7909 struct file_handle
*target_fh
;
7910 struct file_handle
*fh
;
7911 unsigned int size
, total_size
;
7914 if (get_user_s32(size
, handle
)) {
7915 return -TARGET_EFAULT
;
7918 total_size
= sizeof(struct file_handle
) + size
;
7919 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7921 return -TARGET_EFAULT
;
7924 fh
= g_memdup(target_fh
, total_size
);
7925 fh
->handle_bytes
= size
;
7926 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7928 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7929 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7933 unlock_user(target_fh
, handle
, total_size
);
7939 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7941 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7944 target_sigset_t
*target_mask
;
7948 if (flags
& ~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
)) {
7949 return -TARGET_EINVAL
;
7951 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7952 return -TARGET_EFAULT
;
7955 target_to_host_sigset(&host_mask
, target_mask
);
7957 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7959 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7961 fd_trans_register(ret
, &target_signalfd_trans
);
7964 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits carry the terminating signal number */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7984 static int open_self_cmdline(void *cpu_env
, int fd
)
7986 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7987 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7990 for (i
= 0; i
< bprm
->argc
; i
++) {
7991 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7993 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
8001 static int open_self_maps(void *cpu_env
, int fd
)
8003 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
8004 TaskState
*ts
= cpu
->opaque
;
8005 GSList
*map_info
= read_self_maps();
8009 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
8010 MapInfo
*e
= (MapInfo
*) s
->data
;
8012 if (h2g_valid(e
->start
)) {
8013 unsigned long min
= e
->start
;
8014 unsigned long max
= e
->end
;
8015 int flags
= page_get_flags(h2g(min
));
8018 max
= h2g_valid(max
- 1) ?
8019 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
8021 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
8025 if (h2g(min
) == ts
->info
->stack_limit
) {
8031 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
8032 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
8033 h2g(min
), h2g(max
- 1) + 1,
8034 (flags
& PAGE_READ
) ? 'r' : '-',
8035 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
8036 (flags
& PAGE_EXEC
) ? 'x' : '-',
8037 e
->is_priv
? 'p' : 's',
8038 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
8040 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
8047 free_self_maps(map_info
);
8049 #ifdef TARGET_VSYSCALL_PAGE
8051 * We only support execution from the vsyscall page.
8052 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8054 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
8055 " --xp 00000000 00:00 0",
8056 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
8057 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
8063 static int open_self_stat(void *cpu_env
, int fd
)
8065 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
8066 TaskState
*ts
= cpu
->opaque
;
8067 g_autoptr(GString
) buf
= g_string_new(NULL
);
8070 for (i
= 0; i
< 44; i
++) {
8073 g_string_printf(buf
, FMT_pid
" ", getpid());
8074 } else if (i
== 1) {
8076 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
8077 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
8078 g_string_printf(buf
, "(%.15s) ", bin
);
8079 } else if (i
== 3) {
8081 g_string_printf(buf
, FMT_pid
" ", getppid());
8082 } else if (i
== 21) {
8084 g_string_printf(buf
, "%" PRIu64
" ", ts
->start_boottime
);
8085 } else if (i
== 27) {
8087 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
8089 /* for the rest, there is MasterCard */
8090 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
8093 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
8101 static int open_self_auxv(void *cpu_env
, int fd
)
8103 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
8104 TaskState
*ts
= cpu
->opaque
;
8105 abi_ulong auxv
= ts
->info
->saved_auxv
;
8106 abi_ulong len
= ts
->info
->auxv_len
;
8110 * Auxiliary vector is stored in target process stack.
8111 * read in whole auxv vector and copy it to file
8113 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
8117 r
= write(fd
, ptr
, len
);
8124 lseek(fd
, 0, SEEK_SET
);
8125 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if filename names the entry `entry` of this process's /proc
 * directory, whether spelled /proc/self/<entry> or /proc/<pid>/<entry>
 * with our own pid; 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
8155 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8156 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake_open table for absolute paths. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
8163 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8164 static int open_net_route(void *cpu_env
, int fd
)
8171 fp
= fopen("/proc/net/route", "r");
8178 read
= getline(&line
, &len
, fp
);
8179 dprintf(fd
, "%s", line
);
8183 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8185 uint32_t dest
, gw
, mask
;
8186 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8189 fields
= sscanf(line
,
8190 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8191 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8192 &mask
, &mtu
, &window
, &irtt
);
8196 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8197 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8198 metric
, tswap32(mask
), mtu
, window
, irtt
);
8208 #if defined(TARGET_SPARC)
/* Fill a fake /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
8216 #if defined(TARGET_HPPA)
/* Fill a fake /proc/cpuinfo for HPPA guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
8228 #if defined(TARGET_M68K)
/* Fill a fake /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
8236 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
8239 const char *filename
;
8240 int (*fill
)(void *cpu_env
, int fd
);
8241 int (*cmp
)(const char *s1
, const char *s2
);
8243 const struct fake_open
*fake_open
;
8244 static const struct fake_open fakes
[] = {
8245 { "maps", open_self_maps
, is_proc_myself
},
8246 { "stat", open_self_stat
, is_proc_myself
},
8247 { "auxv", open_self_auxv
, is_proc_myself
},
8248 { "cmdline", open_self_cmdline
, is_proc_myself
},
8249 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8250 { "/proc/net/route", open_net_route
, is_proc
},
8252 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8253 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8255 #if defined(TARGET_M68K)
8256 { "/proc/hardware", open_hardware
, is_proc
},
8258 { NULL
, NULL
, NULL
}
8261 if (is_proc_myself(pathname
, "exe")) {
8262 int execfd
= qemu_getauxval(AT_EXECFD
);
8263 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
8266 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8267 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8272 if (fake_open
->filename
) {
8274 char filename
[PATH_MAX
];
8277 /* create temporary file to map stat to */
8278 tmpdir
= getenv("TMPDIR");
8281 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8282 fd
= mkstemp(filename
);
8288 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8294 lseek(fd
, 0, SEEK_SET
);
8299 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8302 #define TIMER_MAGIC 0x0caf0000
8303 #define TIMER_MAGIC_MASK 0xffff0000
8305 /* Convert QEMU provided timer ID back to internal 16bit index format */
8306 static target_timer_t
get_timer_id(abi_long arg
)
8308 target_timer_t timerid
= arg
;
8310 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8311 return -TARGET_EINVAL
;
8316 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8317 return -TARGET_EINVAL
;
8323 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8325 abi_ulong target_addr
,
8328 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8329 unsigned host_bits
= sizeof(*host_mask
) * 8;
8330 abi_ulong
*target_mask
;
8333 assert(host_size
>= target_size
);
8335 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8337 return -TARGET_EFAULT
;
8339 memset(host_mask
, 0, host_size
);
8341 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8342 unsigned bit
= i
* target_bits
;
8345 __get_user(val
, &target_mask
[i
]);
8346 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8347 if (val
& (1UL << j
)) {
8348 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8353 unlock_user(target_mask
, target_addr
, 0);
8357 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8359 abi_ulong target_addr
,
8362 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8363 unsigned host_bits
= sizeof(*host_mask
) * 8;
8364 abi_ulong
*target_mask
;
8367 assert(host_size
>= target_size
);
8369 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8371 return -TARGET_EFAULT
;
8374 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8375 unsigned bit
= i
* target_bits
;
8378 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8379 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8383 __put_user(val
, &target_mask
[i
]);
8386 unlock_user(target_mask
, target_addr
, target_size
);
8390 #ifdef TARGET_NR_getdents
8391 static int do_getdents(abi_long dirfd
, abi_long arg2
, abi_long count
)
8393 g_autofree
void *hdirp
= NULL
;
8395 int hlen
, hoff
, toff
;
8396 int hreclen
, treclen
;
8397 off64_t prev_diroff
= 0;
8399 hdirp
= g_try_malloc(count
);
8401 return -TARGET_ENOMEM
;
8404 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8405 hlen
= sys_getdents(dirfd
, hdirp
, count
);
8407 hlen
= sys_getdents64(dirfd
, hdirp
, count
);
8410 hlen
= get_errno(hlen
);
8411 if (is_error(hlen
)) {
8415 tdirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
8417 return -TARGET_EFAULT
;
8420 for (hoff
= toff
= 0; hoff
< hlen
; hoff
+= hreclen
, toff
+= treclen
) {
8421 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8422 struct linux_dirent
*hde
= hdirp
+ hoff
;
8424 struct linux_dirent64
*hde
= hdirp
+ hoff
;
8426 struct target_dirent
*tde
= tdirp
+ toff
;
8430 namelen
= strlen(hde
->d_name
);
8431 hreclen
= hde
->d_reclen
;
8432 treclen
= offsetof(struct target_dirent
, d_name
) + namelen
+ 2;
8433 treclen
= QEMU_ALIGN_UP(treclen
, __alignof(struct target_dirent
));
8435 if (toff
+ treclen
> count
) {
8437 * If the host struct is smaller than the target struct, or
8438 * requires less alignment and thus packs into less space,
8439 * then the host can return more entries than we can pass
8443 toff
= -TARGET_EINVAL
; /* result buffer is too small */
8447 * Return what we have, resetting the file pointer to the
8448 * location of the first record not returned.
8450 lseek64(dirfd
, prev_diroff
, SEEK_SET
);
8454 prev_diroff
= hde
->d_off
;
8455 tde
->d_ino
= tswapal(hde
->d_ino
);
8456 tde
->d_off
= tswapal(hde
->d_off
);
8457 tde
->d_reclen
= tswap16(treclen
);
8458 memcpy(tde
->d_name
, hde
->d_name
, namelen
+ 1);
8461 * The getdents type is in what was formerly a padding byte at the
8462 * end of the structure.
8464 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8465 type
= *((uint8_t *)hde
+ hreclen
- 1);
8469 *((uint8_t *)tde
+ treclen
- 1) = type
;
8472 unlock_user(tdirp
, arg2
, toff
);
8475 #endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): read directory entries from the host via the
 * raw getdents64 syscall into a bounce buffer, then repack each record
 * into the guest's struct target_dirent64 layout (byte-swapping the
 * multi-byte fields) inside the guest buffer at arg2.
 *
 * Returns the number of bytes written to the guest buffer, 0 at EOF,
 * or a -TARGET_* errno on failure.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;   /* host-side bounce buffer */
    void *tdirp;                     /* locked guest buffer */
    int hlen, hoff, toff;            /* host length, host/target offsets */
    int hreclen, treclen;            /* per-record lengths, host/target */
    /* d_off of the last record copied out; used to rewind on overflow */
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records (hoff) and guest records (toff) in lock step. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* Target record: header + name (incl. NUL), padded to alignment. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        /* namelen already includes the trailing NUL. */
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/*
 * Direct syscall wrapper for pivot_root(2): not all host C libraries
 * provide a wrapper, so invoke the raw syscall number.
 */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
8547 /* This is an internal helper for do_syscall so that it is easier
8548 * to have a single return point, so that actions, such as logging
8549 * of syscall results, can be performed.
8550 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8552 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8553 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8554 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8557 CPUState
*cpu
= env_cpu(cpu_env
);
8559 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8560 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8561 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8562 || defined(TARGET_NR_statx)
8565 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8566 || defined(TARGET_NR_fstatfs)
8572 case TARGET_NR_exit
:
8573 /* In old applications this may be used to implement _exit(2).
8574 However in threaded applications it is used for thread termination,
8575 and _exit_group is used for application termination.
8576 Do thread termination if we have more then one thread. */
8578 if (block_signals()) {
8579 return -QEMU_ERESTARTSYS
;
8582 pthread_mutex_lock(&clone_lock
);
8584 if (CPU_NEXT(first_cpu
)) {
8585 TaskState
*ts
= cpu
->opaque
;
8587 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8588 object_unref(OBJECT(cpu
));
8590 * At this point the CPU should be unrealized and removed
8591 * from cpu lists. We can clean-up the rest of the thread
8592 * data without the lock held.
8595 pthread_mutex_unlock(&clone_lock
);
8597 if (ts
->child_tidptr
) {
8598 put_user_u32(0, ts
->child_tidptr
);
8599 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8600 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8604 rcu_unregister_thread();
8608 pthread_mutex_unlock(&clone_lock
);
8609 preexit_cleanup(cpu_env
, arg1
);
8611 return 0; /* avoid warning */
8612 case TARGET_NR_read
:
8613 if (arg2
== 0 && arg3
== 0) {
8614 return get_errno(safe_read(arg1
, 0, 0));
8616 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8617 return -TARGET_EFAULT
;
8618 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8620 fd_trans_host_to_target_data(arg1
)) {
8621 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8623 unlock_user(p
, arg2
, ret
);
8626 case TARGET_NR_write
:
8627 if (arg2
== 0 && arg3
== 0) {
8628 return get_errno(safe_write(arg1
, 0, 0));
8630 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8631 return -TARGET_EFAULT
;
8632 if (fd_trans_target_to_host_data(arg1
)) {
8633 void *copy
= g_malloc(arg3
);
8634 memcpy(copy
, p
, arg3
);
8635 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8637 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8641 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8643 unlock_user(p
, arg2
, 0);
8646 #ifdef TARGET_NR_open
8647 case TARGET_NR_open
:
8648 if (!(p
= lock_user_string(arg1
)))
8649 return -TARGET_EFAULT
;
8650 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8651 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8653 fd_trans_unregister(ret
);
8654 unlock_user(p
, arg1
, 0);
8657 case TARGET_NR_openat
:
8658 if (!(p
= lock_user_string(arg2
)))
8659 return -TARGET_EFAULT
;
8660 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8661 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8663 fd_trans_unregister(ret
);
8664 unlock_user(p
, arg2
, 0);
8666 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8667 case TARGET_NR_name_to_handle_at
:
8668 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8671 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8672 case TARGET_NR_open_by_handle_at
:
8673 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8674 fd_trans_unregister(ret
);
8677 case TARGET_NR_close
:
8678 fd_trans_unregister(arg1
);
8679 return get_errno(close(arg1
));
8682 return do_brk(arg1
);
8683 #ifdef TARGET_NR_fork
8684 case TARGET_NR_fork
:
8685 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8687 #ifdef TARGET_NR_waitpid
8688 case TARGET_NR_waitpid
:
8691 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8692 if (!is_error(ret
) && arg2
&& ret
8693 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8694 return -TARGET_EFAULT
;
8698 #ifdef TARGET_NR_waitid
8699 case TARGET_NR_waitid
:
8703 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8704 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8705 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8706 return -TARGET_EFAULT
;
8707 host_to_target_siginfo(p
, &info
);
8708 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8713 #ifdef TARGET_NR_creat /* not on alpha */
8714 case TARGET_NR_creat
:
8715 if (!(p
= lock_user_string(arg1
)))
8716 return -TARGET_EFAULT
;
8717 ret
= get_errno(creat(p
, arg2
));
8718 fd_trans_unregister(ret
);
8719 unlock_user(p
, arg1
, 0);
8722 #ifdef TARGET_NR_link
8723 case TARGET_NR_link
:
8726 p
= lock_user_string(arg1
);
8727 p2
= lock_user_string(arg2
);
8729 ret
= -TARGET_EFAULT
;
8731 ret
= get_errno(link(p
, p2
));
8732 unlock_user(p2
, arg2
, 0);
8733 unlock_user(p
, arg1
, 0);
8737 #if defined(TARGET_NR_linkat)
8738 case TARGET_NR_linkat
:
8742 return -TARGET_EFAULT
;
8743 p
= lock_user_string(arg2
);
8744 p2
= lock_user_string(arg4
);
8746 ret
= -TARGET_EFAULT
;
8748 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8749 unlock_user(p
, arg2
, 0);
8750 unlock_user(p2
, arg4
, 0);
8754 #ifdef TARGET_NR_unlink
8755 case TARGET_NR_unlink
:
8756 if (!(p
= lock_user_string(arg1
)))
8757 return -TARGET_EFAULT
;
8758 ret
= get_errno(unlink(p
));
8759 unlock_user(p
, arg1
, 0);
8762 #if defined(TARGET_NR_unlinkat)
8763 case TARGET_NR_unlinkat
:
8764 if (!(p
= lock_user_string(arg2
)))
8765 return -TARGET_EFAULT
;
8766 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8767 unlock_user(p
, arg2
, 0);
8770 case TARGET_NR_execve
:
8772 char **argp
, **envp
;
8775 abi_ulong guest_argp
;
8776 abi_ulong guest_envp
;
8782 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8783 if (get_user_ual(addr
, gp
))
8784 return -TARGET_EFAULT
;
8791 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8792 if (get_user_ual(addr
, gp
))
8793 return -TARGET_EFAULT
;
8799 argp
= g_new0(char *, argc
+ 1);
8800 envp
= g_new0(char *, envc
+ 1);
8802 for (gp
= guest_argp
, q
= argp
; gp
;
8803 gp
+= sizeof(abi_ulong
), q
++) {
8804 if (get_user_ual(addr
, gp
))
8808 if (!(*q
= lock_user_string(addr
)))
8813 for (gp
= guest_envp
, q
= envp
; gp
;
8814 gp
+= sizeof(abi_ulong
), q
++) {
8815 if (get_user_ual(addr
, gp
))
8819 if (!(*q
= lock_user_string(addr
)))
8824 if (!(p
= lock_user_string(arg1
)))
8826 /* Although execve() is not an interruptible syscall it is
8827 * a special case where we must use the safe_syscall wrapper:
8828 * if we allow a signal to happen before we make the host
8829 * syscall then we will 'lose' it, because at the point of
8830 * execve the process leaves QEMU's control. So we use the
8831 * safe syscall wrapper to ensure that we either take the
8832 * signal as a guest signal, or else it does not happen
8833 * before the execve completes and makes it the other
8834 * program's problem.
8836 ret
= get_errno(safe_execve(p
, argp
, envp
));
8837 unlock_user(p
, arg1
, 0);
8842 ret
= -TARGET_EFAULT
;
8845 for (gp
= guest_argp
, q
= argp
; *q
;
8846 gp
+= sizeof(abi_ulong
), q
++) {
8847 if (get_user_ual(addr
, gp
)
8850 unlock_user(*q
, addr
, 0);
8852 for (gp
= guest_envp
, q
= envp
; *q
;
8853 gp
+= sizeof(abi_ulong
), q
++) {
8854 if (get_user_ual(addr
, gp
)
8857 unlock_user(*q
, addr
, 0);
8864 case TARGET_NR_chdir
:
8865 if (!(p
= lock_user_string(arg1
)))
8866 return -TARGET_EFAULT
;
8867 ret
= get_errno(chdir(p
));
8868 unlock_user(p
, arg1
, 0);
8870 #ifdef TARGET_NR_time
8871 case TARGET_NR_time
:
8874 ret
= get_errno(time(&host_time
));
8877 && put_user_sal(host_time
, arg1
))
8878 return -TARGET_EFAULT
;
8882 #ifdef TARGET_NR_mknod
8883 case TARGET_NR_mknod
:
8884 if (!(p
= lock_user_string(arg1
)))
8885 return -TARGET_EFAULT
;
8886 ret
= get_errno(mknod(p
, arg2
, arg3
));
8887 unlock_user(p
, arg1
, 0);
8890 #if defined(TARGET_NR_mknodat)
8891 case TARGET_NR_mknodat
:
8892 if (!(p
= lock_user_string(arg2
)))
8893 return -TARGET_EFAULT
;
8894 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8895 unlock_user(p
, arg2
, 0);
8898 #ifdef TARGET_NR_chmod
8899 case TARGET_NR_chmod
:
8900 if (!(p
= lock_user_string(arg1
)))
8901 return -TARGET_EFAULT
;
8902 ret
= get_errno(chmod(p
, arg2
));
8903 unlock_user(p
, arg1
, 0);
8906 #ifdef TARGET_NR_lseek
8907 case TARGET_NR_lseek
:
8908 return get_errno(lseek(arg1
, arg2
, arg3
));
8910 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8911 /* Alpha specific */
8912 case TARGET_NR_getxpid
:
8913 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8914 return get_errno(getpid());
8916 #ifdef TARGET_NR_getpid
8917 case TARGET_NR_getpid
:
8918 return get_errno(getpid());
8920 case TARGET_NR_mount
:
8922 /* need to look at the data field */
8926 p
= lock_user_string(arg1
);
8928 return -TARGET_EFAULT
;
8934 p2
= lock_user_string(arg2
);
8937 unlock_user(p
, arg1
, 0);
8939 return -TARGET_EFAULT
;
8943 p3
= lock_user_string(arg3
);
8946 unlock_user(p
, arg1
, 0);
8948 unlock_user(p2
, arg2
, 0);
8949 return -TARGET_EFAULT
;
8955 /* FIXME - arg5 should be locked, but it isn't clear how to
8956 * do that since it's not guaranteed to be a NULL-terminated
8960 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8962 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8964 ret
= get_errno(ret
);
8967 unlock_user(p
, arg1
, 0);
8969 unlock_user(p2
, arg2
, 0);
8971 unlock_user(p3
, arg3
, 0);
8975 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8976 #if defined(TARGET_NR_umount)
8977 case TARGET_NR_umount
:
8979 #if defined(TARGET_NR_oldumount)
8980 case TARGET_NR_oldumount
:
8982 if (!(p
= lock_user_string(arg1
)))
8983 return -TARGET_EFAULT
;
8984 ret
= get_errno(umount(p
));
8985 unlock_user(p
, arg1
, 0);
8988 #ifdef TARGET_NR_stime /* not on alpha */
8989 case TARGET_NR_stime
:
8993 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8994 return -TARGET_EFAULT
;
8996 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8999 #ifdef TARGET_NR_alarm /* not on alpha */
9000 case TARGET_NR_alarm
:
9003 #ifdef TARGET_NR_pause /* not on alpha */
9004 case TARGET_NR_pause
:
9005 if (!block_signals()) {
9006 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
9008 return -TARGET_EINTR
;
9010 #ifdef TARGET_NR_utime
9011 case TARGET_NR_utime
:
9013 struct utimbuf tbuf
, *host_tbuf
;
9014 struct target_utimbuf
*target_tbuf
;
9016 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
9017 return -TARGET_EFAULT
;
9018 tbuf
.actime
= tswapal(target_tbuf
->actime
);
9019 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
9020 unlock_user_struct(target_tbuf
, arg2
, 0);
9025 if (!(p
= lock_user_string(arg1
)))
9026 return -TARGET_EFAULT
;
9027 ret
= get_errno(utime(p
, host_tbuf
));
9028 unlock_user(p
, arg1
, 0);
9032 #ifdef TARGET_NR_utimes
9033 case TARGET_NR_utimes
:
9035 struct timeval
*tvp
, tv
[2];
9037 if (copy_from_user_timeval(&tv
[0], arg2
)
9038 || copy_from_user_timeval(&tv
[1],
9039 arg2
+ sizeof(struct target_timeval
)))
9040 return -TARGET_EFAULT
;
9045 if (!(p
= lock_user_string(arg1
)))
9046 return -TARGET_EFAULT
;
9047 ret
= get_errno(utimes(p
, tvp
));
9048 unlock_user(p
, arg1
, 0);
9052 #if defined(TARGET_NR_futimesat)
9053 case TARGET_NR_futimesat
:
9055 struct timeval
*tvp
, tv
[2];
9057 if (copy_from_user_timeval(&tv
[0], arg3
)
9058 || copy_from_user_timeval(&tv
[1],
9059 arg3
+ sizeof(struct target_timeval
)))
9060 return -TARGET_EFAULT
;
9065 if (!(p
= lock_user_string(arg2
))) {
9066 return -TARGET_EFAULT
;
9068 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
9069 unlock_user(p
, arg2
, 0);
9073 #ifdef TARGET_NR_access
9074 case TARGET_NR_access
:
9075 if (!(p
= lock_user_string(arg1
))) {
9076 return -TARGET_EFAULT
;
9078 ret
= get_errno(access(path(p
), arg2
));
9079 unlock_user(p
, arg1
, 0);
9082 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9083 case TARGET_NR_faccessat
:
9084 if (!(p
= lock_user_string(arg2
))) {
9085 return -TARGET_EFAULT
;
9087 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
9088 unlock_user(p
, arg2
, 0);
9091 #ifdef TARGET_NR_nice /* not on alpha */
9092 case TARGET_NR_nice
:
9093 return get_errno(nice(arg1
));
9095 case TARGET_NR_sync
:
9098 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9099 case TARGET_NR_syncfs
:
9100 return get_errno(syncfs(arg1
));
9102 case TARGET_NR_kill
:
9103 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
9104 #ifdef TARGET_NR_rename
9105 case TARGET_NR_rename
:
9108 p
= lock_user_string(arg1
);
9109 p2
= lock_user_string(arg2
);
9111 ret
= -TARGET_EFAULT
;
9113 ret
= get_errno(rename(p
, p2
));
9114 unlock_user(p2
, arg2
, 0);
9115 unlock_user(p
, arg1
, 0);
9119 #if defined(TARGET_NR_renameat)
9120 case TARGET_NR_renameat
:
9123 p
= lock_user_string(arg2
);
9124 p2
= lock_user_string(arg4
);
9126 ret
= -TARGET_EFAULT
;
9128 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
9129 unlock_user(p2
, arg4
, 0);
9130 unlock_user(p
, arg2
, 0);
9134 #if defined(TARGET_NR_renameat2)
9135 case TARGET_NR_renameat2
:
9138 p
= lock_user_string(arg2
);
9139 p2
= lock_user_string(arg4
);
9141 ret
= -TARGET_EFAULT
;
9143 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
9145 unlock_user(p2
, arg4
, 0);
9146 unlock_user(p
, arg2
, 0);
9150 #ifdef TARGET_NR_mkdir
9151 case TARGET_NR_mkdir
:
9152 if (!(p
= lock_user_string(arg1
)))
9153 return -TARGET_EFAULT
;
9154 ret
= get_errno(mkdir(p
, arg2
));
9155 unlock_user(p
, arg1
, 0);
9158 #if defined(TARGET_NR_mkdirat)
9159 case TARGET_NR_mkdirat
:
9160 if (!(p
= lock_user_string(arg2
)))
9161 return -TARGET_EFAULT
;
9162 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
9163 unlock_user(p
, arg2
, 0);
9166 #ifdef TARGET_NR_rmdir
9167 case TARGET_NR_rmdir
:
9168 if (!(p
= lock_user_string(arg1
)))
9169 return -TARGET_EFAULT
;
9170 ret
= get_errno(rmdir(p
));
9171 unlock_user(p
, arg1
, 0);
9175 ret
= get_errno(dup(arg1
));
9177 fd_trans_dup(arg1
, ret
);
9180 #ifdef TARGET_NR_pipe
9181 case TARGET_NR_pipe
:
9182 return do_pipe(cpu_env
, arg1
, 0, 0);
9184 #ifdef TARGET_NR_pipe2
9185 case TARGET_NR_pipe2
:
9186 return do_pipe(cpu_env
, arg1
,
9187 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
9189 case TARGET_NR_times
:
9191 struct target_tms
*tmsp
;
9193 ret
= get_errno(times(&tms
));
9195 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
9197 return -TARGET_EFAULT
;
9198 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
9199 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
9200 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
9201 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
9204 ret
= host_to_target_clock_t(ret
);
9207 case TARGET_NR_acct
:
9209 ret
= get_errno(acct(NULL
));
9211 if (!(p
= lock_user_string(arg1
))) {
9212 return -TARGET_EFAULT
;
9214 ret
= get_errno(acct(path(p
)));
9215 unlock_user(p
, arg1
, 0);
9218 #ifdef TARGET_NR_umount2
9219 case TARGET_NR_umount2
:
9220 if (!(p
= lock_user_string(arg1
)))
9221 return -TARGET_EFAULT
;
9222 ret
= get_errno(umount2(p
, arg2
));
9223 unlock_user(p
, arg1
, 0);
9226 case TARGET_NR_ioctl
:
9227 return do_ioctl(arg1
, arg2
, arg3
);
9228 #ifdef TARGET_NR_fcntl
9229 case TARGET_NR_fcntl
:
9230 return do_fcntl(arg1
, arg2
, arg3
);
9232 case TARGET_NR_setpgid
:
9233 return get_errno(setpgid(arg1
, arg2
));
9234 case TARGET_NR_umask
:
9235 return get_errno(umask(arg1
));
9236 case TARGET_NR_chroot
:
9237 if (!(p
= lock_user_string(arg1
)))
9238 return -TARGET_EFAULT
;
9239 ret
= get_errno(chroot(p
));
9240 unlock_user(p
, arg1
, 0);
9242 #ifdef TARGET_NR_dup2
9243 case TARGET_NR_dup2
:
9244 ret
= get_errno(dup2(arg1
, arg2
));
9246 fd_trans_dup(arg1
, arg2
);
9250 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9251 case TARGET_NR_dup3
:
9255 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9258 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9259 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9261 fd_trans_dup(arg1
, arg2
);
9266 #ifdef TARGET_NR_getppid /* not on alpha */
9267 case TARGET_NR_getppid
:
9268 return get_errno(getppid());
9270 #ifdef TARGET_NR_getpgrp
9271 case TARGET_NR_getpgrp
:
9272 return get_errno(getpgrp());
9274 case TARGET_NR_setsid
:
9275 return get_errno(setsid());
9276 #ifdef TARGET_NR_sigaction
9277 case TARGET_NR_sigaction
:
9279 #if defined(TARGET_MIPS)
9280 struct target_sigaction act
, oact
, *pact
, *old_act
;
9283 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9284 return -TARGET_EFAULT
;
9285 act
._sa_handler
= old_act
->_sa_handler
;
9286 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9287 act
.sa_flags
= old_act
->sa_flags
;
9288 unlock_user_struct(old_act
, arg2
, 0);
9294 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9296 if (!is_error(ret
) && arg3
) {
9297 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9298 return -TARGET_EFAULT
;
9299 old_act
->_sa_handler
= oact
._sa_handler
;
9300 old_act
->sa_flags
= oact
.sa_flags
;
9301 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9302 old_act
->sa_mask
.sig
[1] = 0;
9303 old_act
->sa_mask
.sig
[2] = 0;
9304 old_act
->sa_mask
.sig
[3] = 0;
9305 unlock_user_struct(old_act
, arg3
, 1);
9308 struct target_old_sigaction
*old_act
;
9309 struct target_sigaction act
, oact
, *pact
;
9311 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9312 return -TARGET_EFAULT
;
9313 act
._sa_handler
= old_act
->_sa_handler
;
9314 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9315 act
.sa_flags
= old_act
->sa_flags
;
9316 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9317 act
.sa_restorer
= old_act
->sa_restorer
;
9319 unlock_user_struct(old_act
, arg2
, 0);
9324 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9325 if (!is_error(ret
) && arg3
) {
9326 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9327 return -TARGET_EFAULT
;
9328 old_act
->_sa_handler
= oact
._sa_handler
;
9329 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9330 old_act
->sa_flags
= oact
.sa_flags
;
9331 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9332 old_act
->sa_restorer
= oact
.sa_restorer
;
9334 unlock_user_struct(old_act
, arg3
, 1);
9340 case TARGET_NR_rt_sigaction
:
9343 * For Alpha and SPARC this is a 5 argument syscall, with
9344 * a 'restorer' parameter which must be copied into the
9345 * sa_restorer field of the sigaction struct.
9346 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9347 * and arg5 is the sigsetsize.
9349 #if defined(TARGET_ALPHA)
9350 target_ulong sigsetsize
= arg4
;
9351 target_ulong restorer
= arg5
;
9352 #elif defined(TARGET_SPARC)
9353 target_ulong restorer
= arg4
;
9354 target_ulong sigsetsize
= arg5
;
9356 target_ulong sigsetsize
= arg4
;
9357 target_ulong restorer
= 0;
9359 struct target_sigaction
*act
= NULL
;
9360 struct target_sigaction
*oact
= NULL
;
9362 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9363 return -TARGET_EINVAL
;
9365 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9366 return -TARGET_EFAULT
;
9368 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9369 ret
= -TARGET_EFAULT
;
9371 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9373 unlock_user_struct(oact
, arg3
, 1);
9377 unlock_user_struct(act
, arg2
, 0);
9381 #ifdef TARGET_NR_sgetmask /* not on alpha */
9382 case TARGET_NR_sgetmask
:
9385 abi_ulong target_set
;
9386 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9388 host_to_target_old_sigset(&target_set
, &cur_set
);
9394 #ifdef TARGET_NR_ssetmask /* not on alpha */
9395 case TARGET_NR_ssetmask
:
9398 abi_ulong target_set
= arg1
;
9399 target_to_host_old_sigset(&set
, &target_set
);
9400 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9402 host_to_target_old_sigset(&target_set
, &oset
);
9408 #ifdef TARGET_NR_sigprocmask
9409 case TARGET_NR_sigprocmask
:
9411 #if defined(TARGET_ALPHA)
9412 sigset_t set
, oldset
;
9417 case TARGET_SIG_BLOCK
:
9420 case TARGET_SIG_UNBLOCK
:
9423 case TARGET_SIG_SETMASK
:
9427 return -TARGET_EINVAL
;
9430 target_to_host_old_sigset(&set
, &mask
);
9432 ret
= do_sigprocmask(how
, &set
, &oldset
);
9433 if (!is_error(ret
)) {
9434 host_to_target_old_sigset(&mask
, &oldset
);
9436 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9439 sigset_t set
, oldset
, *set_ptr
;
9443 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9445 return -TARGET_EFAULT
;
9447 target_to_host_old_sigset(&set
, p
);
9448 unlock_user(p
, arg2
, 0);
9451 case TARGET_SIG_BLOCK
:
9454 case TARGET_SIG_UNBLOCK
:
9457 case TARGET_SIG_SETMASK
:
9461 return -TARGET_EINVAL
;
9467 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9468 if (!is_error(ret
) && arg3
) {
9469 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9470 return -TARGET_EFAULT
;
9471 host_to_target_old_sigset(p
, &oldset
);
9472 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9478 case TARGET_NR_rt_sigprocmask
:
9481 sigset_t set
, oldset
, *set_ptr
;
9483 if (arg4
!= sizeof(target_sigset_t
)) {
9484 return -TARGET_EINVAL
;
9488 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9490 return -TARGET_EFAULT
;
9492 target_to_host_sigset(&set
, p
);
9493 unlock_user(p
, arg2
, 0);
9496 case TARGET_SIG_BLOCK
:
9499 case TARGET_SIG_UNBLOCK
:
9502 case TARGET_SIG_SETMASK
:
9506 return -TARGET_EINVAL
;
9512 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9513 if (!is_error(ret
) && arg3
) {
9514 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9515 return -TARGET_EFAULT
;
9516 host_to_target_sigset(p
, &oldset
);
9517 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9521 #ifdef TARGET_NR_sigpending
9522 case TARGET_NR_sigpending
:
9525 ret
= get_errno(sigpending(&set
));
9526 if (!is_error(ret
)) {
9527 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9528 return -TARGET_EFAULT
;
9529 host_to_target_old_sigset(p
, &set
);
9530 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9535 case TARGET_NR_rt_sigpending
:
9539 /* Yes, this check is >, not != like most. We follow the kernel's
9540 * logic and it does it like this because it implements
9541 * NR_sigpending through the same code path, and in that case
9542 * the old_sigset_t is smaller in size.
9544 if (arg2
> sizeof(target_sigset_t
)) {
9545 return -TARGET_EINVAL
;
9548 ret
= get_errno(sigpending(&set
));
9549 if (!is_error(ret
)) {
9550 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9551 return -TARGET_EFAULT
;
9552 host_to_target_sigset(p
, &set
);
9553 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9557 #ifdef TARGET_NR_sigsuspend
9558 case TARGET_NR_sigsuspend
:
9560 TaskState
*ts
= cpu
->opaque
;
9561 #if defined(TARGET_ALPHA)
9562 abi_ulong mask
= arg1
;
9563 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9565 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9566 return -TARGET_EFAULT
;
9567 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9568 unlock_user(p
, arg1
, 0);
9570 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9572 if (ret
!= -QEMU_ERESTARTSYS
) {
9573 ts
->in_sigsuspend
= 1;
9578 case TARGET_NR_rt_sigsuspend
:
9580 TaskState
*ts
= cpu
->opaque
;
9582 if (arg2
!= sizeof(target_sigset_t
)) {
9583 return -TARGET_EINVAL
;
9585 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9586 return -TARGET_EFAULT
;
9587 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9588 unlock_user(p
, arg1
, 0);
9589 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9591 if (ret
!= -QEMU_ERESTARTSYS
) {
9592 ts
->in_sigsuspend
= 1;
9596 #ifdef TARGET_NR_rt_sigtimedwait
9597 case TARGET_NR_rt_sigtimedwait
:
9600 struct timespec uts
, *puts
;
9603 if (arg4
!= sizeof(target_sigset_t
)) {
9604 return -TARGET_EINVAL
;
9607 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9608 return -TARGET_EFAULT
;
9609 target_to_host_sigset(&set
, p
);
9610 unlock_user(p
, arg1
, 0);
9613 if (target_to_host_timespec(puts
, arg3
)) {
9614 return -TARGET_EFAULT
;
9619 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9621 if (!is_error(ret
)) {
9623 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9626 return -TARGET_EFAULT
;
9628 host_to_target_siginfo(p
, &uinfo
);
9629 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9631 ret
= host_to_target_signal(ret
);
9636 #ifdef TARGET_NR_rt_sigtimedwait_time64
9637 case TARGET_NR_rt_sigtimedwait_time64
:
9640 struct timespec uts
, *puts
;
9643 if (arg4
!= sizeof(target_sigset_t
)) {
9644 return -TARGET_EINVAL
;
9647 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9649 return -TARGET_EFAULT
;
9651 target_to_host_sigset(&set
, p
);
9652 unlock_user(p
, arg1
, 0);
9655 if (target_to_host_timespec64(puts
, arg3
)) {
9656 return -TARGET_EFAULT
;
9661 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9663 if (!is_error(ret
)) {
9665 p
= lock_user(VERIFY_WRITE
, arg2
,
9666 sizeof(target_siginfo_t
), 0);
9668 return -TARGET_EFAULT
;
9670 host_to_target_siginfo(p
, &uinfo
);
9671 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9673 ret
= host_to_target_signal(ret
);
9678 case TARGET_NR_rt_sigqueueinfo
:
9682 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9684 return -TARGET_EFAULT
;
9686 target_to_host_siginfo(&uinfo
, p
);
9687 unlock_user(p
, arg3
, 0);
9688 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9691 case TARGET_NR_rt_tgsigqueueinfo
:
9695 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9697 return -TARGET_EFAULT
;
9699 target_to_host_siginfo(&uinfo
, p
);
9700 unlock_user(p
, arg4
, 0);
9701 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9704 #ifdef TARGET_NR_sigreturn
9705 case TARGET_NR_sigreturn
:
9706 if (block_signals()) {
9707 return -QEMU_ERESTARTSYS
;
9709 return do_sigreturn(cpu_env
);
9711 case TARGET_NR_rt_sigreturn
:
9712 if (block_signals()) {
9713 return -QEMU_ERESTARTSYS
;
9715 return do_rt_sigreturn(cpu_env
);
9716 case TARGET_NR_sethostname
:
9717 if (!(p
= lock_user_string(arg1
)))
9718 return -TARGET_EFAULT
;
9719 ret
= get_errno(sethostname(p
, arg2
));
9720 unlock_user(p
, arg1
, 0);
9722 #ifdef TARGET_NR_setrlimit
9723 case TARGET_NR_setrlimit
:
9725 int resource
= target_to_host_resource(arg1
);
9726 struct target_rlimit
*target_rlim
;
9728 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9729 return -TARGET_EFAULT
;
9730 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9731 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9732 unlock_user_struct(target_rlim
, arg2
, 0);
9734 * If we just passed through resource limit settings for memory then
9735 * they would also apply to QEMU's own allocations, and QEMU will
9736 * crash or hang or die if its allocations fail. Ideally we would
9737 * track the guest allocations in QEMU and apply the limits ourselves.
9738 * For now, just tell the guest the call succeeded but don't actually
9741 if (resource
!= RLIMIT_AS
&&
9742 resource
!= RLIMIT_DATA
&&
9743 resource
!= RLIMIT_STACK
) {
9744 return get_errno(setrlimit(resource
, &rlim
));
9750 #ifdef TARGET_NR_getrlimit
9751 case TARGET_NR_getrlimit
:
9753 int resource
= target_to_host_resource(arg1
);
9754 struct target_rlimit
*target_rlim
;
9757 ret
= get_errno(getrlimit(resource
, &rlim
));
9758 if (!is_error(ret
)) {
9759 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9760 return -TARGET_EFAULT
;
9761 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9762 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9763 unlock_user_struct(target_rlim
, arg2
, 1);
9768 case TARGET_NR_getrusage
:
9770 struct rusage rusage
;
9771 ret
= get_errno(getrusage(arg1
, &rusage
));
9772 if (!is_error(ret
)) {
9773 ret
= host_to_target_rusage(arg2
, &rusage
);
9777 #if defined(TARGET_NR_gettimeofday)
9778 case TARGET_NR_gettimeofday
:
9783 ret
= get_errno(gettimeofday(&tv
, &tz
));
9784 if (!is_error(ret
)) {
9785 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9786 return -TARGET_EFAULT
;
9788 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9789 return -TARGET_EFAULT
;
9795 #if defined(TARGET_NR_settimeofday)
9796 case TARGET_NR_settimeofday
:
9798 struct timeval tv
, *ptv
= NULL
;
9799 struct timezone tz
, *ptz
= NULL
;
9802 if (copy_from_user_timeval(&tv
, arg1
)) {
9803 return -TARGET_EFAULT
;
9809 if (copy_from_user_timezone(&tz
, arg2
)) {
9810 return -TARGET_EFAULT
;
9815 return get_errno(settimeofday(ptv
, ptz
));
9818 #if defined(TARGET_NR_select)
9819 case TARGET_NR_select
:
9820 #if defined(TARGET_WANT_NI_OLD_SELECT)
9821 /* some architectures used to have old_select here
9822 * but now ENOSYS it.
9824 ret
= -TARGET_ENOSYS
;
9825 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9826 ret
= do_old_select(arg1
);
9828 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9832 #ifdef TARGET_NR_pselect6
9833 case TARGET_NR_pselect6
:
9834 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9836 #ifdef TARGET_NR_pselect6_time64
9837 case TARGET_NR_pselect6_time64
:
9838 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9840 #ifdef TARGET_NR_symlink
9841 case TARGET_NR_symlink
:
9844 p
= lock_user_string(arg1
);
9845 p2
= lock_user_string(arg2
);
9847 ret
= -TARGET_EFAULT
;
9849 ret
= get_errno(symlink(p
, p2
));
9850 unlock_user(p2
, arg2
, 0);
9851 unlock_user(p
, arg1
, 0);
9855 #if defined(TARGET_NR_symlinkat)
9856 case TARGET_NR_symlinkat
:
9859 p
= lock_user_string(arg1
);
9860 p2
= lock_user_string(arg3
);
9862 ret
= -TARGET_EFAULT
;
9864 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9865 unlock_user(p2
, arg3
, 0);
9866 unlock_user(p
, arg1
, 0);
9870 #ifdef TARGET_NR_readlink
9871 case TARGET_NR_readlink
:
9874 p
= lock_user_string(arg1
);
9875 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9877 ret
= -TARGET_EFAULT
;
9879 /* Short circuit this for the magic exe check. */
9880 ret
= -TARGET_EINVAL
;
9881 } else if (is_proc_myself((const char *)p
, "exe")) {
9882 char real
[PATH_MAX
], *temp
;
9883 temp
= realpath(exec_path
, real
);
9884 /* Return value is # of bytes that we wrote to the buffer. */
9886 ret
= get_errno(-1);
9888 /* Don't worry about sign mismatch as earlier mapping
9889 * logic would have thrown a bad address error. */
9890 ret
= MIN(strlen(real
), arg3
);
9891 /* We cannot NUL terminate the string. */
9892 memcpy(p2
, real
, ret
);
9895 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9897 unlock_user(p2
, arg2
, ret
);
9898 unlock_user(p
, arg1
, 0);
9902 #if defined(TARGET_NR_readlinkat)
9903 case TARGET_NR_readlinkat
:
9906 p
= lock_user_string(arg2
);
9907 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9909 ret
= -TARGET_EFAULT
;
9910 } else if (is_proc_myself((const char *)p
, "exe")) {
9911 char real
[PATH_MAX
], *temp
;
9912 temp
= realpath(exec_path
, real
);
9913 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9914 snprintf((char *)p2
, arg4
, "%s", real
);
9916 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9918 unlock_user(p2
, arg3
, ret
);
9919 unlock_user(p
, arg2
, 0);
9923 #ifdef TARGET_NR_swapon
9924 case TARGET_NR_swapon
:
9925 if (!(p
= lock_user_string(arg1
)))
9926 return -TARGET_EFAULT
;
9927 ret
= get_errno(swapon(p
, arg2
));
9928 unlock_user(p
, arg1
, 0);
9931 case TARGET_NR_reboot
:
9932 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9933 /* arg4 must be ignored in all other cases */
9934 p
= lock_user_string(arg4
);
9936 return -TARGET_EFAULT
;
9938 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9939 unlock_user(p
, arg4
, 0);
9941 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9944 #ifdef TARGET_NR_mmap
9945 case TARGET_NR_mmap
:
9946 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9947 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9948 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9949 || defined(TARGET_S390X)
9952 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9953 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9954 return -TARGET_EFAULT
;
9961 unlock_user(v
, arg1
, 0);
9962 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9963 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9967 /* mmap pointers are always untagged */
9968 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9969 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9975 #ifdef TARGET_NR_mmap2
9976 case TARGET_NR_mmap2
:
9978 #define MMAP_SHIFT 12
9980 ret
= target_mmap(arg1
, arg2
, arg3
,
9981 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9982 arg5
, arg6
<< MMAP_SHIFT
);
9983 return get_errno(ret
);
9985 case TARGET_NR_munmap
:
9986 arg1
= cpu_untagged_addr(cpu
, arg1
);
9987 return get_errno(target_munmap(arg1
, arg2
));
9988 case TARGET_NR_mprotect
:
9989 arg1
= cpu_untagged_addr(cpu
, arg1
);
9991 TaskState
*ts
= cpu
->opaque
;
9992 /* Special hack to detect libc making the stack executable. */
9993 if ((arg3
& PROT_GROWSDOWN
)
9994 && arg1
>= ts
->info
->stack_limit
9995 && arg1
<= ts
->info
->start_stack
) {
9996 arg3
&= ~PROT_GROWSDOWN
;
9997 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9998 arg1
= ts
->info
->stack_limit
;
10001 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
10002 #ifdef TARGET_NR_mremap
10003 case TARGET_NR_mremap
:
10004 arg1
= cpu_untagged_addr(cpu
, arg1
);
10005 /* mremap new_addr (arg5) is always untagged */
10006 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
10008 /* ??? msync/mlock/munlock are broken for softmmu. */
10009 #ifdef TARGET_NR_msync
10010 case TARGET_NR_msync
:
10011 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
10013 #ifdef TARGET_NR_mlock
10014 case TARGET_NR_mlock
:
10015 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
10017 #ifdef TARGET_NR_munlock
10018 case TARGET_NR_munlock
:
10019 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
10021 #ifdef TARGET_NR_mlockall
10022 case TARGET_NR_mlockall
:
10023 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
10025 #ifdef TARGET_NR_munlockall
10026 case TARGET_NR_munlockall
:
10027 return get_errno(munlockall());
10029 #ifdef TARGET_NR_truncate
10030 case TARGET_NR_truncate
:
10031 if (!(p
= lock_user_string(arg1
)))
10032 return -TARGET_EFAULT
;
10033 ret
= get_errno(truncate(p
, arg2
));
10034 unlock_user(p
, arg1
, 0);
10037 #ifdef TARGET_NR_ftruncate
10038 case TARGET_NR_ftruncate
:
10039 return get_errno(ftruncate(arg1
, arg2
));
10041 case TARGET_NR_fchmod
:
10042 return get_errno(fchmod(arg1
, arg2
));
10043 #if defined(TARGET_NR_fchmodat)
10044 case TARGET_NR_fchmodat
:
10045 if (!(p
= lock_user_string(arg2
)))
10046 return -TARGET_EFAULT
;
10047 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
10048 unlock_user(p
, arg2
, 0);
10051 case TARGET_NR_getpriority
:
10052 /* Note that negative values are valid for getpriority, so we must
10053 differentiate based on errno settings. */
10055 ret
= getpriority(arg1
, arg2
);
10056 if (ret
== -1 && errno
!= 0) {
10057 return -host_to_target_errno(errno
);
10059 #ifdef TARGET_ALPHA
10060 /* Return value is the unbiased priority. Signal no error. */
10061 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
10063 /* Return value is a biased priority to avoid negative numbers. */
10067 case TARGET_NR_setpriority
:
10068 return get_errno(setpriority(arg1
, arg2
, arg3
));
10069 #ifdef TARGET_NR_statfs
10070 case TARGET_NR_statfs
:
10071 if (!(p
= lock_user_string(arg1
))) {
10072 return -TARGET_EFAULT
;
10074 ret
= get_errno(statfs(path(p
), &stfs
));
10075 unlock_user(p
, arg1
, 0);
10077 if (!is_error(ret
)) {
10078 struct target_statfs
*target_stfs
;
10080 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
10081 return -TARGET_EFAULT
;
10082 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10083 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10084 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10085 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10086 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10087 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10088 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10089 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10090 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10091 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10092 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10093 #ifdef _STATFS_F_FLAGS
10094 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10096 __put_user(0, &target_stfs
->f_flags
);
10098 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10099 unlock_user_struct(target_stfs
, arg2
, 1);
10103 #ifdef TARGET_NR_fstatfs
10104 case TARGET_NR_fstatfs
:
10105 ret
= get_errno(fstatfs(arg1
, &stfs
));
10106 goto convert_statfs
;
10108 #ifdef TARGET_NR_statfs64
10109 case TARGET_NR_statfs64
:
10110 if (!(p
= lock_user_string(arg1
))) {
10111 return -TARGET_EFAULT
;
10113 ret
= get_errno(statfs(path(p
), &stfs
));
10114 unlock_user(p
, arg1
, 0);
10116 if (!is_error(ret
)) {
10117 struct target_statfs64
*target_stfs
;
10119 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
10120 return -TARGET_EFAULT
;
10121 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10122 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10123 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10124 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10125 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10126 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10127 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10128 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10129 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10130 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10131 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10132 #ifdef _STATFS_F_FLAGS
10133 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10135 __put_user(0, &target_stfs
->f_flags
);
10137 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10138 unlock_user_struct(target_stfs
, arg3
, 1);
10141 case TARGET_NR_fstatfs64
:
10142 ret
= get_errno(fstatfs(arg1
, &stfs
));
10143 goto convert_statfs64
;
10145 #ifdef TARGET_NR_socketcall
10146 case TARGET_NR_socketcall
:
10147 return do_socketcall(arg1
, arg2
);
10149 #ifdef TARGET_NR_accept
10150 case TARGET_NR_accept
:
10151 return do_accept4(arg1
, arg2
, arg3
, 0);
10153 #ifdef TARGET_NR_accept4
10154 case TARGET_NR_accept4
:
10155 return do_accept4(arg1
, arg2
, arg3
, arg4
);
10157 #ifdef TARGET_NR_bind
10158 case TARGET_NR_bind
:
10159 return do_bind(arg1
, arg2
, arg3
);
10161 #ifdef TARGET_NR_connect
10162 case TARGET_NR_connect
:
10163 return do_connect(arg1
, arg2
, arg3
);
10165 #ifdef TARGET_NR_getpeername
10166 case TARGET_NR_getpeername
:
10167 return do_getpeername(arg1
, arg2
, arg3
);
10169 #ifdef TARGET_NR_getsockname
10170 case TARGET_NR_getsockname
:
10171 return do_getsockname(arg1
, arg2
, arg3
);
10173 #ifdef TARGET_NR_getsockopt
10174 case TARGET_NR_getsockopt
:
10175 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
10177 #ifdef TARGET_NR_listen
10178 case TARGET_NR_listen
:
10179 return get_errno(listen(arg1
, arg2
));
10181 #ifdef TARGET_NR_recv
10182 case TARGET_NR_recv
:
10183 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
10185 #ifdef TARGET_NR_recvfrom
10186 case TARGET_NR_recvfrom
:
10187 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10189 #ifdef TARGET_NR_recvmsg
10190 case TARGET_NR_recvmsg
:
10191 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
10193 #ifdef TARGET_NR_send
10194 case TARGET_NR_send
:
10195 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
10197 #ifdef TARGET_NR_sendmsg
10198 case TARGET_NR_sendmsg
:
10199 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
10201 #ifdef TARGET_NR_sendmmsg
10202 case TARGET_NR_sendmmsg
:
10203 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
10205 #ifdef TARGET_NR_recvmmsg
10206 case TARGET_NR_recvmmsg
:
10207 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
10209 #ifdef TARGET_NR_sendto
10210 case TARGET_NR_sendto
:
10211 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10213 #ifdef TARGET_NR_shutdown
10214 case TARGET_NR_shutdown
:
10215 return get_errno(shutdown(arg1
, arg2
));
10217 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10218 case TARGET_NR_getrandom
:
10219 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10221 return -TARGET_EFAULT
;
10223 ret
= get_errno(getrandom(p
, arg2
, arg3
));
10224 unlock_user(p
, arg1
, ret
);
10227 #ifdef TARGET_NR_socket
10228 case TARGET_NR_socket
:
10229 return do_socket(arg1
, arg2
, arg3
);
10231 #ifdef TARGET_NR_socketpair
10232 case TARGET_NR_socketpair
:
10233 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
10235 #ifdef TARGET_NR_setsockopt
10236 case TARGET_NR_setsockopt
:
10237 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10239 #if defined(TARGET_NR_syslog)
10240 case TARGET_NR_syslog
:
10245 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10246 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10247 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10248 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10249 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10250 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10251 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10252 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10253 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10254 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10255 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10256 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10259 return -TARGET_EINVAL
;
10264 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10266 return -TARGET_EFAULT
;
10268 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10269 unlock_user(p
, arg2
, arg3
);
10273 return -TARGET_EINVAL
;
10278 case TARGET_NR_setitimer
:
10280 struct itimerval value
, ovalue
, *pvalue
;
10284 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10285 || copy_from_user_timeval(&pvalue
->it_value
,
10286 arg2
+ sizeof(struct target_timeval
)))
10287 return -TARGET_EFAULT
;
10291 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10292 if (!is_error(ret
) && arg3
) {
10293 if (copy_to_user_timeval(arg3
,
10294 &ovalue
.it_interval
)
10295 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10297 return -TARGET_EFAULT
;
10301 case TARGET_NR_getitimer
:
10303 struct itimerval value
;
10305 ret
= get_errno(getitimer(arg1
, &value
));
10306 if (!is_error(ret
) && arg2
) {
10307 if (copy_to_user_timeval(arg2
,
10308 &value
.it_interval
)
10309 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10311 return -TARGET_EFAULT
;
10315 #ifdef TARGET_NR_stat
10316 case TARGET_NR_stat
:
10317 if (!(p
= lock_user_string(arg1
))) {
10318 return -TARGET_EFAULT
;
10320 ret
= get_errno(stat(path(p
), &st
));
10321 unlock_user(p
, arg1
, 0);
10324 #ifdef TARGET_NR_lstat
10325 case TARGET_NR_lstat
:
10326 if (!(p
= lock_user_string(arg1
))) {
10327 return -TARGET_EFAULT
;
10329 ret
= get_errno(lstat(path(p
), &st
));
10330 unlock_user(p
, arg1
, 0);
10333 #ifdef TARGET_NR_fstat
10334 case TARGET_NR_fstat
:
10336 ret
= get_errno(fstat(arg1
, &st
));
10337 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10340 if (!is_error(ret
)) {
10341 struct target_stat
*target_st
;
10343 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10344 return -TARGET_EFAULT
;
10345 memset(target_st
, 0, sizeof(*target_st
));
10346 __put_user(st
.st_dev
, &target_st
->st_dev
);
10347 __put_user(st
.st_ino
, &target_st
->st_ino
);
10348 __put_user(st
.st_mode
, &target_st
->st_mode
);
10349 __put_user(st
.st_uid
, &target_st
->st_uid
);
10350 __put_user(st
.st_gid
, &target_st
->st_gid
);
10351 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10352 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10353 __put_user(st
.st_size
, &target_st
->st_size
);
10354 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10355 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10356 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10357 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10358 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10359 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10360 __put_user(st
.st_atim
.tv_nsec
,
10361 &target_st
->target_st_atime_nsec
);
10362 __put_user(st
.st_mtim
.tv_nsec
,
10363 &target_st
->target_st_mtime_nsec
);
10364 __put_user(st
.st_ctim
.tv_nsec
,
10365 &target_st
->target_st_ctime_nsec
);
10367 unlock_user_struct(target_st
, arg2
, 1);
10372 case TARGET_NR_vhangup
:
10373 return get_errno(vhangup());
10374 #ifdef TARGET_NR_syscall
10375 case TARGET_NR_syscall
:
10376 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10377 arg6
, arg7
, arg8
, 0);
10379 #if defined(TARGET_NR_wait4)
10380 case TARGET_NR_wait4
:
10383 abi_long status_ptr
= arg2
;
10384 struct rusage rusage
, *rusage_ptr
;
10385 abi_ulong target_rusage
= arg4
;
10386 abi_long rusage_err
;
10388 rusage_ptr
= &rusage
;
10391 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10392 if (!is_error(ret
)) {
10393 if (status_ptr
&& ret
) {
10394 status
= host_to_target_waitstatus(status
);
10395 if (put_user_s32(status
, status_ptr
))
10396 return -TARGET_EFAULT
;
10398 if (target_rusage
) {
10399 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10408 #ifdef TARGET_NR_swapoff
10409 case TARGET_NR_swapoff
:
10410 if (!(p
= lock_user_string(arg1
)))
10411 return -TARGET_EFAULT
;
10412 ret
= get_errno(swapoff(p
));
10413 unlock_user(p
, arg1
, 0);
10416 case TARGET_NR_sysinfo
:
10418 struct target_sysinfo
*target_value
;
10419 struct sysinfo value
;
10420 ret
= get_errno(sysinfo(&value
));
10421 if (!is_error(ret
) && arg1
)
10423 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10424 return -TARGET_EFAULT
;
10425 __put_user(value
.uptime
, &target_value
->uptime
);
10426 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10427 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10428 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10429 __put_user(value
.totalram
, &target_value
->totalram
);
10430 __put_user(value
.freeram
, &target_value
->freeram
);
10431 __put_user(value
.sharedram
, &target_value
->sharedram
);
10432 __put_user(value
.bufferram
, &target_value
->bufferram
);
10433 __put_user(value
.totalswap
, &target_value
->totalswap
);
10434 __put_user(value
.freeswap
, &target_value
->freeswap
);
10435 __put_user(value
.procs
, &target_value
->procs
);
10436 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10437 __put_user(value
.freehigh
, &target_value
->freehigh
);
10438 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10439 unlock_user_struct(target_value
, arg1
, 1);
10443 #ifdef TARGET_NR_ipc
10444 case TARGET_NR_ipc
:
10445 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10447 #ifdef TARGET_NR_semget
10448 case TARGET_NR_semget
:
10449 return get_errno(semget(arg1
, arg2
, arg3
));
10451 #ifdef TARGET_NR_semop
10452 case TARGET_NR_semop
:
10453 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10455 #ifdef TARGET_NR_semtimedop
10456 case TARGET_NR_semtimedop
:
10457 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10459 #ifdef TARGET_NR_semtimedop_time64
10460 case TARGET_NR_semtimedop_time64
:
10461 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10463 #ifdef TARGET_NR_semctl
10464 case TARGET_NR_semctl
:
10465 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10467 #ifdef TARGET_NR_msgctl
10468 case TARGET_NR_msgctl
:
10469 return do_msgctl(arg1
, arg2
, arg3
);
10471 #ifdef TARGET_NR_msgget
10472 case TARGET_NR_msgget
:
10473 return get_errno(msgget(arg1
, arg2
));
10475 #ifdef TARGET_NR_msgrcv
10476 case TARGET_NR_msgrcv
:
10477 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10479 #ifdef TARGET_NR_msgsnd
10480 case TARGET_NR_msgsnd
:
10481 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10483 #ifdef TARGET_NR_shmget
10484 case TARGET_NR_shmget
:
10485 return get_errno(shmget(arg1
, arg2
, arg3
));
10487 #ifdef TARGET_NR_shmctl
10488 case TARGET_NR_shmctl
:
10489 return do_shmctl(arg1
, arg2
, arg3
);
10491 #ifdef TARGET_NR_shmat
10492 case TARGET_NR_shmat
:
10493 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10495 #ifdef TARGET_NR_shmdt
10496 case TARGET_NR_shmdt
:
10497 return do_shmdt(arg1
);
10499 case TARGET_NR_fsync
:
10500 return get_errno(fsync(arg1
));
10501 case TARGET_NR_clone
:
10502 /* Linux manages to have three different orderings for its
10503 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10504 * match the kernel's CONFIG_CLONE_* settings.
10505 * Microblaze is further special in that it uses a sixth
10506 * implicit argument to clone for the TLS pointer.
10508 #if defined(TARGET_MICROBLAZE)
10509 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10510 #elif defined(TARGET_CLONE_BACKWARDS)
10511 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10512 #elif defined(TARGET_CLONE_BACKWARDS2)
10513 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10515 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10518 #ifdef __NR_exit_group
10519 /* new thread calls */
10520 case TARGET_NR_exit_group
:
10521 preexit_cleanup(cpu_env
, arg1
);
10522 return get_errno(exit_group(arg1
));
10524 case TARGET_NR_setdomainname
:
10525 if (!(p
= lock_user_string(arg1
)))
10526 return -TARGET_EFAULT
;
10527 ret
= get_errno(setdomainname(p
, arg2
));
10528 unlock_user(p
, arg1
, 0);
10530 case TARGET_NR_uname
:
10531 /* no need to transcode because we use the linux syscall */
10533 struct new_utsname
* buf
;
10535 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10536 return -TARGET_EFAULT
;
10537 ret
= get_errno(sys_uname(buf
));
10538 if (!is_error(ret
)) {
10539 /* Overwrite the native machine name with whatever is being
10541 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10542 sizeof(buf
->machine
));
10543 /* Allow the user to override the reported release. */
10544 if (qemu_uname_release
&& *qemu_uname_release
) {
10545 g_strlcpy(buf
->release
, qemu_uname_release
,
10546 sizeof(buf
->release
));
10549 unlock_user_struct(buf
, arg1
, 1);
10553 case TARGET_NR_modify_ldt
:
10554 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10555 #if !defined(TARGET_X86_64)
10556 case TARGET_NR_vm86
:
10557 return do_vm86(cpu_env
, arg1
, arg2
);
10560 #if defined(TARGET_NR_adjtimex)
10561 case TARGET_NR_adjtimex
:
10563 struct timex host_buf
;
10565 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10566 return -TARGET_EFAULT
;
10568 ret
= get_errno(adjtimex(&host_buf
));
10569 if (!is_error(ret
)) {
10570 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10571 return -TARGET_EFAULT
;
10577 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10578 case TARGET_NR_clock_adjtime
:
10580 struct timex htx
, *phtx
= &htx
;
10582 if (target_to_host_timex(phtx
, arg2
) != 0) {
10583 return -TARGET_EFAULT
;
10585 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10586 if (!is_error(ret
) && phtx
) {
10587 if (host_to_target_timex(arg2
, phtx
) != 0) {
10588 return -TARGET_EFAULT
;
10594 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10595 case TARGET_NR_clock_adjtime64
:
10599 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10600 return -TARGET_EFAULT
;
10602 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10603 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10604 return -TARGET_EFAULT
;
10609 case TARGET_NR_getpgid
:
10610 return get_errno(getpgid(arg1
));
10611 case TARGET_NR_fchdir
:
10612 return get_errno(fchdir(arg1
));
10613 case TARGET_NR_personality
:
10614 return get_errno(personality(arg1
));
10615 #ifdef TARGET_NR__llseek /* Not on alpha */
10616 case TARGET_NR__llseek
:
10619 #if !defined(__NR_llseek)
10620 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10622 ret
= get_errno(res
);
10627 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10629 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10630 return -TARGET_EFAULT
;
10635 #ifdef TARGET_NR_getdents
10636 case TARGET_NR_getdents
:
10637 return do_getdents(arg1
, arg2
, arg3
);
10638 #endif /* TARGET_NR_getdents */
10639 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10640 case TARGET_NR_getdents64
:
10641 return do_getdents64(arg1
, arg2
, arg3
);
10642 #endif /* TARGET_NR_getdents64 */
10643 #if defined(TARGET_NR__newselect)
10644 case TARGET_NR__newselect
:
10645 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10647 #ifdef TARGET_NR_poll
10648 case TARGET_NR_poll
:
10649 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10651 #ifdef TARGET_NR_ppoll
10652 case TARGET_NR_ppoll
:
10653 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10655 #ifdef TARGET_NR_ppoll_time64
10656 case TARGET_NR_ppoll_time64
:
10657 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10659 case TARGET_NR_flock
:
10660 /* NOTE: the flock constant seems to be the same for every
10662 return get_errno(safe_flock(arg1
, arg2
));
10663 case TARGET_NR_readv
:
10665 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10667 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10668 unlock_iovec(vec
, arg2
, arg3
, 1);
10670 ret
= -host_to_target_errno(errno
);
10674 case TARGET_NR_writev
:
10676 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10678 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10679 unlock_iovec(vec
, arg2
, arg3
, 0);
10681 ret
= -host_to_target_errno(errno
);
10685 #if defined(TARGET_NR_preadv)
10686 case TARGET_NR_preadv
:
10688 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10690 unsigned long low
, high
;
10692 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10693 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10694 unlock_iovec(vec
, arg2
, arg3
, 1);
10696 ret
= -host_to_target_errno(errno
);
10701 #if defined(TARGET_NR_pwritev)
10702 case TARGET_NR_pwritev
:
10704 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10706 unsigned long low
, high
;
10708 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10709 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10710 unlock_iovec(vec
, arg2
, arg3
, 0);
10712 ret
= -host_to_target_errno(errno
);
10717 case TARGET_NR_getsid
:
10718 return get_errno(getsid(arg1
));
10719 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10720 case TARGET_NR_fdatasync
:
10721 return get_errno(fdatasync(arg1
));
10723 case TARGET_NR_sched_getaffinity
:
10725 unsigned int mask_size
;
10726 unsigned long *mask
;
10729 * sched_getaffinity needs multiples of ulong, so need to take
10730 * care of mismatches between target ulong and host ulong sizes.
10732 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10733 return -TARGET_EINVAL
;
10735 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10737 mask
= alloca(mask_size
);
10738 memset(mask
, 0, mask_size
);
10739 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10741 if (!is_error(ret
)) {
10743 /* More data returned than the caller's buffer will fit.
10744 * This only happens if sizeof(abi_long) < sizeof(long)
10745 * and the caller passed us a buffer holding an odd number
10746 * of abi_longs. If the host kernel is actually using the
10747 * extra 4 bytes then fail EINVAL; otherwise we can just
10748 * ignore them and only copy the interesting part.
10750 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10751 if (numcpus
> arg2
* 8) {
10752 return -TARGET_EINVAL
;
10757 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10758 return -TARGET_EFAULT
;
10763 case TARGET_NR_sched_setaffinity
:
10765 unsigned int mask_size
;
10766 unsigned long *mask
;
10769 * sched_setaffinity needs multiples of ulong, so need to take
10770 * care of mismatches between target ulong and host ulong sizes.
10772 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10773 return -TARGET_EINVAL
;
10775 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10776 mask
= alloca(mask_size
);
10778 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10783 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10785 case TARGET_NR_getcpu
:
10787 unsigned cpu
, node
;
10788 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10789 arg2
? &node
: NULL
,
10791 if (is_error(ret
)) {
10794 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10795 return -TARGET_EFAULT
;
10797 if (arg2
&& put_user_u32(node
, arg2
)) {
10798 return -TARGET_EFAULT
;
10802 case TARGET_NR_sched_setparam
:
10804 struct target_sched_param
*target_schp
;
10805 struct sched_param schp
;
10808 return -TARGET_EINVAL
;
10810 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
10811 return -TARGET_EFAULT
;
10813 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10814 unlock_user_struct(target_schp
, arg2
, 0);
10815 return get_errno(sys_sched_setparam(arg1
, &schp
));
10817 case TARGET_NR_sched_getparam
:
10819 struct target_sched_param
*target_schp
;
10820 struct sched_param schp
;
10823 return -TARGET_EINVAL
;
10825 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
10826 if (!is_error(ret
)) {
10827 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
10828 return -TARGET_EFAULT
;
10830 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10831 unlock_user_struct(target_schp
, arg2
, 1);
10835 case TARGET_NR_sched_setscheduler
:
10837 struct target_sched_param
*target_schp
;
10838 struct sched_param schp
;
10840 return -TARGET_EINVAL
;
10842 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
10843 return -TARGET_EFAULT
;
10845 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10846 unlock_user_struct(target_schp
, arg3
, 0);
10847 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
10849 case TARGET_NR_sched_getscheduler
:
10850 return get_errno(sys_sched_getscheduler(arg1
));
10851 case TARGET_NR_sched_getattr
:
10853 struct target_sched_attr
*target_scha
;
10854 struct sched_attr scha
;
10856 return -TARGET_EINVAL
;
10858 if (arg3
> sizeof(scha
)) {
10859 arg3
= sizeof(scha
);
10861 ret
= get_errno(sys_sched_getattr(arg1
, &scha
, arg3
, arg4
));
10862 if (!is_error(ret
)) {
10863 target_scha
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10864 if (!target_scha
) {
10865 return -TARGET_EFAULT
;
10867 target_scha
->size
= tswap32(scha
.size
);
10868 target_scha
->sched_policy
= tswap32(scha
.sched_policy
);
10869 target_scha
->sched_flags
= tswap64(scha
.sched_flags
);
10870 target_scha
->sched_nice
= tswap32(scha
.sched_nice
);
10871 target_scha
->sched_priority
= tswap32(scha
.sched_priority
);
10872 target_scha
->sched_runtime
= tswap64(scha
.sched_runtime
);
10873 target_scha
->sched_deadline
= tswap64(scha
.sched_deadline
);
10874 target_scha
->sched_period
= tswap64(scha
.sched_period
);
10875 if (scha
.size
> offsetof(struct sched_attr
, sched_util_min
)) {
10876 target_scha
->sched_util_min
= tswap32(scha
.sched_util_min
);
10877 target_scha
->sched_util_max
= tswap32(scha
.sched_util_max
);
10879 unlock_user(target_scha
, arg2
, arg3
);
10883 case TARGET_NR_sched_setattr
:
10885 struct target_sched_attr
*target_scha
;
10886 struct sched_attr scha
;
10890 return -TARGET_EINVAL
;
10892 if (get_user_u32(size
, arg2
)) {
10893 return -TARGET_EFAULT
;
10896 size
= offsetof(struct target_sched_attr
, sched_util_min
);
10898 if (size
< offsetof(struct target_sched_attr
, sched_util_min
)) {
10899 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
10900 return -TARGET_EFAULT
;
10902 return -TARGET_E2BIG
;
10905 zeroed
= check_zeroed_user(arg2
, sizeof(struct target_sched_attr
), size
);
10908 } else if (zeroed
== 0) {
10909 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
10910 return -TARGET_EFAULT
;
10912 return -TARGET_E2BIG
;
10914 if (size
> sizeof(struct target_sched_attr
)) {
10915 size
= sizeof(struct target_sched_attr
);
10918 target_scha
= lock_user(VERIFY_READ
, arg2
, size
, 1);
10919 if (!target_scha
) {
10920 return -TARGET_EFAULT
;
10923 scha
.sched_policy
= tswap32(target_scha
->sched_policy
);
10924 scha
.sched_flags
= tswap64(target_scha
->sched_flags
);
10925 scha
.sched_nice
= tswap32(target_scha
->sched_nice
);
10926 scha
.sched_priority
= tswap32(target_scha
->sched_priority
);
10927 scha
.sched_runtime
= tswap64(target_scha
->sched_runtime
);
10928 scha
.sched_deadline
= tswap64(target_scha
->sched_deadline
);
10929 scha
.sched_period
= tswap64(target_scha
->sched_period
);
10930 if (size
> offsetof(struct target_sched_attr
, sched_util_min
)) {
10931 scha
.sched_util_min
= tswap32(target_scha
->sched_util_min
);
10932 scha
.sched_util_max
= tswap32(target_scha
->sched_util_max
);
10934 unlock_user(target_scha
, arg2
, 0);
10935 return get_errno(sys_sched_setattr(arg1
, &scha
, arg3
));
10937 case TARGET_NR_sched_yield
:
10938 return get_errno(sched_yield());
10939 case TARGET_NR_sched_get_priority_max
:
10940 return get_errno(sched_get_priority_max(arg1
));
10941 case TARGET_NR_sched_get_priority_min
:
10942 return get_errno(sched_get_priority_min(arg1
));
10943 #ifdef TARGET_NR_sched_rr_get_interval
10944 case TARGET_NR_sched_rr_get_interval
:
10946 struct timespec ts
;
10947 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10948 if (!is_error(ret
)) {
10949 ret
= host_to_target_timespec(arg2
, &ts
);
10954 #ifdef TARGET_NR_sched_rr_get_interval_time64
10955 case TARGET_NR_sched_rr_get_interval_time64
:
10957 struct timespec ts
;
10958 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10959 if (!is_error(ret
)) {
10960 ret
= host_to_target_timespec64(arg2
, &ts
);
10965 #if defined(TARGET_NR_nanosleep)
10966 case TARGET_NR_nanosleep
:
10968 struct timespec req
, rem
;
10969 target_to_host_timespec(&req
, arg1
);
10970 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10971 if (is_error(ret
) && arg2
) {
10972 host_to_target_timespec(arg2
, &rem
);
10977 case TARGET_NR_prctl
:
10978 return do_prctl(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
10980 #ifdef TARGET_NR_arch_prctl
10981 case TARGET_NR_arch_prctl
:
10982 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10984 #ifdef TARGET_NR_pread64
10985 case TARGET_NR_pread64
:
10986 if (regpairs_aligned(cpu_env
, num
)) {
10990 if (arg2
== 0 && arg3
== 0) {
10991 /* Special-case NULL buffer and zero length, which should succeed */
10994 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10996 return -TARGET_EFAULT
;
10999 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11000 unlock_user(p
, arg2
, ret
);
11002 case TARGET_NR_pwrite64
:
11003 if (regpairs_aligned(cpu_env
, num
)) {
11007 if (arg2
== 0 && arg3
== 0) {
11008 /* Special-case NULL buffer and zero length, which should succeed */
11011 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
11013 return -TARGET_EFAULT
;
11016 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11017 unlock_user(p
, arg2
, 0);
11020 case TARGET_NR_getcwd
:
11021 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11022 return -TARGET_EFAULT
;
11023 ret
= get_errno(sys_getcwd1(p
, arg2
));
11024 unlock_user(p
, arg1
, ret
);
11026 case TARGET_NR_capget
:
11027 case TARGET_NR_capset
:
11029 struct target_user_cap_header
*target_header
;
11030 struct target_user_cap_data
*target_data
= NULL
;
11031 struct __user_cap_header_struct header
;
11032 struct __user_cap_data_struct data
[2];
11033 struct __user_cap_data_struct
*dataptr
= NULL
;
11034 int i
, target_datalen
;
11035 int data_items
= 1;
11037 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11038 return -TARGET_EFAULT
;
11040 header
.version
= tswap32(target_header
->version
);
11041 header
.pid
= tswap32(target_header
->pid
);
11043 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11044 /* Version 2 and up takes pointer to two user_data structs */
11048 target_datalen
= sizeof(*target_data
) * data_items
;
11051 if (num
== TARGET_NR_capget
) {
11052 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11054 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11056 if (!target_data
) {
11057 unlock_user_struct(target_header
, arg1
, 0);
11058 return -TARGET_EFAULT
;
11061 if (num
== TARGET_NR_capset
) {
11062 for (i
= 0; i
< data_items
; i
++) {
11063 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11064 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11065 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11072 if (num
== TARGET_NR_capget
) {
11073 ret
= get_errno(capget(&header
, dataptr
));
11075 ret
= get_errno(capset(&header
, dataptr
));
11078 /* The kernel always updates version for both capget and capset */
11079 target_header
->version
= tswap32(header
.version
);
11080 unlock_user_struct(target_header
, arg1
, 1);
11083 if (num
== TARGET_NR_capget
) {
11084 for (i
= 0; i
< data_items
; i
++) {
11085 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11086 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11087 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11089 unlock_user(target_data
, arg2
, target_datalen
);
11091 unlock_user(target_data
, arg2
, 0);
11096 case TARGET_NR_sigaltstack
:
11097 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11099 #ifdef CONFIG_SENDFILE
11100 #ifdef TARGET_NR_sendfile
11101 case TARGET_NR_sendfile
:
11103 off_t
*offp
= NULL
;
11106 ret
= get_user_sal(off
, arg3
);
11107 if (is_error(ret
)) {
11112 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11113 if (!is_error(ret
) && arg3
) {
11114 abi_long ret2
= put_user_sal(off
, arg3
);
11115 if (is_error(ret2
)) {
11122 #ifdef TARGET_NR_sendfile64
11123 case TARGET_NR_sendfile64
:
11125 off_t
*offp
= NULL
;
11128 ret
= get_user_s64(off
, arg3
);
11129 if (is_error(ret
)) {
11134 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11135 if (!is_error(ret
) && arg3
) {
11136 abi_long ret2
= put_user_s64(off
, arg3
);
11137 if (is_error(ret2
)) {
11145 #ifdef TARGET_NR_vfork
11146 case TARGET_NR_vfork
:
11147 return get_errno(do_fork(cpu_env
,
11148 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11151 #ifdef TARGET_NR_ugetrlimit
11152 case TARGET_NR_ugetrlimit
:
11154 struct rlimit rlim
;
11155 int resource
= target_to_host_resource(arg1
);
11156 ret
= get_errno(getrlimit(resource
, &rlim
));
11157 if (!is_error(ret
)) {
11158 struct target_rlimit
*target_rlim
;
11159 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11160 return -TARGET_EFAULT
;
11161 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11162 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11163 unlock_user_struct(target_rlim
, arg2
, 1);
11168 #ifdef TARGET_NR_truncate64
11169 case TARGET_NR_truncate64
:
11170 if (!(p
= lock_user_string(arg1
)))
11171 return -TARGET_EFAULT
;
11172 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11173 unlock_user(p
, arg1
, 0);
11176 #ifdef TARGET_NR_ftruncate64
11177 case TARGET_NR_ftruncate64
:
11178 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11180 #ifdef TARGET_NR_stat64
11181 case TARGET_NR_stat64
:
11182 if (!(p
= lock_user_string(arg1
))) {
11183 return -TARGET_EFAULT
;
11185 ret
= get_errno(stat(path(p
), &st
));
11186 unlock_user(p
, arg1
, 0);
11187 if (!is_error(ret
))
11188 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11191 #ifdef TARGET_NR_lstat64
11192 case TARGET_NR_lstat64
:
11193 if (!(p
= lock_user_string(arg1
))) {
11194 return -TARGET_EFAULT
;
11196 ret
= get_errno(lstat(path(p
), &st
));
11197 unlock_user(p
, arg1
, 0);
11198 if (!is_error(ret
))
11199 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11202 #ifdef TARGET_NR_fstat64
11203 case TARGET_NR_fstat64
:
11204 ret
= get_errno(fstat(arg1
, &st
));
11205 if (!is_error(ret
))
11206 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11209 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11210 #ifdef TARGET_NR_fstatat64
11211 case TARGET_NR_fstatat64
:
11213 #ifdef TARGET_NR_newfstatat
11214 case TARGET_NR_newfstatat
:
11216 if (!(p
= lock_user_string(arg2
))) {
11217 return -TARGET_EFAULT
;
11219 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11220 unlock_user(p
, arg2
, 0);
11221 if (!is_error(ret
))
11222 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11225 #if defined(TARGET_NR_statx)
11226 case TARGET_NR_statx
:
11228 struct target_statx
*target_stx
;
11232 p
= lock_user_string(arg2
);
11234 return -TARGET_EFAULT
;
11236 #if defined(__NR_statx)
11239 * It is assumed that struct statx is architecture independent.
11241 struct target_statx host_stx
;
11244 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11245 if (!is_error(ret
)) {
11246 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11247 unlock_user(p
, arg2
, 0);
11248 return -TARGET_EFAULT
;
11252 if (ret
!= -TARGET_ENOSYS
) {
11253 unlock_user(p
, arg2
, 0);
11258 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11259 unlock_user(p
, arg2
, 0);
11261 if (!is_error(ret
)) {
11262 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11263 return -TARGET_EFAULT
;
11265 memset(target_stx
, 0, sizeof(*target_stx
));
11266 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11267 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11268 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11269 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11270 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11271 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11272 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11273 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11274 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11275 __put_user(st
.st_size
, &target_stx
->stx_size
);
11276 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11277 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11278 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11279 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11280 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11281 unlock_user_struct(target_stx
, arg5
, 1);
11286 #ifdef TARGET_NR_lchown
11287 case TARGET_NR_lchown
:
11288 if (!(p
= lock_user_string(arg1
)))
11289 return -TARGET_EFAULT
;
11290 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11291 unlock_user(p
, arg1
, 0);
11294 #ifdef TARGET_NR_getuid
11295 case TARGET_NR_getuid
:
11296 return get_errno(high2lowuid(getuid()));
11298 #ifdef TARGET_NR_getgid
11299 case TARGET_NR_getgid
:
11300 return get_errno(high2lowgid(getgid()));
11302 #ifdef TARGET_NR_geteuid
11303 case TARGET_NR_geteuid
:
11304 return get_errno(high2lowuid(geteuid()));
11306 #ifdef TARGET_NR_getegid
11307 case TARGET_NR_getegid
:
11308 return get_errno(high2lowgid(getegid()));
11310 case TARGET_NR_setreuid
:
11311 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11312 case TARGET_NR_setregid
:
11313 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11314 case TARGET_NR_getgroups
:
11316 int gidsetsize
= arg1
;
11317 target_id
*target_grouplist
;
11321 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11322 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11323 if (gidsetsize
== 0)
11325 if (!is_error(ret
)) {
11326 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11327 if (!target_grouplist
)
11328 return -TARGET_EFAULT
;
11329 for(i
= 0;i
< ret
; i
++)
11330 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11331 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11335 case TARGET_NR_setgroups
:
11337 int gidsetsize
= arg1
;
11338 target_id
*target_grouplist
;
11339 gid_t
*grouplist
= NULL
;
11342 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11343 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11344 if (!target_grouplist
) {
11345 return -TARGET_EFAULT
;
11347 for (i
= 0; i
< gidsetsize
; i
++) {
11348 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11350 unlock_user(target_grouplist
, arg2
, 0);
11352 return get_errno(setgroups(gidsetsize
, grouplist
));
11354 case TARGET_NR_fchown
:
11355 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11356 #if defined(TARGET_NR_fchownat)
11357 case TARGET_NR_fchownat
:
11358 if (!(p
= lock_user_string(arg2
)))
11359 return -TARGET_EFAULT
;
11360 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11361 low2highgid(arg4
), arg5
));
11362 unlock_user(p
, arg2
, 0);
11365 #ifdef TARGET_NR_setresuid
11366 case TARGET_NR_setresuid
:
11367 return get_errno(sys_setresuid(low2highuid(arg1
),
11369 low2highuid(arg3
)));
11371 #ifdef TARGET_NR_getresuid
11372 case TARGET_NR_getresuid
:
11374 uid_t ruid
, euid
, suid
;
11375 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11376 if (!is_error(ret
)) {
11377 if (put_user_id(high2lowuid(ruid
), arg1
)
11378 || put_user_id(high2lowuid(euid
), arg2
)
11379 || put_user_id(high2lowuid(suid
), arg3
))
11380 return -TARGET_EFAULT
;
11385 #ifdef TARGET_NR_getresgid
11386 case TARGET_NR_setresgid
:
11387 return get_errno(sys_setresgid(low2highgid(arg1
),
11389 low2highgid(arg3
)));
11391 #ifdef TARGET_NR_getresgid
11392 case TARGET_NR_getresgid
:
11394 gid_t rgid
, egid
, sgid
;
11395 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11396 if (!is_error(ret
)) {
11397 if (put_user_id(high2lowgid(rgid
), arg1
)
11398 || put_user_id(high2lowgid(egid
), arg2
)
11399 || put_user_id(high2lowgid(sgid
), arg3
))
11400 return -TARGET_EFAULT
;
11405 #ifdef TARGET_NR_chown
11406 case TARGET_NR_chown
:
11407 if (!(p
= lock_user_string(arg1
)))
11408 return -TARGET_EFAULT
;
11409 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11410 unlock_user(p
, arg1
, 0);
11413 case TARGET_NR_setuid
:
11414 return get_errno(sys_setuid(low2highuid(arg1
)));
11415 case TARGET_NR_setgid
:
11416 return get_errno(sys_setgid(low2highgid(arg1
)));
11417 case TARGET_NR_setfsuid
:
11418 return get_errno(setfsuid(arg1
));
11419 case TARGET_NR_setfsgid
:
11420 return get_errno(setfsgid(arg1
));
11422 #ifdef TARGET_NR_lchown32
11423 case TARGET_NR_lchown32
:
11424 if (!(p
= lock_user_string(arg1
)))
11425 return -TARGET_EFAULT
;
11426 ret
= get_errno(lchown(p
, arg2
, arg3
));
11427 unlock_user(p
, arg1
, 0);
11430 #ifdef TARGET_NR_getuid32
11431 case TARGET_NR_getuid32
:
11432 return get_errno(getuid());
11435 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11436 /* Alpha specific */
11437 case TARGET_NR_getxuid
:
11441 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11443 return get_errno(getuid());
11445 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11446 /* Alpha specific */
11447 case TARGET_NR_getxgid
:
11451 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11453 return get_errno(getgid());
11455 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11456 /* Alpha specific */
11457 case TARGET_NR_osf_getsysinfo
:
11458 ret
= -TARGET_EOPNOTSUPP
;
11460 case TARGET_GSI_IEEE_FP_CONTROL
:
11462 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11463 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11465 swcr
&= ~SWCR_STATUS_MASK
;
11466 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11468 if (put_user_u64 (swcr
, arg2
))
11469 return -TARGET_EFAULT
;
11474 /* case GSI_IEEE_STATE_AT_SIGNAL:
11475 -- Not implemented in linux kernel.
11477 -- Retrieves current unaligned access state; not much used.
11478 case GSI_PROC_TYPE:
11479 -- Retrieves implver information; surely not used.
11480 case GSI_GET_HWRPB:
11481 -- Grabs a copy of the HWRPB; surely not used.
11486 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11487 /* Alpha specific */
11488 case TARGET_NR_osf_setsysinfo
:
11489 ret
= -TARGET_EOPNOTSUPP
;
11491 case TARGET_SSI_IEEE_FP_CONTROL
:
11493 uint64_t swcr
, fpcr
;
11495 if (get_user_u64 (swcr
, arg2
)) {
11496 return -TARGET_EFAULT
;
11500 * The kernel calls swcr_update_status to update the
11501 * status bits from the fpcr at every point that it
11502 * could be queried. Therefore, we store the status
11503 * bits only in FPCR.
11505 ((CPUAlphaState
*)cpu_env
)->swcr
11506 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11508 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11509 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11510 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11511 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11516 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11518 uint64_t exc
, fpcr
, fex
;
11520 if (get_user_u64(exc
, arg2
)) {
11521 return -TARGET_EFAULT
;
11523 exc
&= SWCR_STATUS_MASK
;
11524 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11526 /* Old exceptions are not signaled. */
11527 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11529 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11530 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11532 /* Update the hardware fpcr. */
11533 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11534 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11537 int si_code
= TARGET_FPE_FLTUNK
;
11538 target_siginfo_t info
;
11540 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11541 si_code
= TARGET_FPE_FLTUND
;
11543 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11544 si_code
= TARGET_FPE_FLTRES
;
11546 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11547 si_code
= TARGET_FPE_FLTUND
;
11549 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11550 si_code
= TARGET_FPE_FLTOVF
;
11552 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11553 si_code
= TARGET_FPE_FLTDIV
;
11555 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11556 si_code
= TARGET_FPE_FLTINV
;
11559 info
.si_signo
= SIGFPE
;
11561 info
.si_code
= si_code
;
11562 info
._sifields
._sigfault
._addr
11563 = ((CPUArchState
*)cpu_env
)->pc
;
11564 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11565 QEMU_SI_FAULT
, &info
);
11571 /* case SSI_NVPAIRS:
11572 -- Used with SSIN_UACPROC to enable unaligned accesses.
11573 case SSI_IEEE_STATE_AT_SIGNAL:
11574 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11575 -- Not implemented in linux kernel
11580 #ifdef TARGET_NR_osf_sigprocmask
11581 /* Alpha specific. */
11582 case TARGET_NR_osf_sigprocmask
:
11586 sigset_t set
, oldset
;
11589 case TARGET_SIG_BLOCK
:
11592 case TARGET_SIG_UNBLOCK
:
11595 case TARGET_SIG_SETMASK
:
11599 return -TARGET_EINVAL
;
11602 target_to_host_old_sigset(&set
, &mask
);
11603 ret
= do_sigprocmask(how
, &set
, &oldset
);
11605 host_to_target_old_sigset(&mask
, &oldset
);
11612 #ifdef TARGET_NR_getgid32
11613 case TARGET_NR_getgid32
:
11614 return get_errno(getgid());
11616 #ifdef TARGET_NR_geteuid32
11617 case TARGET_NR_geteuid32
:
11618 return get_errno(geteuid());
11620 #ifdef TARGET_NR_getegid32
11621 case TARGET_NR_getegid32
:
11622 return get_errno(getegid());
11624 #ifdef TARGET_NR_setreuid32
11625 case TARGET_NR_setreuid32
:
11626 return get_errno(setreuid(arg1
, arg2
));
11628 #ifdef TARGET_NR_setregid32
11629 case TARGET_NR_setregid32
:
11630 return get_errno(setregid(arg1
, arg2
));
11632 #ifdef TARGET_NR_getgroups32
11633 case TARGET_NR_getgroups32
:
11635 int gidsetsize
= arg1
;
11636 uint32_t *target_grouplist
;
11640 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11641 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11642 if (gidsetsize
== 0)
11644 if (!is_error(ret
)) {
11645 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11646 if (!target_grouplist
) {
11647 return -TARGET_EFAULT
;
11649 for(i
= 0;i
< ret
; i
++)
11650 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11651 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11656 #ifdef TARGET_NR_setgroups32
11657 case TARGET_NR_setgroups32
:
11659 int gidsetsize
= arg1
;
11660 uint32_t *target_grouplist
;
11664 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11665 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11666 if (!target_grouplist
) {
11667 return -TARGET_EFAULT
;
11669 for(i
= 0;i
< gidsetsize
; i
++)
11670 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11671 unlock_user(target_grouplist
, arg2
, 0);
11672 return get_errno(setgroups(gidsetsize
, grouplist
));
11675 #ifdef TARGET_NR_fchown32
11676 case TARGET_NR_fchown32
:
11677 return get_errno(fchown(arg1
, arg2
, arg3
));
11679 #ifdef TARGET_NR_setresuid32
11680 case TARGET_NR_setresuid32
:
11681 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11683 #ifdef TARGET_NR_getresuid32
11684 case TARGET_NR_getresuid32
:
11686 uid_t ruid
, euid
, suid
;
11687 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11688 if (!is_error(ret
)) {
11689 if (put_user_u32(ruid
, arg1
)
11690 || put_user_u32(euid
, arg2
)
11691 || put_user_u32(suid
, arg3
))
11692 return -TARGET_EFAULT
;
11697 #ifdef TARGET_NR_setresgid32
11698 case TARGET_NR_setresgid32
:
11699 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11701 #ifdef TARGET_NR_getresgid32
11702 case TARGET_NR_getresgid32
:
11704 gid_t rgid
, egid
, sgid
;
11705 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11706 if (!is_error(ret
)) {
11707 if (put_user_u32(rgid
, arg1
)
11708 || put_user_u32(egid
, arg2
)
11709 || put_user_u32(sgid
, arg3
))
11710 return -TARGET_EFAULT
;
11715 #ifdef TARGET_NR_chown32
11716 case TARGET_NR_chown32
:
11717 if (!(p
= lock_user_string(arg1
)))
11718 return -TARGET_EFAULT
;
11719 ret
= get_errno(chown(p
, arg2
, arg3
));
11720 unlock_user(p
, arg1
, 0);
11723 #ifdef TARGET_NR_setuid32
11724 case TARGET_NR_setuid32
:
11725 return get_errno(sys_setuid(arg1
));
11727 #ifdef TARGET_NR_setgid32
11728 case TARGET_NR_setgid32
:
11729 return get_errno(sys_setgid(arg1
));
11731 #ifdef TARGET_NR_setfsuid32
11732 case TARGET_NR_setfsuid32
:
11733 return get_errno(setfsuid(arg1
));
11735 #ifdef TARGET_NR_setfsgid32
11736 case TARGET_NR_setfsgid32
:
11737 return get_errno(setfsgid(arg1
));
11739 #ifdef TARGET_NR_mincore
11740 case TARGET_NR_mincore
:
11742 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11744 return -TARGET_ENOMEM
;
11746 p
= lock_user_string(arg3
);
11748 ret
= -TARGET_EFAULT
;
11750 ret
= get_errno(mincore(a
, arg2
, p
));
11751 unlock_user(p
, arg3
, ret
);
11753 unlock_user(a
, arg1
, 0);
11757 #ifdef TARGET_NR_arm_fadvise64_64
11758 case TARGET_NR_arm_fadvise64_64
:
11759 /* arm_fadvise64_64 looks like fadvise64_64 but
11760 * with different argument order: fd, advice, offset, len
11761 * rather than the usual fd, offset, len, advice.
11762 * Note that offset and len are both 64-bit so appear as
11763 * pairs of 32-bit registers.
11765 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11766 target_offset64(arg5
, arg6
), arg2
);
11767 return -host_to_target_errno(ret
);
11770 #if TARGET_ABI_BITS == 32
11772 #ifdef TARGET_NR_fadvise64_64
11773 case TARGET_NR_fadvise64_64
:
11774 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11775 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11783 /* 6 args: fd, offset (high, low), len (high, low), advice */
11784 if (regpairs_aligned(cpu_env
, num
)) {
11785 /* offset is in (3,4), len in (5,6) and advice in 7 */
11793 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11794 target_offset64(arg4
, arg5
), arg6
);
11795 return -host_to_target_errno(ret
);
11798 #ifdef TARGET_NR_fadvise64
11799 case TARGET_NR_fadvise64
:
11800 /* 5 args: fd, offset (high, low), len, advice */
11801 if (regpairs_aligned(cpu_env
, num
)) {
11802 /* offset is in (3,4), len in 5 and advice in 6 */
11808 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11809 return -host_to_target_errno(ret
);
11812 #else /* not a 32-bit ABI */
11813 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11814 #ifdef TARGET_NR_fadvise64_64
11815 case TARGET_NR_fadvise64_64
:
11817 #ifdef TARGET_NR_fadvise64
11818 case TARGET_NR_fadvise64
:
11820 #ifdef TARGET_S390X
11822 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11823 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11824 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11825 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11829 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11831 #endif /* end of 64-bit ABI fadvise handling */
11833 #ifdef TARGET_NR_madvise
11834 case TARGET_NR_madvise
:
11835 /* A straight passthrough may not be safe because qemu sometimes
11836 turns private file-backed mappings into anonymous mappings.
11837 This will break MADV_DONTNEED.
11838 This is a hint, so ignoring and returning success is ok. */
11841 #ifdef TARGET_NR_fcntl64
11842 case TARGET_NR_fcntl64
:
11846 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11847 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11850 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11851 copyfrom
= copy_from_user_oabi_flock64
;
11852 copyto
= copy_to_user_oabi_flock64
;
11856 cmd
= target_to_host_fcntl_cmd(arg2
);
11857 if (cmd
== -TARGET_EINVAL
) {
11862 case TARGET_F_GETLK64
:
11863 ret
= copyfrom(&fl
, arg3
);
11867 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11869 ret
= copyto(arg3
, &fl
);
11873 case TARGET_F_SETLK64
:
11874 case TARGET_F_SETLKW64
:
11875 ret
= copyfrom(&fl
, arg3
);
11879 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11882 ret
= do_fcntl(arg1
, arg2
, arg3
);
11888 #ifdef TARGET_NR_cacheflush
11889 case TARGET_NR_cacheflush
:
11890 /* self-modifying code is handled automatically, so nothing needed */
11893 #ifdef TARGET_NR_getpagesize
11894 case TARGET_NR_getpagesize
:
11895 return TARGET_PAGE_SIZE
;
11897 case TARGET_NR_gettid
:
11898 return get_errno(sys_gettid());
11899 #ifdef TARGET_NR_readahead
11900 case TARGET_NR_readahead
:
11901 #if TARGET_ABI_BITS == 32
11902 if (regpairs_aligned(cpu_env
, num
)) {
11907 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11909 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11914 #ifdef TARGET_NR_setxattr
11915 case TARGET_NR_listxattr
:
11916 case TARGET_NR_llistxattr
:
11920 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11922 return -TARGET_EFAULT
;
11925 p
= lock_user_string(arg1
);
11927 if (num
== TARGET_NR_listxattr
) {
11928 ret
= get_errno(listxattr(p
, b
, arg3
));
11930 ret
= get_errno(llistxattr(p
, b
, arg3
));
11933 ret
= -TARGET_EFAULT
;
11935 unlock_user(p
, arg1
, 0);
11936 unlock_user(b
, arg2
, arg3
);
11939 case TARGET_NR_flistxattr
:
11943 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11945 return -TARGET_EFAULT
;
11948 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11949 unlock_user(b
, arg2
, arg3
);
11952 case TARGET_NR_setxattr
:
11953 case TARGET_NR_lsetxattr
:
11955 void *p
, *n
, *v
= 0;
11957 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11959 return -TARGET_EFAULT
;
11962 p
= lock_user_string(arg1
);
11963 n
= lock_user_string(arg2
);
11965 if (num
== TARGET_NR_setxattr
) {
11966 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11968 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11971 ret
= -TARGET_EFAULT
;
11973 unlock_user(p
, arg1
, 0);
11974 unlock_user(n
, arg2
, 0);
11975 unlock_user(v
, arg3
, 0);
11978 case TARGET_NR_fsetxattr
:
11982 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11984 return -TARGET_EFAULT
;
11987 n
= lock_user_string(arg2
);
11989 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11991 ret
= -TARGET_EFAULT
;
11993 unlock_user(n
, arg2
, 0);
11994 unlock_user(v
, arg3
, 0);
11997 case TARGET_NR_getxattr
:
11998 case TARGET_NR_lgetxattr
:
12000 void *p
, *n
, *v
= 0;
12002 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12004 return -TARGET_EFAULT
;
12007 p
= lock_user_string(arg1
);
12008 n
= lock_user_string(arg2
);
12010 if (num
== TARGET_NR_getxattr
) {
12011 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12013 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12016 ret
= -TARGET_EFAULT
;
12018 unlock_user(p
, arg1
, 0);
12019 unlock_user(n
, arg2
, 0);
12020 unlock_user(v
, arg3
, arg4
);
12023 case TARGET_NR_fgetxattr
:
12027 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12029 return -TARGET_EFAULT
;
12032 n
= lock_user_string(arg2
);
12034 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12036 ret
= -TARGET_EFAULT
;
12038 unlock_user(n
, arg2
, 0);
12039 unlock_user(v
, arg3
, arg4
);
12042 case TARGET_NR_removexattr
:
12043 case TARGET_NR_lremovexattr
:
12046 p
= lock_user_string(arg1
);
12047 n
= lock_user_string(arg2
);
12049 if (num
== TARGET_NR_removexattr
) {
12050 ret
= get_errno(removexattr(p
, n
));
12052 ret
= get_errno(lremovexattr(p
, n
));
12055 ret
= -TARGET_EFAULT
;
12057 unlock_user(p
, arg1
, 0);
12058 unlock_user(n
, arg2
, 0);
12061 case TARGET_NR_fremovexattr
:
12064 n
= lock_user_string(arg2
);
12066 ret
= get_errno(fremovexattr(arg1
, n
));
12068 ret
= -TARGET_EFAULT
;
12070 unlock_user(n
, arg2
, 0);
12074 #endif /* CONFIG_ATTR */
12075 #ifdef TARGET_NR_set_thread_area
12076 case TARGET_NR_set_thread_area
:
12077 #if defined(TARGET_MIPS)
12078 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12080 #elif defined(TARGET_CRIS)
12082 ret
= -TARGET_EINVAL
;
12084 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12088 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12089 return do_set_thread_area(cpu_env
, arg1
);
12090 #elif defined(TARGET_M68K)
12092 TaskState
*ts
= cpu
->opaque
;
12093 ts
->tp_value
= arg1
;
12097 return -TARGET_ENOSYS
;
12100 #ifdef TARGET_NR_get_thread_area
12101 case TARGET_NR_get_thread_area
:
12102 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12103 return do_get_thread_area(cpu_env
, arg1
);
12104 #elif defined(TARGET_M68K)
12106 TaskState
*ts
= cpu
->opaque
;
12107 return ts
->tp_value
;
12110 return -TARGET_ENOSYS
;
12113 #ifdef TARGET_NR_getdomainname
12114 case TARGET_NR_getdomainname
:
12115 return -TARGET_ENOSYS
;
12118 #ifdef TARGET_NR_clock_settime
12119 case TARGET_NR_clock_settime
:
12121 struct timespec ts
;
12123 ret
= target_to_host_timespec(&ts
, arg2
);
12124 if (!is_error(ret
)) {
12125 ret
= get_errno(clock_settime(arg1
, &ts
));
12130 #ifdef TARGET_NR_clock_settime64
12131 case TARGET_NR_clock_settime64
:
12133 struct timespec ts
;
12135 ret
= target_to_host_timespec64(&ts
, arg2
);
12136 if (!is_error(ret
)) {
12137 ret
= get_errno(clock_settime(arg1
, &ts
));
12142 #ifdef TARGET_NR_clock_gettime
12143 case TARGET_NR_clock_gettime
:
12145 struct timespec ts
;
12146 ret
= get_errno(clock_gettime(arg1
, &ts
));
12147 if (!is_error(ret
)) {
12148 ret
= host_to_target_timespec(arg2
, &ts
);
12153 #ifdef TARGET_NR_clock_gettime64
12154 case TARGET_NR_clock_gettime64
:
12156 struct timespec ts
;
12157 ret
= get_errno(clock_gettime(arg1
, &ts
));
12158 if (!is_error(ret
)) {
12159 ret
= host_to_target_timespec64(arg2
, &ts
);
12164 #ifdef TARGET_NR_clock_getres
12165 case TARGET_NR_clock_getres
:
12167 struct timespec ts
;
12168 ret
= get_errno(clock_getres(arg1
, &ts
));
12169 if (!is_error(ret
)) {
12170 host_to_target_timespec(arg2
, &ts
);
12175 #ifdef TARGET_NR_clock_getres_time64
12176 case TARGET_NR_clock_getres_time64
:
12178 struct timespec ts
;
12179 ret
= get_errno(clock_getres(arg1
, &ts
));
12180 if (!is_error(ret
)) {
12181 host_to_target_timespec64(arg2
, &ts
);
12186 #ifdef TARGET_NR_clock_nanosleep
12187 case TARGET_NR_clock_nanosleep
:
12189 struct timespec ts
;
12190 if (target_to_host_timespec(&ts
, arg3
)) {
12191 return -TARGET_EFAULT
;
12193 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12194 &ts
, arg4
? &ts
: NULL
));
12196 * if the call is interrupted by a signal handler, it fails
12197 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12198 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12200 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12201 host_to_target_timespec(arg4
, &ts
)) {
12202 return -TARGET_EFAULT
;
12208 #ifdef TARGET_NR_clock_nanosleep_time64
12209 case TARGET_NR_clock_nanosleep_time64
:
12211 struct timespec ts
;
12213 if (target_to_host_timespec64(&ts
, arg3
)) {
12214 return -TARGET_EFAULT
;
12217 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12218 &ts
, arg4
? &ts
: NULL
));
12220 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12221 host_to_target_timespec64(arg4
, &ts
)) {
12222 return -TARGET_EFAULT
;
12228 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12229 case TARGET_NR_set_tid_address
:
12230 return get_errno(set_tid_address((int *)g2h(cpu
, arg1
)));
12233 case TARGET_NR_tkill
:
12234 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12236 case TARGET_NR_tgkill
:
12237 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12238 target_to_host_signal(arg3
)));
12240 #ifdef TARGET_NR_set_robust_list
12241 case TARGET_NR_set_robust_list
:
12242 case TARGET_NR_get_robust_list
:
12243 /* The ABI for supporting robust futexes has userspace pass
12244 * the kernel a pointer to a linked list which is updated by
12245 * userspace after the syscall; the list is walked by the kernel
12246 * when the thread exits. Since the linked list in QEMU guest
12247 * memory isn't a valid linked list for the host and we have
12248 * no way to reliably intercept the thread-death event, we can't
12249 * support these. Silently return ENOSYS so that guest userspace
12250 * falls back to a non-robust futex implementation (which should
12251 * be OK except in the corner case of the guest crashing while
12252 * holding a mutex that is shared with another process via
12255 return -TARGET_ENOSYS
;
12258 #if defined(TARGET_NR_utimensat)
12259 case TARGET_NR_utimensat
:
12261 struct timespec
*tsp
, ts
[2];
12265 if (target_to_host_timespec(ts
, arg3
)) {
12266 return -TARGET_EFAULT
;
12268 if (target_to_host_timespec(ts
+ 1, arg3
+
12269 sizeof(struct target_timespec
))) {
12270 return -TARGET_EFAULT
;
12275 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12277 if (!(p
= lock_user_string(arg2
))) {
12278 return -TARGET_EFAULT
;
12280 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12281 unlock_user(p
, arg2
, 0);
12286 #ifdef TARGET_NR_utimensat_time64
12287 case TARGET_NR_utimensat_time64
:
12289 struct timespec
*tsp
, ts
[2];
12293 if (target_to_host_timespec64(ts
, arg3
)) {
12294 return -TARGET_EFAULT
;
12296 if (target_to_host_timespec64(ts
+ 1, arg3
+
12297 sizeof(struct target__kernel_timespec
))) {
12298 return -TARGET_EFAULT
;
12303 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12305 p
= lock_user_string(arg2
);
12307 return -TARGET_EFAULT
;
12309 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12310 unlock_user(p
, arg2
, 0);
12315 #ifdef TARGET_NR_futex
12316 case TARGET_NR_futex
:
12317 return do_futex(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12319 #ifdef TARGET_NR_futex_time64
12320 case TARGET_NR_futex_time64
:
12321 return do_futex_time64(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12323 #ifdef CONFIG_INOTIFY
12324 #if defined(TARGET_NR_inotify_init)
12325 case TARGET_NR_inotify_init
:
12326 ret
= get_errno(inotify_init());
12328 fd_trans_register(ret
, &target_inotify_trans
);
12332 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12333 case TARGET_NR_inotify_init1
:
12334 ret
= get_errno(inotify_init1(target_to_host_bitmask(arg1
,
12335 fcntl_flags_tbl
)));
12337 fd_trans_register(ret
, &target_inotify_trans
);
12341 #if defined(TARGET_NR_inotify_add_watch)
12342 case TARGET_NR_inotify_add_watch
:
12343 p
= lock_user_string(arg2
);
12344 ret
= get_errno(inotify_add_watch(arg1
, path(p
), arg3
));
12345 unlock_user(p
, arg2
, 0);
12348 #if defined(TARGET_NR_inotify_rm_watch)
12349 case TARGET_NR_inotify_rm_watch
:
12350 return get_errno(inotify_rm_watch(arg1
, arg2
));
12354 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12355 case TARGET_NR_mq_open
:
12357 struct mq_attr posix_mq_attr
;
12358 struct mq_attr
*pposix_mq_attr
;
12361 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12362 pposix_mq_attr
= NULL
;
12364 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12365 return -TARGET_EFAULT
;
12367 pposix_mq_attr
= &posix_mq_attr
;
12369 p
= lock_user_string(arg1
- 1);
12371 return -TARGET_EFAULT
;
12373 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12374 unlock_user (p
, arg1
, 0);
12378 case TARGET_NR_mq_unlink
:
12379 p
= lock_user_string(arg1
- 1);
12381 return -TARGET_EFAULT
;
12383 ret
= get_errno(mq_unlink(p
));
12384 unlock_user (p
, arg1
, 0);
12387 #ifdef TARGET_NR_mq_timedsend
12388 case TARGET_NR_mq_timedsend
:
12390 struct timespec ts
;
12392 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12394 if (target_to_host_timespec(&ts
, arg5
)) {
12395 return -TARGET_EFAULT
;
12397 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12398 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12399 return -TARGET_EFAULT
;
12402 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12404 unlock_user (p
, arg2
, arg3
);
12408 #ifdef TARGET_NR_mq_timedsend_time64
12409 case TARGET_NR_mq_timedsend_time64
:
12411 struct timespec ts
;
12413 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12415 if (target_to_host_timespec64(&ts
, arg5
)) {
12416 return -TARGET_EFAULT
;
12418 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12419 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12420 return -TARGET_EFAULT
;
12423 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12425 unlock_user(p
, arg2
, arg3
);
12430 #ifdef TARGET_NR_mq_timedreceive
12431 case TARGET_NR_mq_timedreceive
:
12433 struct timespec ts
;
12436 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12438 if (target_to_host_timespec(&ts
, arg5
)) {
12439 return -TARGET_EFAULT
;
12441 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12443 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12444 return -TARGET_EFAULT
;
12447 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12450 unlock_user (p
, arg2
, arg3
);
12452 put_user_u32(prio
, arg4
);
12456 #ifdef TARGET_NR_mq_timedreceive_time64
12457 case TARGET_NR_mq_timedreceive_time64
:
12459 struct timespec ts
;
12462 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12464 if (target_to_host_timespec64(&ts
, arg5
)) {
12465 return -TARGET_EFAULT
;
12467 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12469 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12470 return -TARGET_EFAULT
;
12473 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12476 unlock_user(p
, arg2
, arg3
);
12478 put_user_u32(prio
, arg4
);
12484 /* Not implemented for now... */
12485 /* case TARGET_NR_mq_notify: */
12488 case TARGET_NR_mq_getsetattr
:
12490 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12493 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12494 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12495 &posix_mq_attr_out
));
12496 } else if (arg3
!= 0) {
12497 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12499 if (ret
== 0 && arg3
!= 0) {
12500 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12506 #ifdef CONFIG_SPLICE
12507 #ifdef TARGET_NR_tee
12508 case TARGET_NR_tee
:
12510 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12514 #ifdef TARGET_NR_splice
12515 case TARGET_NR_splice
:
12517 loff_t loff_in
, loff_out
;
12518 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12520 if (get_user_u64(loff_in
, arg2
)) {
12521 return -TARGET_EFAULT
;
12523 ploff_in
= &loff_in
;
12526 if (get_user_u64(loff_out
, arg4
)) {
12527 return -TARGET_EFAULT
;
12529 ploff_out
= &loff_out
;
12531 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12533 if (put_user_u64(loff_in
, arg2
)) {
12534 return -TARGET_EFAULT
;
12538 if (put_user_u64(loff_out
, arg4
)) {
12539 return -TARGET_EFAULT
;
12545 #ifdef TARGET_NR_vmsplice
12546 case TARGET_NR_vmsplice
:
12548 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12550 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12551 unlock_iovec(vec
, arg2
, arg3
, 0);
12553 ret
= -host_to_target_errno(errno
);
12558 #endif /* CONFIG_SPLICE */
12559 #ifdef CONFIG_EVENTFD
12560 #if defined(TARGET_NR_eventfd)
12561 case TARGET_NR_eventfd
:
12562 ret
= get_errno(eventfd(arg1
, 0));
12564 fd_trans_register(ret
, &target_eventfd_trans
);
12568 #if defined(TARGET_NR_eventfd2)
12569 case TARGET_NR_eventfd2
:
12571 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12572 if (arg2
& TARGET_O_NONBLOCK
) {
12573 host_flags
|= O_NONBLOCK
;
12575 if (arg2
& TARGET_O_CLOEXEC
) {
12576 host_flags
|= O_CLOEXEC
;
12578 ret
= get_errno(eventfd(arg1
, host_flags
));
12580 fd_trans_register(ret
, &target_eventfd_trans
);
12585 #endif /* CONFIG_EVENTFD */
12586 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12587 case TARGET_NR_fallocate
:
12588 #if TARGET_ABI_BITS == 32
12589 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12590 target_offset64(arg5
, arg6
)));
12592 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12596 #if defined(CONFIG_SYNC_FILE_RANGE)
12597 #if defined(TARGET_NR_sync_file_range)
12598 case TARGET_NR_sync_file_range
:
12599 #if TARGET_ABI_BITS == 32
12600 #if defined(TARGET_MIPS)
12601 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12602 target_offset64(arg5
, arg6
), arg7
));
12604 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12605 target_offset64(arg4
, arg5
), arg6
));
12606 #endif /* !TARGET_MIPS */
12608 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12612 #if defined(TARGET_NR_sync_file_range2) || \
12613 defined(TARGET_NR_arm_sync_file_range)
12614 #if defined(TARGET_NR_sync_file_range2)
12615 case TARGET_NR_sync_file_range2
:
12617 #if defined(TARGET_NR_arm_sync_file_range)
12618 case TARGET_NR_arm_sync_file_range
:
12620 /* This is like sync_file_range but the arguments are reordered */
12621 #if TARGET_ABI_BITS == 32
12622 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12623 target_offset64(arg5
, arg6
), arg2
));
12625 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12630 #if defined(TARGET_NR_signalfd4)
12631 case TARGET_NR_signalfd4
:
12632 return do_signalfd4(arg1
, arg2
, arg4
);
12634 #if defined(TARGET_NR_signalfd)
12635 case TARGET_NR_signalfd
:
12636 return do_signalfd4(arg1
, arg2
, 0);
12638 #if defined(CONFIG_EPOLL)
12639 #if defined(TARGET_NR_epoll_create)
12640 case TARGET_NR_epoll_create
:
12641 return get_errno(epoll_create(arg1
));
12643 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12644 case TARGET_NR_epoll_create1
:
12645 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12647 #if defined(TARGET_NR_epoll_ctl)
12648 case TARGET_NR_epoll_ctl
:
12650 struct epoll_event ep
;
12651 struct epoll_event
*epp
= 0;
12653 if (arg2
!= EPOLL_CTL_DEL
) {
12654 struct target_epoll_event
*target_ep
;
12655 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12656 return -TARGET_EFAULT
;
12658 ep
.events
= tswap32(target_ep
->events
);
12660 * The epoll_data_t union is just opaque data to the kernel,
12661 * so we transfer all 64 bits across and need not worry what
12662 * actual data type it is.
12664 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12665 unlock_user_struct(target_ep
, arg4
, 0);
12668 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12669 * non-null pointer, even though this argument is ignored.
12674 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12678 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12679 #if defined(TARGET_NR_epoll_wait)
12680 case TARGET_NR_epoll_wait
:
12682 #if defined(TARGET_NR_epoll_pwait)
12683 case TARGET_NR_epoll_pwait
:
12686 struct target_epoll_event
*target_ep
;
12687 struct epoll_event
*ep
;
12689 int maxevents
= arg3
;
12690 int timeout
= arg4
;
12692 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12693 return -TARGET_EINVAL
;
12696 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12697 maxevents
* sizeof(struct target_epoll_event
), 1);
12699 return -TARGET_EFAULT
;
12702 ep
= g_try_new(struct epoll_event
, maxevents
);
12704 unlock_user(target_ep
, arg2
, 0);
12705 return -TARGET_ENOMEM
;
12709 #if defined(TARGET_NR_epoll_pwait)
12710 case TARGET_NR_epoll_pwait
:
12712 target_sigset_t
*target_set
;
12713 sigset_t _set
, *set
= &_set
;
12716 if (arg6
!= sizeof(target_sigset_t
)) {
12717 ret
= -TARGET_EINVAL
;
12721 target_set
= lock_user(VERIFY_READ
, arg5
,
12722 sizeof(target_sigset_t
), 1);
12724 ret
= -TARGET_EFAULT
;
12727 target_to_host_sigset(set
, target_set
);
12728 unlock_user(target_set
, arg5
, 0);
12733 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12734 set
, SIGSET_T_SIZE
));
12738 #if defined(TARGET_NR_epoll_wait)
12739 case TARGET_NR_epoll_wait
:
12740 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12745 ret
= -TARGET_ENOSYS
;
12747 if (!is_error(ret
)) {
12749 for (i
= 0; i
< ret
; i
++) {
12750 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12751 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12753 unlock_user(target_ep
, arg2
,
12754 ret
* sizeof(struct target_epoll_event
));
12756 unlock_user(target_ep
, arg2
, 0);
12763 #ifdef TARGET_NR_prlimit64
12764 case TARGET_NR_prlimit64
:
12766 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12767 struct target_rlimit64
*target_rnew
, *target_rold
;
12768 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12769 int resource
= target_to_host_resource(arg2
);
12771 if (arg3
&& (resource
!= RLIMIT_AS
&&
12772 resource
!= RLIMIT_DATA
&&
12773 resource
!= RLIMIT_STACK
)) {
12774 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12775 return -TARGET_EFAULT
;
12777 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12778 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12779 unlock_user_struct(target_rnew
, arg3
, 0);
12783 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12784 if (!is_error(ret
) && arg4
) {
12785 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12786 return -TARGET_EFAULT
;
12788 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12789 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12790 unlock_user_struct(target_rold
, arg4
, 1);
12795 #ifdef TARGET_NR_gethostname
12796 case TARGET_NR_gethostname
:
12798 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12800 ret
= get_errno(gethostname(name
, arg2
));
12801 unlock_user(name
, arg1
, arg2
);
12803 ret
= -TARGET_EFAULT
;
12808 #ifdef TARGET_NR_atomic_cmpxchg_32
12809 case TARGET_NR_atomic_cmpxchg_32
:
12811 /* should use start_exclusive from main.c */
12812 abi_ulong mem_value
;
12813 if (get_user_u32(mem_value
, arg6
)) {
12814 target_siginfo_t info
;
12815 info
.si_signo
= SIGSEGV
;
12817 info
.si_code
= TARGET_SEGV_MAPERR
;
12818 info
._sifields
._sigfault
._addr
= arg6
;
12819 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12820 QEMU_SI_FAULT
, &info
);
12824 if (mem_value
== arg2
)
12825 put_user_u32(arg1
, arg6
);
12829 #ifdef TARGET_NR_atomic_barrier
12830 case TARGET_NR_atomic_barrier
:
12831 /* Like the kernel implementation and the
12832 qemu arm barrier, no-op this? */
12836 #ifdef TARGET_NR_timer_create
12837 case TARGET_NR_timer_create
:
12839 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12841 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12844 int timer_index
= next_free_host_timer();
12846 if (timer_index
< 0) {
12847 ret
= -TARGET_EAGAIN
;
12849 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12852 phost_sevp
= &host_sevp
;
12853 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12859 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12863 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12864 return -TARGET_EFAULT
;
12872 #ifdef TARGET_NR_timer_settime
12873 case TARGET_NR_timer_settime
:
12875 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12876 * struct itimerspec * old_value */
12877 target_timer_t timerid
= get_timer_id(arg1
);
12881 } else if (arg3
== 0) {
12882 ret
= -TARGET_EINVAL
;
12884 timer_t htimer
= g_posix_timers
[timerid
];
12885 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12887 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12888 return -TARGET_EFAULT
;
12891 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12892 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12893 return -TARGET_EFAULT
;
12900 #ifdef TARGET_NR_timer_settime64
12901 case TARGET_NR_timer_settime64
:
12903 target_timer_t timerid
= get_timer_id(arg1
);
12907 } else if (arg3
== 0) {
12908 ret
= -TARGET_EINVAL
;
12910 timer_t htimer
= g_posix_timers
[timerid
];
12911 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12913 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12914 return -TARGET_EFAULT
;
12917 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12918 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12919 return -TARGET_EFAULT
;
12926 #ifdef TARGET_NR_timer_gettime
12927 case TARGET_NR_timer_gettime
:
12929 /* args: timer_t timerid, struct itimerspec *curr_value */
12930 target_timer_t timerid
= get_timer_id(arg1
);
12934 } else if (!arg2
) {
12935 ret
= -TARGET_EFAULT
;
12937 timer_t htimer
= g_posix_timers
[timerid
];
12938 struct itimerspec hspec
;
12939 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12941 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12942 ret
= -TARGET_EFAULT
;
12949 #ifdef TARGET_NR_timer_gettime64
12950 case TARGET_NR_timer_gettime64
:
12952 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12953 target_timer_t timerid
= get_timer_id(arg1
);
12957 } else if (!arg2
) {
12958 ret
= -TARGET_EFAULT
;
12960 timer_t htimer
= g_posix_timers
[timerid
];
12961 struct itimerspec hspec
;
12962 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12964 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12965 ret
= -TARGET_EFAULT
;
12972 #ifdef TARGET_NR_timer_getoverrun
12973 case TARGET_NR_timer_getoverrun
:
12975 /* args: timer_t timerid */
12976 target_timer_t timerid
= get_timer_id(arg1
);
12981 timer_t htimer
= g_posix_timers
[timerid
];
12982 ret
= get_errno(timer_getoverrun(htimer
));
12988 #ifdef TARGET_NR_timer_delete
12989 case TARGET_NR_timer_delete
:
12991 /* args: timer_t timerid */
12992 target_timer_t timerid
= get_timer_id(arg1
);
12997 timer_t htimer
= g_posix_timers
[timerid
];
12998 ret
= get_errno(timer_delete(htimer
));
12999 g_posix_timers
[timerid
] = 0;
13005 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13006 case TARGET_NR_timerfd_create
:
13007 return get_errno(timerfd_create(arg1
,
13008 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
13011 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13012 case TARGET_NR_timerfd_gettime
:
13014 struct itimerspec its_curr
;
13016 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13018 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
13019 return -TARGET_EFAULT
;
13025 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13026 case TARGET_NR_timerfd_gettime64
:
13028 struct itimerspec its_curr
;
13030 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
13032 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
13033 return -TARGET_EFAULT
;
13039 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13040 case TARGET_NR_timerfd_settime
:
13042 struct itimerspec its_new
, its_old
, *p_new
;
13045 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13046 return -TARGET_EFAULT
;
13053 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13055 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13056 return -TARGET_EFAULT
;
13062 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13063 case TARGET_NR_timerfd_settime64
:
13065 struct itimerspec its_new
, its_old
, *p_new
;
13068 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13069 return -TARGET_EFAULT
;
13076 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13078 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13079 return -TARGET_EFAULT
;
13085 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13086 case TARGET_NR_ioprio_get
:
13087 return get_errno(ioprio_get(arg1
, arg2
));
13090 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13091 case TARGET_NR_ioprio_set
:
13092 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13095 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13096 case TARGET_NR_setns
:
13097 return get_errno(setns(arg1
, arg2
));
13099 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13100 case TARGET_NR_unshare
:
13101 return get_errno(unshare(arg1
));
13103 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13104 case TARGET_NR_kcmp
:
13105 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13107 #ifdef TARGET_NR_swapcontext
13108 case TARGET_NR_swapcontext
:
13109 /* PowerPC specific. */
13110 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13112 #ifdef TARGET_NR_memfd_create
13113 case TARGET_NR_memfd_create
:
13114 p
= lock_user_string(arg1
);
13116 return -TARGET_EFAULT
;
13118 ret
= get_errno(memfd_create(p
, arg2
));
13119 fd_trans_unregister(ret
);
13120 unlock_user(p
, arg1
, 0);
13123 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13124 case TARGET_NR_membarrier
:
13125 return get_errno(membarrier(arg1
, arg2
));
13128 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13129 case TARGET_NR_copy_file_range
:
13131 loff_t inoff
, outoff
;
13132 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13135 if (get_user_u64(inoff
, arg2
)) {
13136 return -TARGET_EFAULT
;
13141 if (get_user_u64(outoff
, arg4
)) {
13142 return -TARGET_EFAULT
;
13146 /* Do not sign-extend the count parameter. */
13147 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13148 (abi_ulong
)arg5
, arg6
));
13149 if (!is_error(ret
) && ret
> 0) {
13151 if (put_user_u64(inoff
, arg2
)) {
13152 return -TARGET_EFAULT
;
13156 if (put_user_u64(outoff
, arg4
)) {
13157 return -TARGET_EFAULT
;
13165 #if defined(TARGET_NR_pivot_root)
13166 case TARGET_NR_pivot_root
:
13169 p
= lock_user_string(arg1
); /* new_root */
13170 p2
= lock_user_string(arg2
); /* put_old */
13172 ret
= -TARGET_EFAULT
;
13174 ret
= get_errno(pivot_root(p
, p2
));
13176 unlock_user(p2
, arg2
, 0);
13177 unlock_user(p
, arg1
, 0);
13183 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13184 return -TARGET_ENOSYS
;
13189 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13190 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13191 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13194 CPUState
*cpu
= env_cpu(cpu_env
);
13197 #ifdef DEBUG_ERESTARTSYS
13198 /* Debug-only code for exercising the syscall-restart code paths
13199 * in the per-architecture cpu main loops: restart every syscall
13200 * the guest makes once before letting it through.
13206 return -QEMU_ERESTARTSYS
;
13211 record_syscall_start(cpu
, num
, arg1
,
13212 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13214 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13215 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13218 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13219 arg5
, arg6
, arg7
, arg8
);
13221 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13222 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13223 arg3
, arg4
, arg5
, arg6
);
13226 record_syscall_return(cpu
, num
, ret
);