/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef CONFIG_BTRFS
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
130 #include "user-internals.h"
132 #include "signal-common.h"
134 #include "user-mmap.h"
135 #include "safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "qapi/error.h"
140 #include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
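
/*
 * Illustration only (not from the original source): with the masks
 * above, classifying a guest's clone flags reduces to simple mask
 * tests. This hypothetical helper sketches the thread-like case; the
 * real decision lives in do_fork().
 */
static inline bool clone_flags_look_like_pthread_create(unsigned int flags)
{
    /* all thread-creation flags present, nothing unsupported set */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           (flags & CLONE_INVALID_THREAD_FLAGS) == 0;
}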
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
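
/*
 * For example (illustration, not from the original source), the
 * invocation
 *
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 *
 * expands to a static wrapper equivalent to
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * which is why each wrapper below needs a matching __NR_sys_* define.
 */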
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif

#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
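
/*
 * Illustration (hypothetical helper, not part of the original source):
 * a bitmask_transtbl is walked entry by entry; whenever the bits
 * selected by target_mask equal target_bits, the matching host_bits
 * are OR-ed into the result. Sketch of the target-to-host direction,
 * assuming the conventional field names:
 */
static inline int demo_target_to_host_open_flags(int target_flags)
{
    int host_flags = 0;
    const bitmask_transtbl *e;

    /* the all-zero entry terminates the table */
    for (e = fcntl_flags_tbl; e->target_mask || e->host_mask; e++) {
        if ((target_flags & e->target_mask) == e->target_bits) {
            host_flags |= e->host_bits;
        }
    }
    return host_flags;
}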
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
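
/*
 * For example (illustration): errnos.c.inc is just a list of E(...)
 * invocations, so an entry such as E(ENOSYS) expands, under the first
 * definition of E above, to
 *
 *     case ENOSYS: return TARGET_ENOSYS;
 *
 * and under the second definition to the reverse mapping; redefining E
 * around each #include lets a single list serve both directions.
 */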
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
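
/*
 * Usage convention (illustration, not from the original source): host
 * results are funnelled through get_errno() so the guest only ever
 * sees target errno values, e.g.
 *
 *     abi_long ret = get_errno(open(path, host_flags));
 *     if (is_error(ret)) {
 *         return ret;        already a -TARGET_Exxx value
 *     }
 */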
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
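
/*
 * Why safe_syscall (illustration, not from the original source): these
 * wrappers let a guest signal that arrives while the host call is
 * blocked force a -TARGET_ERESTARTSYS return, so the signal can be
 * delivered and the syscall restarted, mirroring native kernel
 * behaviour. Declaring one mirrors the plain _syscallN macros:
 *
 *     safe_syscall2(int, flock, int, fd, int, operation)
 *
 * defines safe_flock(fd, operation).
 */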
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#if !defined(__sparc__)
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
/* SPARC wants the full six-argument list */
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
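
/*
 * Illustration (not from the original source): because safe_ioctl() is
 * variadic, all three libc ioctl calling styles still work through it:
 *
 *     safe_ioctl(fd, FIONREAD, &nbytes);    pointer third argument
 *     safe_ioctl(fd, TCSBRK, 1);            integer third argument
 *     safe_ioctl(fd, TIOCNOTTY);            no third argument
 */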
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
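
/*
 * Example (illustration, not from the original source): if the host
 * returns SOCK_STREAM | SOCK_CLOEXEC, the guest sees
 * TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC, regardless of how the
 * target architecture encodes those constants numerically (MIPS, for
 * instance, uses different SOCK_* values from most hosts).
 */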
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
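
/*
 * Guest-visible contract (illustration, not from the original source):
 * on everything except Alpha a failed brk just reports the old break,
 * which is how guest libc typically detects failure:
 *
 *     cur = do_brk(0);                    query the current break
 *     if (do_brk(cur + size) == cur)      allocation failed
 *
 * whereas the OSF/1 personality on Alpha expects -TARGET_ENOMEM.
 */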
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
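
/*
 * Worked example (illustration): on an Alpha host (HOST_HZ == 1024)
 * emulating a TARGET_HZ == 100 guest, 2048 host ticks become
 * (2048 * 100) / 1024 = 200 guest ticks; the int64_t cast keeps the
 * intermediate product from overflowing a 32-bit long.
 */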
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif
static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;
    sigset_t set;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);

            sig.set = &set;
            sig_ptr = &sig;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
do_pipe2(int host_pipe
[], int flags
)
1592 return pipe2(host_pipe
, flags
);
1598 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1599 int flags
, int is_pipe2
)
1603 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1606 return get_errno(ret
);
1608 /* Several targets have special calling conventions for the original
1609 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1611 #if defined(TARGET_ALPHA)
1612 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1613 return host_pipe
[0];
1614 #elif defined(TARGET_MIPS)
1615 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1616 return host_pipe
[0];
1617 #elif defined(TARGET_SH4)
1618 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1619 return host_pipe
[0];
1620 #elif defined(TARGET_SPARC)
1621 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1622 return host_pipe
[0];
1626 if (put_user_s32(host_pipe
[0], pipedes
)
1627 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1628 return -TARGET_EFAULT
;
1629 return get_errno(ret
);
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2065 /* do_setsockopt() Must return target values and target errnos. */
2066 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2067 abi_ulong optval_addr
, socklen_t optlen
)
2071 struct ip_mreqn
*ip_mreq
;
2072 struct ip_mreq_source
*ip_mreq_source
;
2077 /* TCP and UDP options all take an 'int' value. */
2078 if (optlen
< sizeof(uint32_t))
2079 return -TARGET_EINVAL
;
2081 if (get_user_u32(val
, optval_addr
))
2082 return -TARGET_EFAULT
;
2083 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2090 case IP_ROUTER_ALERT
:
2094 case IP_MTU_DISCOVER
:
2101 case IP_MULTICAST_TTL
:
2102 case IP_MULTICAST_LOOP
:
2104 if (optlen
>= sizeof(uint32_t)) {
2105 if (get_user_u32(val
, optval_addr
))
2106 return -TARGET_EFAULT
;
2107 } else if (optlen
>= 1) {
2108 if (get_user_u8(val
, optval_addr
))
2109 return -TARGET_EFAULT
;
2111 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2113 case IP_ADD_MEMBERSHIP
:
2114 case IP_DROP_MEMBERSHIP
:
2115 if (optlen
< sizeof (struct target_ip_mreq
) ||
2116 optlen
> sizeof (struct target_ip_mreqn
))
2117 return -TARGET_EINVAL
;
2119 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2120 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2121 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2124 case IP_BLOCK_SOURCE
:
2125 case IP_UNBLOCK_SOURCE
:
2126 case IP_ADD_SOURCE_MEMBERSHIP
:
2127 case IP_DROP_SOURCE_MEMBERSHIP
:
2128 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2129 return -TARGET_EINVAL
;
2131 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2132 if (!ip_mreq_source
) {
2133 return -TARGET_EFAULT
;
2135 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2136 unlock_user (ip_mreq_source
, optval_addr
, 0);
2145 case IPV6_MTU_DISCOVER
:
2148 case IPV6_RECVPKTINFO
:
2149 case IPV6_UNICAST_HOPS
:
2150 case IPV6_MULTICAST_HOPS
:
2151 case IPV6_MULTICAST_LOOP
:
2153 case IPV6_RECVHOPLIMIT
:
2154 case IPV6_2292HOPLIMIT
:
2157 case IPV6_2292PKTINFO
:
2158 case IPV6_RECVTCLASS
:
2159 case IPV6_RECVRTHDR
:
2160 case IPV6_2292RTHDR
:
2161 case IPV6_RECVHOPOPTS
:
2162 case IPV6_2292HOPOPTS
:
2163 case IPV6_RECVDSTOPTS
:
2164 case IPV6_2292DSTOPTS
:
2166 case IPV6_ADDR_PREFERENCES
:
2167 #ifdef IPV6_RECVPATHMTU
2168 case IPV6_RECVPATHMTU
:
2170 #ifdef IPV6_TRANSPARENT
2171 case IPV6_TRANSPARENT
:
2173 #ifdef IPV6_FREEBIND
2176 #ifdef IPV6_RECVORIGDSTADDR
2177 case IPV6_RECVORIGDSTADDR
:
2180 if (optlen
< sizeof(uint32_t)) {
2181 return -TARGET_EINVAL
;
2183 if (get_user_u32(val
, optval_addr
)) {
2184 return -TARGET_EFAULT
;
2186 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2187 &val
, sizeof(val
)));
2191 struct in6_pktinfo pki
;
2193 if (optlen
< sizeof(pki
)) {
2194 return -TARGET_EINVAL
;
2197 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2198 return -TARGET_EFAULT
;
2201 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2203 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2204 &pki
, sizeof(pki
)));
2207 case IPV6_ADD_MEMBERSHIP
:
2208 case IPV6_DROP_MEMBERSHIP
:
2210 struct ipv6_mreq ipv6mreq
;
2212 if (optlen
< sizeof(ipv6mreq
)) {
2213 return -TARGET_EINVAL
;
2216 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2217 return -TARGET_EFAULT
;
2220 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2222 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2223 &ipv6mreq
, sizeof(ipv6mreq
)));
2234 struct icmp6_filter icmp6f
;
2236 if (optlen
> sizeof(icmp6f
)) {
2237 optlen
= sizeof(icmp6f
);
2240 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2241 return -TARGET_EFAULT
;
2244 for (val
= 0; val
< 8; val
++) {
2245 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2248 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2260 /* those take an u32 value */
2261 if (optlen
< sizeof(uint32_t)) {
2262 return -TARGET_EINVAL
;
2265 if (get_user_u32(val
, optval_addr
)) {
2266 return -TARGET_EFAULT
;
2268 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2269 &val
, sizeof(val
)));
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2281 char *alg_key
= g_malloc(optlen
);
2284 return -TARGET_ENOMEM
;
2286 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2288 return -TARGET_EFAULT
;
2290 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2295 case ALG_SET_AEAD_AUTHSIZE
:
2297 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2306 case TARGET_SOL_SOCKET
:
2308 case TARGET_SO_RCVTIMEO
:
2312 optname
= SO_RCVTIMEO
;
2315 if (optlen
!= sizeof(struct target_timeval
)) {
2316 return -TARGET_EINVAL
;
2319 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2320 return -TARGET_EFAULT
;
2323 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2327 case TARGET_SO_SNDTIMEO
:
2328 optname
= SO_SNDTIMEO
;
2330 case TARGET_SO_ATTACH_FILTER
:
2332 struct target_sock_fprog
*tfprog
;
2333 struct target_sock_filter
*tfilter
;
2334 struct sock_fprog fprog
;
2335 struct sock_filter
*filter
;
2338 if (optlen
!= sizeof(*tfprog
)) {
2339 return -TARGET_EINVAL
;
2341 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2342 return -TARGET_EFAULT
;
2344 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2345 tswapal(tfprog
->filter
), 0)) {
2346 unlock_user_struct(tfprog
, optval_addr
, 1);
2347 return -TARGET_EFAULT
;
2350 fprog
.len
= tswap16(tfprog
->len
);
2351 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2352 if (filter
== NULL
) {
2353 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2354 unlock_user_struct(tfprog
, optval_addr
, 1);
2355 return -TARGET_ENOMEM
;
2357 for (i
= 0; i
< fprog
.len
; i
++) {
2358 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2359 filter
[i
].jt
= tfilter
[i
].jt
;
2360 filter
[i
].jf
= tfilter
[i
].jf
;
2361 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2363 fprog
.filter
= filter
;
2365 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2366 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2369 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2370 unlock_user_struct(tfprog
, optval_addr
, 1);
2373 case TARGET_SO_BINDTODEVICE
:
2375 char *dev_ifname
, *addr_ifname
;
2377 if (optlen
> IFNAMSIZ
- 1) {
2378 optlen
= IFNAMSIZ
- 1;
2380 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2382 return -TARGET_EFAULT
;
2384 optname
= SO_BINDTODEVICE
;
2385 addr_ifname
= alloca(IFNAMSIZ
);
2386 memcpy(addr_ifname
, dev_ifname
, optlen
);
2387 addr_ifname
[optlen
] = 0;
2388 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2389 addr_ifname
, optlen
));
2390 unlock_user (dev_ifname
, optval_addr
, 0);
2393 case TARGET_SO_LINGER
:
2396 struct target_linger
*tlg
;
2398 if (optlen
!= sizeof(struct target_linger
)) {
2399 return -TARGET_EINVAL
;
2401 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2402 return -TARGET_EFAULT
;
2404 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2405 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2406 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2408 unlock_user_struct(tlg
, optval_addr
, 0);
2411 /* Options with 'int' argument. */
2412 case TARGET_SO_DEBUG
:
2415 case TARGET_SO_REUSEADDR
:
2416 optname
= SO_REUSEADDR
;
2419 case TARGET_SO_REUSEPORT
:
2420 optname
= SO_REUSEPORT
;
2423 case TARGET_SO_TYPE
:
2426 case TARGET_SO_ERROR
:
2429 case TARGET_SO_DONTROUTE
:
2430 optname
= SO_DONTROUTE
;
2432 case TARGET_SO_BROADCAST
:
2433 optname
= SO_BROADCAST
;
2435 case TARGET_SO_SNDBUF
:
2436 optname
= SO_SNDBUF
;
2438 case TARGET_SO_SNDBUFFORCE
:
2439 optname
= SO_SNDBUFFORCE
;
2441 case TARGET_SO_RCVBUF
:
2442 optname
= SO_RCVBUF
;
2444 case TARGET_SO_RCVBUFFORCE
:
2445 optname
= SO_RCVBUFFORCE
;
2447 case TARGET_SO_KEEPALIVE
:
2448 optname
= SO_KEEPALIVE
;
2450 case TARGET_SO_OOBINLINE
:
2451 optname
= SO_OOBINLINE
;
2453 case TARGET_SO_NO_CHECK
:
2454 optname
= SO_NO_CHECK
;
2456 case TARGET_SO_PRIORITY
:
2457 optname
= SO_PRIORITY
;
2460 case TARGET_SO_BSDCOMPAT
:
2461 optname
= SO_BSDCOMPAT
;
2464 case TARGET_SO_PASSCRED
:
2465 optname
= SO_PASSCRED
;
2467 case TARGET_SO_PASSSEC
:
2468 optname
= SO_PASSSEC
;
2470 case TARGET_SO_TIMESTAMP
:
2471 optname
= SO_TIMESTAMP
;
2473 case TARGET_SO_RCVLOWAT
:
2474 optname
= SO_RCVLOWAT
;
2479 if (optlen
< sizeof(uint32_t))
2480 return -TARGET_EINVAL
;
2482 if (get_user_u32(val
, optval_addr
))
2483 return -TARGET_EFAULT
;
2484 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2489 case NETLINK_PKTINFO
:
2490 case NETLINK_ADD_MEMBERSHIP
:
2491 case NETLINK_DROP_MEMBERSHIP
:
2492 case NETLINK_BROADCAST_ERROR
:
2493 case NETLINK_NO_ENOBUFS
:
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2495 case NETLINK_LISTEN_ALL_NSID
:
2496 case NETLINK_CAP_ACK
:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2498 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2499 case NETLINK_EXT_ACK
:
2500 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2501 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2502 case NETLINK_GET_STRICT_CHK
:
2503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2509 if (optlen
< sizeof(uint32_t)) {
2510 return -TARGET_EINVAL
;
2512 if (get_user_u32(val
, optval_addr
)) {
2513 return -TARGET_EFAULT
;
2515 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2518 #endif /* SOL_NETLINK */
2521 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2523 ret
= -TARGET_ENOPROTOOPT
;
2528 /* do_getsockopt() Must return target values and target errnos. */
2529 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2530 abi_ulong optval_addr
, abi_ulong optlen
)
2537 case TARGET_SOL_SOCKET
:
2540 /* These don't just return a single integer */
2541 case TARGET_SO_PEERNAME
:
2543 case TARGET_SO_RCVTIMEO
: {
2547 optname
= SO_RCVTIMEO
;
2550 if (get_user_u32(len
, optlen
)) {
2551 return -TARGET_EFAULT
;
2554 return -TARGET_EINVAL
;
2558 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2563 if (len
> sizeof(struct target_timeval
)) {
2564 len
= sizeof(struct target_timeval
);
2566 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2567 return -TARGET_EFAULT
;
2569 if (put_user_u32(len
, optlen
)) {
2570 return -TARGET_EFAULT
;
2574 case TARGET_SO_SNDTIMEO
:
2575 optname
= SO_SNDTIMEO
;
2577 case TARGET_SO_PEERCRED
: {
2580 struct target_ucred
*tcr
;
2582 if (get_user_u32(len
, optlen
)) {
2583 return -TARGET_EFAULT
;
2586 return -TARGET_EINVAL
;
2590 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2598 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2599 return -TARGET_EFAULT
;
2601 __put_user(cr
.pid
, &tcr
->pid
);
2602 __put_user(cr
.uid
, &tcr
->uid
);
2603 __put_user(cr
.gid
, &tcr
->gid
);
2604 unlock_user_struct(tcr
, optval_addr
, 1);
2605 if (put_user_u32(len
, optlen
)) {
2606 return -TARGET_EFAULT
;
2610 case TARGET_SO_PEERSEC
: {
2613 if (get_user_u32(len
, optlen
)) {
2614 return -TARGET_EFAULT
;
2617 return -TARGET_EINVAL
;
2619 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2621 return -TARGET_EFAULT
;
2624 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2626 if (put_user_u32(lv
, optlen
)) {
2627 ret
= -TARGET_EFAULT
;
2629 unlock_user(name
, optval_addr
, lv
);
2632 case TARGET_SO_LINGER
:
2636 struct target_linger
*tlg
;
2638 if (get_user_u32(len
, optlen
)) {
2639 return -TARGET_EFAULT
;
2642 return -TARGET_EINVAL
;
2646 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2654 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2655 return -TARGET_EFAULT
;
2657 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2658 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2659 unlock_user_struct(tlg
, optval_addr
, 1);
2660 if (put_user_u32(len
, optlen
)) {
2661 return -TARGET_EFAULT
;
2665 /* Options with 'int' argument. */
2666 case TARGET_SO_DEBUG
:
2669 case TARGET_SO_REUSEADDR
:
2670 optname
= SO_REUSEADDR
;
2673 case TARGET_SO_REUSEPORT
:
2674 optname
= SO_REUSEPORT
;
2677 case TARGET_SO_TYPE
:
2680 case TARGET_SO_ERROR
:
2683 case TARGET_SO_DONTROUTE
:
2684 optname
= SO_DONTROUTE
;
2686 case TARGET_SO_BROADCAST
:
2687 optname
= SO_BROADCAST
;
2689 case TARGET_SO_SNDBUF
:
2690 optname
= SO_SNDBUF
;
2692 case TARGET_SO_RCVBUF
:
2693 optname
= SO_RCVBUF
;
2695 case TARGET_SO_KEEPALIVE
:
2696 optname
= SO_KEEPALIVE
;
2698 case TARGET_SO_OOBINLINE
:
2699 optname
= SO_OOBINLINE
;
2701 case TARGET_SO_NO_CHECK
:
2702 optname
= SO_NO_CHECK
;
2704 case TARGET_SO_PRIORITY
:
2705 optname
= SO_PRIORITY
;
2708 case TARGET_SO_BSDCOMPAT
:
2709 optname
= SO_BSDCOMPAT
;
2712 case TARGET_SO_PASSCRED
:
2713 optname
= SO_PASSCRED
;
2715 case TARGET_SO_TIMESTAMP
:
2716 optname
= SO_TIMESTAMP
;
2718 case TARGET_SO_RCVLOWAT
:
2719 optname
= SO_RCVLOWAT
;
2721 case TARGET_SO_ACCEPTCONN
:
2722 optname
= SO_ACCEPTCONN
;
2724 case TARGET_SO_PROTOCOL
:
2725 optname
= SO_PROTOCOL
;
2727 case TARGET_SO_DOMAIN
:
2728 optname
= SO_DOMAIN
;
2736 /* TCP and UDP options all take an 'int' value. */
2738 if (get_user_u32(len
, optlen
))
2739 return -TARGET_EFAULT
;
2741 return -TARGET_EINVAL
;
2743 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2746 if (optname
== SO_TYPE
) {
2747 val
= host_to_target_sock_type(val
);
2752 if (put_user_u32(val
, optval_addr
))
2753 return -TARGET_EFAULT
;
2755 if (put_user_u8(val
, optval_addr
))
2756 return -TARGET_EFAULT
;
2758 if (put_user_u32(len
, optlen
))
2759 return -TARGET_EFAULT
;
2766 case IP_ROUTER_ALERT
:
2770 case IP_MTU_DISCOVER
:
2776 case IP_MULTICAST_TTL
:
2777 case IP_MULTICAST_LOOP
:
2778 if (get_user_u32(len
, optlen
))
2779 return -TARGET_EFAULT
;
2781 return -TARGET_EINVAL
;
2783 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2786 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2788 if (put_user_u32(len
, optlen
)
2789 || put_user_u8(val
, optval_addr
))
2790 return -TARGET_EFAULT
;
2792 if (len
> sizeof(int))
2794 if (put_user_u32(len
, optlen
)
2795 || put_user_u32(val
, optval_addr
))
2796 return -TARGET_EFAULT
;
2800 ret
= -TARGET_ENOPROTOOPT
;
2806 case IPV6_MTU_DISCOVER
:
2809 case IPV6_RECVPKTINFO
:
2810 case IPV6_UNICAST_HOPS
:
2811 case IPV6_MULTICAST_HOPS
:
2812 case IPV6_MULTICAST_LOOP
:
2814 case IPV6_RECVHOPLIMIT
:
2815 case IPV6_2292HOPLIMIT
:
2818 case IPV6_2292PKTINFO
:
2819 case IPV6_RECVTCLASS
:
2820 case IPV6_RECVRTHDR
:
2821 case IPV6_2292RTHDR
:
2822 case IPV6_RECVHOPOPTS
:
2823 case IPV6_2292HOPOPTS
:
2824 case IPV6_RECVDSTOPTS
:
2825 case IPV6_2292DSTOPTS
:
2827 case IPV6_ADDR_PREFERENCES
:
2828 #ifdef IPV6_RECVPATHMTU
2829 case IPV6_RECVPATHMTU
:
2831 #ifdef IPV6_TRANSPARENT
2832 case IPV6_TRANSPARENT
:
2834 #ifdef IPV6_FREEBIND
2837 #ifdef IPV6_RECVORIGDSTADDR
2838 case IPV6_RECVORIGDSTADDR
:
2840 if (get_user_u32(len
, optlen
))
2841 return -TARGET_EFAULT
;
2843 return -TARGET_EINVAL
;
2845 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2848 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2850 if (put_user_u32(len
, optlen
)
2851 || put_user_u8(val
, optval_addr
))
2852 return -TARGET_EFAULT
;
2854 if (len
> sizeof(int))
2856 if (put_user_u32(len
, optlen
)
2857 || put_user_u32(val
, optval_addr
))
2858 return -TARGET_EFAULT
;
2862 ret
= -TARGET_ENOPROTOOPT
;
2869 case NETLINK_PKTINFO
:
2870 case NETLINK_BROADCAST_ERROR
:
2871 case NETLINK_NO_ENOBUFS
:
2872 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2873 case NETLINK_LISTEN_ALL_NSID
:
2874 case NETLINK_CAP_ACK
:
2875 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2876 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2877 case NETLINK_EXT_ACK
:
2878 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2879 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2880 case NETLINK_GET_STRICT_CHK
:
2881 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2882 if (get_user_u32(len
, optlen
)) {
2883 return -TARGET_EFAULT
;
2885 if (len
!= sizeof(val
)) {
2886 return -TARGET_EINVAL
;
2889 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2893 if (put_user_u32(lv
, optlen
)
2894 || put_user_u32(val
, optval_addr
)) {
2895 return -TARGET_EFAULT
;
2898 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2899 case NETLINK_LIST_MEMBERSHIPS
:
2903 if (get_user_u32(len
, optlen
)) {
2904 return -TARGET_EFAULT
;
2907 return -TARGET_EINVAL
;
2909 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2910 if (!results
&& len
> 0) {
2911 return -TARGET_EFAULT
;
2914 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2916 unlock_user(results
, optval_addr
, 0);
2919 /* swap host endianess to target endianess. */
2920 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2921 results
[i
] = tswap32(results
[i
]);
2923 if (put_user_u32(lv
, optlen
)) {
2924 return -TARGET_EFAULT
;
2926 unlock_user(results
, optval_addr
, 0);
2929 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2934 #endif /* SOL_NETLINK */
2937 qemu_log_mask(LOG_UNIMP
,
2938 "getsockopt level=%d optname=%d not yet supported\n",
2940 ret
= -TARGET_EOPNOTSUPP
;
2946 /* Convert target low/high pair representing file offset into the host
2947 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2948 * as the kernel doesn't handle them either.
2950 static void target_to_host_low_high(abi_ulong tlow
,
2952 unsigned long *hlow
,
2953 unsigned long *hhigh
)
2955 uint64_t off
= tlow
|
2956 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2957 TARGET_LONG_BITS
/ 2;
2960 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2963 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2964 abi_ulong count
, int copy
)
2966 struct target_iovec
*target_vec
;
2968 abi_ulong total_len
, max_len
;
2971 bool bad_address
= false;
2977 if (count
> IOV_MAX
) {
2982 vec
= g_try_new0(struct iovec
, count
);
2988 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2989 count
* sizeof(struct target_iovec
), 1);
2990 if (target_vec
== NULL
) {
2995 /* ??? If host page size > target page size, this will result in a
2996 value larger than what we can actually support. */
2997 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3000 for (i
= 0; i
< count
; i
++) {
3001 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3002 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3007 } else if (len
== 0) {
3008 /* Zero length pointer is ignored. */
3009 vec
[i
].iov_base
= 0;
3011 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3012 /* If the first buffer pointer is bad, this is a fault. But
3013 * subsequent bad buffers will result in a partial write; this
3014 * is realized by filling the vector with null pointers and
3016 if (!vec
[i
].iov_base
) {
3027 if (len
> max_len
- total_len
) {
3028 len
= max_len
- total_len
;
3031 vec
[i
].iov_len
= len
;
3035 unlock_user(target_vec
, target_addr
, 0);
3040 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3041 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3044 unlock_user(target_vec
, target_addr
, 0);
3051 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3052 abi_ulong count
, int copy
)
3054 struct target_iovec
*target_vec
;
3057 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3058 count
* sizeof(struct target_iovec
), 1);
3060 for (i
= 0; i
< count
; i
++) {
3061 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3062 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3066 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3068 unlock_user(target_vec
, target_addr
, 0);
3074 static inline int target_to_host_sock_type(int *type
)
3077 int target_type
= *type
;
3079 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3080 case TARGET_SOCK_DGRAM
:
3081 host_type
= SOCK_DGRAM
;
3083 case TARGET_SOCK_STREAM
:
3084 host_type
= SOCK_STREAM
;
3087 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3090 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3091 #if defined(SOCK_CLOEXEC)
3092 host_type
|= SOCK_CLOEXEC
;
3094 return -TARGET_EINVAL
;
3097 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3098 #if defined(SOCK_NONBLOCK)
3099 host_type
|= SOCK_NONBLOCK
;
3100 #elif !defined(O_NONBLOCK)
3101 return -TARGET_EINVAL
;
3108 /* Try to emulate socket type flags after socket creation. */
3109 static int sock_flags_fixup(int fd
, int target_type
)
3111 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3112 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3113 int flags
= fcntl(fd
, F_GETFL
);
3114 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3116 return -TARGET_EINVAL
;
3123 /* do_socket() Must return target values and target errnos. */
3124 static abi_long
do_socket(int domain
, int type
, int protocol
)
3126 int target_type
= type
;
3129 ret
= target_to_host_sock_type(&type
);
3134 if (domain
== PF_NETLINK
&& !(
3135 #ifdef CONFIG_RTNETLINK
3136 protocol
== NETLINK_ROUTE
||
3138 protocol
== NETLINK_KOBJECT_UEVENT
||
3139 protocol
== NETLINK_AUDIT
)) {
3140 return -TARGET_EPROTONOSUPPORT
;
3143 if (domain
== AF_PACKET
||
3144 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3145 protocol
= tswap16(protocol
);
3148 ret
= get_errno(socket(domain
, type
, protocol
));
3150 ret
= sock_flags_fixup(ret
, target_type
);
3151 if (type
== SOCK_PACKET
) {
3152 /* Manage an obsolete case :
3153 * if socket type is SOCK_PACKET, bind by name
3155 fd_trans_register(ret
, &target_packet_trans
);
3156 } else if (domain
== PF_NETLINK
) {
3158 #ifdef CONFIG_RTNETLINK
3160 fd_trans_register(ret
, &target_netlink_route_trans
);
3163 case NETLINK_KOBJECT_UEVENT
:
3164 /* nothing to do: messages are strings */
3167 fd_trans_register(ret
, &target_netlink_audit_trans
);
3170 g_assert_not_reached();
3177 /* do_bind() Must return target values and target errnos. */
3178 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3184 if ((int)addrlen
< 0) {
3185 return -TARGET_EINVAL
;
3188 addr
= alloca(addrlen
+1);
3190 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3194 return get_errno(bind(sockfd
, addr
, addrlen
));
3197 /* do_connect() Must return target values and target errnos. */
3198 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3204 if ((int)addrlen
< 0) {
3205 return -TARGET_EINVAL
;
3208 addr
= alloca(addrlen
+1);
3210 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3214 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3217 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3218 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3219 int flags
, int send
)
3225 abi_ulong target_vec
;
3227 if (msgp
->msg_name
) {
3228 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3229 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3230 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3231 tswapal(msgp
->msg_name
),
3233 if (ret
== -TARGET_EFAULT
) {
3234 /* For connected sockets msg_name and msg_namelen must
3235 * be ignored, so returning EFAULT immediately is wrong.
3236 * Instead, pass a bad msg_name to the host kernel, and
3237 * let it decide whether to return EFAULT or not.
3239 msg
.msg_name
= (void *)-1;
3244 msg
.msg_name
= NULL
;
3245 msg
.msg_namelen
= 0;
3247 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3248 msg
.msg_control
= alloca(msg
.msg_controllen
);
3249 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3251 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3253 count
= tswapal(msgp
->msg_iovlen
);
3254 target_vec
= tswapal(msgp
->msg_iov
);
3256 if (count
> IOV_MAX
) {
3257 /* sendrcvmsg returns a different errno for this condition than
3258 * readv/writev, so we must catch it here before lock_iovec() does.
3260 ret
= -TARGET_EMSGSIZE
;
3264 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3265 target_vec
, count
, send
);
3267 ret
= -host_to_target_errno(errno
);
3270 msg
.msg_iovlen
= count
;
3274 if (fd_trans_target_to_host_data(fd
)) {
3277 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3278 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3279 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3280 msg
.msg_iov
->iov_len
);
3282 msg
.msg_iov
->iov_base
= host_msg
;
3283 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3287 ret
= target_to_host_cmsg(&msg
, msgp
);
3289 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3293 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3294 if (!is_error(ret
)) {
3296 if (fd_trans_host_to_target_data(fd
)) {
3297 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3298 MIN(msg
.msg_iov
->iov_len
, len
));
3300 ret
= host_to_target_cmsg(msgp
, &msg
);
3302 if (!is_error(ret
)) {
3303 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3304 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3305 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3306 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3307 msg
.msg_name
, msg
.msg_namelen
);
3319 unlock_iovec(vec
, target_vec
, count
, !send
);
3324 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3325 int flags
, int send
)
3328 struct target_msghdr
*msgp
;
3330 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3334 return -TARGET_EFAULT
;
3336 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3337 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3341 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3342 * so it might not have this *mmsg-specific flag either.
3344 #ifndef MSG_WAITFORONE
3345 #define MSG_WAITFORONE 0x10000
3348 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3349 unsigned int vlen
, unsigned int flags
,
3352 struct target_mmsghdr
*mmsgp
;
3356 if (vlen
> UIO_MAXIOV
) {
3360 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3362 return -TARGET_EFAULT
;
3365 for (i
= 0; i
< vlen
; i
++) {
3366 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3367 if (is_error(ret
)) {
3370 mmsgp
[i
].msg_len
= tswap32(ret
);
3371 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3372 if (flags
& MSG_WAITFORONE
) {
3373 flags
|= MSG_DONTWAIT
;
3377 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3379 /* Return number of datagrams sent if we sent any at all;
3380 * otherwise return the error.
3388 /* do_accept4() Must return target values and target errnos. */
3389 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3390 abi_ulong target_addrlen_addr
, int flags
)
3392 socklen_t addrlen
, ret_addrlen
;
3397 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3399 if (target_addr
== 0) {
3400 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3403 /* linux returns EFAULT if addrlen pointer is invalid */
3404 if (get_user_u32(addrlen
, target_addrlen_addr
))
3405 return -TARGET_EFAULT
;
3407 if ((int)addrlen
< 0) {
3408 return -TARGET_EINVAL
;
3411 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3412 return -TARGET_EFAULT
;
3415 addr
= alloca(addrlen
);
3417 ret_addrlen
= addrlen
;
3418 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3419 if (!is_error(ret
)) {
3420 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3421 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3422 ret
= -TARGET_EFAULT
;
3428 /* do_getpeername() Must return target values and target errnos. */
3429 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3430 abi_ulong target_addrlen_addr
)
3432 socklen_t addrlen
, ret_addrlen
;
3436 if (get_user_u32(addrlen
, target_addrlen_addr
))
3437 return -TARGET_EFAULT
;
3439 if ((int)addrlen
< 0) {
3440 return -TARGET_EINVAL
;
3443 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3444 return -TARGET_EFAULT
;
3447 addr
= alloca(addrlen
);
3449 ret_addrlen
= addrlen
;
3450 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3451 if (!is_error(ret
)) {
3452 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3453 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3454 ret
= -TARGET_EFAULT
;
3460 /* do_getsockname() Must return target values and target errnos. */
3461 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3462 abi_ulong target_addrlen_addr
)
3464 socklen_t addrlen
, ret_addrlen
;
3468 if (get_user_u32(addrlen
, target_addrlen_addr
))
3469 return -TARGET_EFAULT
;
3471 if ((int)addrlen
< 0) {
3472 return -TARGET_EINVAL
;
3475 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3476 return -TARGET_EFAULT
;
3479 addr
= alloca(addrlen
);
3481 ret_addrlen
= addrlen
;
3482 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3483 if (!is_error(ret
)) {
3484 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3485 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3486 ret
= -TARGET_EFAULT
;
3492 /* do_socketpair() Must return target values and target errnos. */
3493 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3494 abi_ulong target_tab_addr
)
3499 target_to_host_sock_type(&type
);
3501 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3502 if (!is_error(ret
)) {
3503 if (put_user_s32(tab
[0], target_tab_addr
)
3504 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3505 ret
= -TARGET_EFAULT
;
3510 /* do_sendto() Must return target values and target errnos. */
3511 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3512 abi_ulong target_addr
, socklen_t addrlen
)
3516 void *copy_msg
= NULL
;
3519 if ((int)addrlen
< 0) {
3520 return -TARGET_EINVAL
;
3523 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3525 return -TARGET_EFAULT
;
3526 if (fd_trans_target_to_host_data(fd
)) {
3527 copy_msg
= host_msg
;
3528 host_msg
= g_malloc(len
);
3529 memcpy(host_msg
, copy_msg
, len
);
3530 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3536 addr
= alloca(addrlen
+1);
3537 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3541 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3543 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3548 host_msg
= copy_msg
;
3550 unlock_user(host_msg
, msg
, 0);
3554 /* do_recvfrom() Must return target values and target errnos. */
3555 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3556 abi_ulong target_addr
,
3557 abi_ulong target_addrlen
)
3559 socklen_t addrlen
, ret_addrlen
;
3567 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3569 return -TARGET_EFAULT
;
3573 if (get_user_u32(addrlen
, target_addrlen
)) {
3574 ret
= -TARGET_EFAULT
;
3577 if ((int)addrlen
< 0) {
3578 ret
= -TARGET_EINVAL
;
3581 addr
= alloca(addrlen
);
3582 ret_addrlen
= addrlen
;
3583 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3584 addr
, &ret_addrlen
));
3586 addr
= NULL
; /* To keep compiler quiet. */
3587 addrlen
= 0; /* To keep compiler quiet. */
3588 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3590 if (!is_error(ret
)) {
3591 if (fd_trans_host_to_target_data(fd
)) {
3593 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3594 if (is_error(trans
)) {
3600 host_to_target_sockaddr(target_addr
, addr
,
3601 MIN(addrlen
, ret_addrlen
));
3602 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3603 ret
= -TARGET_EFAULT
;
3607 unlock_user(host_msg
, msg
, len
);
3610 unlock_user(host_msg
, msg
, 0);
3615 #ifdef TARGET_NR_socketcall
3616 /* do_socketcall() must return target values and target errnos. */
3617 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3619 static const unsigned nargs
[] = { /* number of arguments per operation */
3620 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3621 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3622 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3623 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3624 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3625 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3626 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3627 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3628 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3629 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3630 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3631 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3632 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3633 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3634 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3635 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3636 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3637 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3638 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3639 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3641 abi_long a
[6]; /* max 6 args */
3644 /* check the range of the first argument num */
3645 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3646 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3647 return -TARGET_EINVAL
;
3649 /* ensure we have space for args */
3650 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3651 return -TARGET_EINVAL
;
3653 /* collect the arguments in a[] according to nargs[] */
3654 for (i
= 0; i
< nargs
[num
]; ++i
) {
3655 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3656 return -TARGET_EFAULT
;
3659 /* now when we have the args, invoke the appropriate underlying function */
3661 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3662 return do_socket(a
[0], a
[1], a
[2]);
3663 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3664 return do_bind(a
[0], a
[1], a
[2]);
3665 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3666 return do_connect(a
[0], a
[1], a
[2]);
3667 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3668 return get_errno(listen(a
[0], a
[1]));
3669 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3670 return do_accept4(a
[0], a
[1], a
[2], 0);
3671 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3672 return do_getsockname(a
[0], a
[1], a
[2]);
3673 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3674 return do_getpeername(a
[0], a
[1], a
[2]);
3675 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3676 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3677 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3678 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3679 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3680 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3681 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3682 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3683 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3684 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3685 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3686 return get_errno(shutdown(a
[0], a
[1]));
3687 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3688 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3689 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3690 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3691 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3692 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3693 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3694 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3695 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3696 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3697 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3698 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3699 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3700 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3702 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3703 return -TARGET_EINVAL
;
3708 #define N_SHM_REGIONS 32
3710 static struct shm_region
{
3714 } shm_regions
[N_SHM_REGIONS
];
3716 #ifndef TARGET_SEMID64_DS
3717 /* asm-generic version of this struct */
3718 struct target_semid64_ds
3720 struct target_ipc_perm sem_perm
;
3721 abi_ulong sem_otime
;
3722 #if TARGET_ABI_BITS == 32
3723 abi_ulong __unused1
;
3725 abi_ulong sem_ctime
;
3726 #if TARGET_ABI_BITS == 32
3727 abi_ulong __unused2
;
3729 abi_ulong sem_nsems
;
3730 abi_ulong __unused3
;
3731 abi_ulong __unused4
;
3735 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3736 abi_ulong target_addr
)
3738 struct target_ipc_perm
*target_ip
;
3739 struct target_semid64_ds
*target_sd
;
3741 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3742 return -TARGET_EFAULT
;
3743 target_ip
= &(target_sd
->sem_perm
);
3744 host_ip
->__key
= tswap32(target_ip
->__key
);
3745 host_ip
->uid
= tswap32(target_ip
->uid
);
3746 host_ip
->gid
= tswap32(target_ip
->gid
);
3747 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3748 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3749 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3750 host_ip
->mode
= tswap32(target_ip
->mode
);
3752 host_ip
->mode
= tswap16(target_ip
->mode
);
3754 #if defined(TARGET_PPC)
3755 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3757 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3759 unlock_user_struct(target_sd
, target_addr
, 0);
3763 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3764 struct ipc_perm
*host_ip
)
3766 struct target_ipc_perm
*target_ip
;
3767 struct target_semid64_ds
*target_sd
;
3769 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3770 return -TARGET_EFAULT
;
3771 target_ip
= &(target_sd
->sem_perm
);
3772 target_ip
->__key
= tswap32(host_ip
->__key
);
3773 target_ip
->uid
= tswap32(host_ip
->uid
);
3774 target_ip
->gid
= tswap32(host_ip
->gid
);
3775 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3776 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3777 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3778 target_ip
->mode
= tswap32(host_ip
->mode
);
3780 target_ip
->mode
= tswap16(host_ip
->mode
);
3782 #if defined(TARGET_PPC)
3783 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3785 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3787 unlock_user_struct(target_sd
, target_addr
, 1);
3791 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3792 abi_ulong target_addr
)
3794 struct target_semid64_ds
*target_sd
;
3796 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3797 return -TARGET_EFAULT
;
3798 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3799 return -TARGET_EFAULT
;
3800 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3801 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3802 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3803 unlock_user_struct(target_sd
, target_addr
, 0);
3807 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3808 struct semid_ds
*host_sd
)
3810 struct target_semid64_ds
*target_sd
;
3812 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3813 return -TARGET_EFAULT
;
3814 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3815 return -TARGET_EFAULT
;
3816 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3817 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3818 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3819 unlock_user_struct(target_sd
, target_addr
, 1);
3823 struct target_seminfo
{
3836 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3837 struct seminfo
*host_seminfo
)
3839 struct target_seminfo
*target_seminfo
;
3840 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3841 return -TARGET_EFAULT
;
3842 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3843 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3844 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3845 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3846 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3847 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3848 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3849 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3850 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3851 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3852 unlock_user_struct(target_seminfo
, target_addr
, 1);
3858 struct semid_ds
*buf
;
3859 unsigned short *array
;
3860 struct seminfo
*__buf
;
3863 union target_semun
{
3870 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3871 abi_ulong target_addr
)
3874 unsigned short *array
;
3876 struct semid_ds semid_ds
;
3879 semun
.buf
= &semid_ds
;
3881 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3883 return get_errno(ret
);
3885 nsems
= semid_ds
.sem_nsems
;
3887 *host_array
= g_try_new(unsigned short, nsems
);
3889 return -TARGET_ENOMEM
;
3891 array
= lock_user(VERIFY_READ
, target_addr
,
3892 nsems
*sizeof(unsigned short), 1);
3894 g_free(*host_array
);
3895 return -TARGET_EFAULT
;
3898 for(i
=0; i
<nsems
; i
++) {
3899 __get_user((*host_array
)[i
], &array
[i
]);
3901 unlock_user(array
, target_addr
, 0);
3906 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3907 unsigned short **host_array
)
3910 unsigned short *array
;
3912 struct semid_ds semid_ds
;
3915 semun
.buf
= &semid_ds
;
3917 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3919 return get_errno(ret
);
3921 nsems
= semid_ds
.sem_nsems
;
3923 array
= lock_user(VERIFY_WRITE
, target_addr
,
3924 nsems
*sizeof(unsigned short), 0);
3926 return -TARGET_EFAULT
;
3928 for(i
=0; i
<nsems
; i
++) {
3929 __put_user((*host_array
)[i
], &array
[i
]);
3931 g_free(*host_array
);
3932 unlock_user(array
, target_addr
, 1);
3937 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3938 abi_ulong target_arg
)
3940 union target_semun target_su
= { .buf
= target_arg
};
3942 struct semid_ds dsarg
;
3943 unsigned short *array
= NULL
;
3944 struct seminfo seminfo
;
3945 abi_long ret
= -TARGET_EINVAL
;
3952 /* In 64 bit cross-endian situations, we will erroneously pick up
3953 * the wrong half of the union for the "val" element. To rectify
3954 * this, the entire 8-byte structure is byteswapped, followed by
3955 * a swap of the 4 byte val field. In other cases, the data is
3956 * already in proper host byte order. */
3957 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3958 target_su
.buf
= tswapal(target_su
.buf
);
3959 arg
.val
= tswap32(target_su
.val
);
3961 arg
.val
= target_su
.val
;
3963 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3967 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3971 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3972 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3979 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3983 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3984 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3990 arg
.__buf
= &seminfo
;
3991 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3992 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4000 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4007 struct target_sembuf
{
4008 unsigned short sem_num
;
4013 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4014 abi_ulong target_addr
,
4017 struct target_sembuf
*target_sembuf
;
4020 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4021 nsops
*sizeof(struct target_sembuf
), 1);
4023 return -TARGET_EFAULT
;
4025 for(i
=0; i
<nsops
; i
++) {
4026 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4027 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4028 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4031 unlock_user(target_sembuf
, target_addr
, 0);
4036 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4037 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4040 * This macro is required to handle the s390 variants, which passes the
4041 * arguments in a different order than default.
4044 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4045 (__nsops), (__timeout), (__sops)
4047 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4048 (__nsops), 0, (__sops), (__timeout)
4051 static inline abi_long
do_semtimedop(int semid
,
4054 abi_long timeout
, bool time64
)
4056 struct sembuf
*sops
;
4057 struct timespec ts
, *pts
= NULL
;
4063 if (target_to_host_timespec64(pts
, timeout
)) {
4064 return -TARGET_EFAULT
;
4067 if (target_to_host_timespec(pts
, timeout
)) {
4068 return -TARGET_EFAULT
;
4073 if (nsops
> TARGET_SEMOPM
) {
4074 return -TARGET_E2BIG
;
4077 sops
= g_new(struct sembuf
, nsops
);
4079 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4081 return -TARGET_EFAULT
;
4084 ret
= -TARGET_ENOSYS
;
4085 #ifdef __NR_semtimedop
4086 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4089 if (ret
== -TARGET_ENOSYS
) {
4090 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4091 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
4099 struct target_msqid_ds
4101 struct target_ipc_perm msg_perm
;
4102 abi_ulong msg_stime
;
4103 #if TARGET_ABI_BITS == 32
4104 abi_ulong __unused1
;
4106 abi_ulong msg_rtime
;
4107 #if TARGET_ABI_BITS == 32
4108 abi_ulong __unused2
;
4110 abi_ulong msg_ctime
;
4111 #if TARGET_ABI_BITS == 32
4112 abi_ulong __unused3
;
4114 abi_ulong __msg_cbytes
;
4116 abi_ulong msg_qbytes
;
4117 abi_ulong msg_lspid
;
4118 abi_ulong msg_lrpid
;
4119 abi_ulong __unused4
;
4120 abi_ulong __unused5
;
4123 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4124 abi_ulong target_addr
)
4126 struct target_msqid_ds
*target_md
;
4128 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4129 return -TARGET_EFAULT
;
4130 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4131 return -TARGET_EFAULT
;
4132 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4133 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4134 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4135 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4136 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4137 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4138 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4139 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4140 unlock_user_struct(target_md
, target_addr
, 0);
4144 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4145 struct msqid_ds
*host_md
)
4147 struct target_msqid_ds
*target_md
;
4149 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4150 return -TARGET_EFAULT
;
4151 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4152 return -TARGET_EFAULT
;
4153 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4154 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4155 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4156 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4157 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4158 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4159 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4160 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4161 unlock_user_struct(target_md
, target_addr
, 1);
4165 struct target_msginfo
{
4173 unsigned short int msgseg
;
4176 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4177 struct msginfo
*host_msginfo
)
4179 struct target_msginfo
*target_msginfo
;
4180 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4181 return -TARGET_EFAULT
;
4182 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4183 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4184 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4185 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4186 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4187 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4188 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4189 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4190 unlock_user_struct(target_msginfo
, target_addr
, 1);
4194 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4196 struct msqid_ds dsarg
;
4197 struct msginfo msginfo
;
4198 abi_long ret
= -TARGET_EINVAL
;
4206 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4207 return -TARGET_EFAULT
;
4208 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4209 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4210 return -TARGET_EFAULT
;
4213 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4217 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4218 if (host_to_target_msginfo(ptr
, &msginfo
))
4219 return -TARGET_EFAULT
;
4226 struct target_msgbuf
{
4231 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4232 ssize_t msgsz
, int msgflg
)
4234 struct target_msgbuf
*target_mb
;
4235 struct msgbuf
*host_mb
;
4239 return -TARGET_EINVAL
;
4242 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4243 return -TARGET_EFAULT
;
4244 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4246 unlock_user_struct(target_mb
, msgp
, 0);
4247 return -TARGET_ENOMEM
;
4249 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4250 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4251 ret
= -TARGET_ENOSYS
;
4253 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4256 if (ret
== -TARGET_ENOSYS
) {
4258 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4261 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4267 unlock_user_struct(target_mb
, msgp
, 0);
4273 #if defined(__sparc__)
4274 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4275 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4276 #elif defined(__s390x__)
4277 /* The s390 sys_ipc variant has only five parameters. */
4278 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4279 ((long int[]){(long int)__msgp, __msgtyp})
4281 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4282 ((long int[]){(long int)__msgp, __msgtyp}), 0
4286 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4287 ssize_t msgsz
, abi_long msgtyp
,
4290 struct target_msgbuf
*target_mb
;
4292 struct msgbuf
*host_mb
;
4296 return -TARGET_EINVAL
;
4299 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4300 return -TARGET_EFAULT
;
4302 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4304 ret
= -TARGET_ENOMEM
;
4307 ret
= -TARGET_ENOSYS
;
4309 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4312 if (ret
== -TARGET_ENOSYS
) {
4313 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4314 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4319 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4320 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4321 if (!target_mtext
) {
4322 ret
= -TARGET_EFAULT
;
4325 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4326 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4329 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4333 unlock_user_struct(target_mb
, msgp
, 1);
4338 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4339 abi_ulong target_addr
)
4341 struct target_shmid_ds
*target_sd
;
4343 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4344 return -TARGET_EFAULT
;
4345 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4346 return -TARGET_EFAULT
;
4347 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4348 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4349 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4350 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4351 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4352 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4353 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4354 unlock_user_struct(target_sd
, target_addr
, 0);
4358 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4359 struct shmid_ds
*host_sd
)
4361 struct target_shmid_ds
*target_sd
;
4363 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4364 return -TARGET_EFAULT
;
4365 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4366 return -TARGET_EFAULT
;
4367 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4368 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4369 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4370 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4371 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4372 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4373 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4374 unlock_user_struct(target_sd
, target_addr
, 1);
4378 struct target_shminfo
{
4386 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4387 struct shminfo
*host_shminfo
)
4389 struct target_shminfo
*target_shminfo
;
4390 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4391 return -TARGET_EFAULT
;
4392 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4393 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4394 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4395 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4396 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4397 unlock_user_struct(target_shminfo
, target_addr
, 1);
4401 struct target_shm_info
{
4406 abi_ulong swap_attempts
;
4407 abi_ulong swap_successes
;
4410 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4411 struct shm_info
*host_shm_info
)
4413 struct target_shm_info
*target_shm_info
;
4414 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4415 return -TARGET_EFAULT
;
4416 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4417 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4418 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4419 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4420 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4421 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4422 unlock_user_struct(target_shm_info
, target_addr
, 1);
4426 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4428 struct shmid_ds dsarg
;
4429 struct shminfo shminfo
;
4430 struct shm_info shm_info
;
4431 abi_long ret
= -TARGET_EINVAL
;
4439 if (target_to_host_shmid_ds(&dsarg
, buf
))
4440 return -TARGET_EFAULT
;
4441 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4442 if (host_to_target_shmid_ds(buf
, &dsarg
))
4443 return -TARGET_EFAULT
;
4446 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4447 if (host_to_target_shminfo(buf
, &shminfo
))
4448 return -TARGET_EFAULT
;
4451 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4452 if (host_to_target_shm_info(buf
, &shm_info
))
4453 return -TARGET_EFAULT
;
4458 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4465 #ifndef TARGET_FORCE_SHMLBA
4466 /* For most architectures, SHMLBA is the same as the page size;
4467 * some architectures have larger values, in which case they should
4468 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4469 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4470 * and defining its own value for SHMLBA.
4472 * The kernel also permits SHMLBA to be set by the architecture to a
4473 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4474 * this means that addresses are rounded to the large size if
4475 * SHM_RND is set but addresses not aligned to that size are not rejected
4476 * as long as they are at least page-aligned. Since the only architecture
4477 * which uses this is ia64 this code doesn't provide for that oddity.
4479 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4481 return TARGET_PAGE_SIZE
;
4485 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4486 int shmid
, abi_ulong shmaddr
, int shmflg
)
4488 CPUState
*cpu
= env_cpu(cpu_env
);
4491 struct shmid_ds shm_info
;
4495 /* shmat pointers are always untagged */
4497 /* find out the length of the shared memory segment */
4498 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4499 if (is_error(ret
)) {
4500 /* can't get length, bail out */
4504 shmlba
= target_shmlba(cpu_env
);
4506 if (shmaddr
& (shmlba
- 1)) {
4507 if (shmflg
& SHM_RND
) {
4508 shmaddr
&= ~(shmlba
- 1);
4510 return -TARGET_EINVAL
;
4513 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4514 return -TARGET_EINVAL
;
4520 * We're mapping shared memory, so ensure we generate code for parallel
4521 * execution and flush old translations. This will work up to the level
4522 * supported by the host -- anything that requires EXCP_ATOMIC will not
4523 * be atomic with respect to an external process.
4525 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4526 cpu
->tcg_cflags
|= CF_PARALLEL
;
4531 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4533 abi_ulong mmap_start
;
4535 /* In order to use the host shmat, we need to honor host SHMLBA. */
4536 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4538 if (mmap_start
== -1) {
4540 host_raddr
= (void *)-1;
4542 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4543 shmflg
| SHM_REMAP
);
4546 if (host_raddr
== (void *)-1) {
4548 return get_errno((long)host_raddr
);
4550 raddr
=h2g((unsigned long)host_raddr
);
4552 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4553 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4554 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4556 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4557 if (!shm_regions
[i
].in_use
) {
4558 shm_regions
[i
].in_use
= true;
4559 shm_regions
[i
].start
= raddr
;
4560 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4570 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4575 /* shmdt pointers are always untagged */
4579 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4580 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4581 shm_regions
[i
].in_use
= false;
4582 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4586 rv
= get_errno(shmdt(g2h_untagged(shmaddr
)));
4593 #ifdef TARGET_NR_ipc
4594 /* ??? This only works with linear mappings. */
4595 /* do_ipc() must return target values and target errnos. */
4596 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4597 unsigned int call
, abi_long first
,
4598 abi_long second
, abi_long third
,
4599 abi_long ptr
, abi_long fifth
)
4604 version
= call
>> 16;
4609 ret
= do_semtimedop(first
, ptr
, second
, 0, false);
4611 case IPCOP_semtimedop
:
4613 * The s390 sys_ipc variant has only five parameters instead of six
4614 * (as for default variant) and the only difference is the handling of
4615 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4616 * to a struct timespec where the generic variant uses fifth parameter.
4618 #if defined(TARGET_S390X)
4619 ret
= do_semtimedop(first
, ptr
, second
, third
, TARGET_ABI_BITS
== 64);
4621 ret
= do_semtimedop(first
, ptr
, second
, fifth
, TARGET_ABI_BITS
== 64);
4626 ret
= get_errno(semget(first
, second
, third
));
4629 case IPCOP_semctl
: {
4630 /* The semun argument to semctl is passed by value, so dereference the
4633 get_user_ual(atptr
, ptr
);
4634 ret
= do_semctl(first
, second
, third
, atptr
);
4639 ret
= get_errno(msgget(first
, second
));
4643 ret
= do_msgsnd(first
, ptr
, second
, third
);
4647 ret
= do_msgctl(first
, second
, ptr
);
4654 struct target_ipc_kludge
{
4659 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4660 ret
= -TARGET_EFAULT
;
4664 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4666 unlock_user_struct(tmp
, ptr
, 0);
4670 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4679 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4680 if (is_error(raddr
))
4681 return get_errno(raddr
);
4682 if (put_user_ual(raddr
, third
))
4683 return -TARGET_EFAULT
;
4687 ret
= -TARGET_EINVAL
;
4692 ret
= do_shmdt(ptr
);
4696 /* IPC_* flag values are the same on all linux platforms */
4697 ret
= get_errno(shmget(first
, second
, third
));
4700 /* IPC_* and SHM_* command values are the same on all linux platforms */
4702 ret
= do_shmctl(first
, second
, ptr
);
4705 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4707 ret
= -TARGET_ENOSYS
;
4714 /* kernel structure types definitions */
4716 #define STRUCT(name, ...) STRUCT_ ## name,
4717 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4719 #include "syscall_types.h"
4723 #undef STRUCT_SPECIAL
4725 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4726 #define STRUCT_SPECIAL(name)
4727 #include "syscall_types.h"
4729 #undef STRUCT_SPECIAL
4731 #define MAX_STRUCT_SIZE 4096
4733 #ifdef CONFIG_FIEMAP
4734 /* So fiemap access checks don't overflow on 32 bit systems.
4735 * This is very slightly smaller than the limit imposed by
4736 * the underlying kernel.
4738 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4739 / sizeof(struct fiemap_extent))
4741 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4742 int fd
, int cmd
, abi_long arg
)
4744 /* The parameter for this ioctl is a struct fiemap followed
4745 * by an array of struct fiemap_extent whose size is set
4746 * in fiemap->fm_extent_count. The array is filled in by the
4749 int target_size_in
, target_size_out
;
4751 const argtype
*arg_type
= ie
->arg_type
;
4752 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4755 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4759 assert(arg_type
[0] == TYPE_PTR
);
4760 assert(ie
->access
== IOC_RW
);
4762 target_size_in
= thunk_type_size(arg_type
, 0);
4763 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4765 return -TARGET_EFAULT
;
4767 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4768 unlock_user(argptr
, arg
, 0);
4769 fm
= (struct fiemap
*)buf_temp
;
4770 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4771 return -TARGET_EINVAL
;
4774 outbufsz
= sizeof (*fm
) +
4775 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4777 if (outbufsz
> MAX_STRUCT_SIZE
) {
4778 /* We can't fit all the extents into the fixed size buffer.
4779 * Allocate one that is large enough and use it instead.
4781 fm
= g_try_malloc(outbufsz
);
4783 return -TARGET_ENOMEM
;
4785 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4788 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4789 if (!is_error(ret
)) {
4790 target_size_out
= target_size_in
;
4791 /* An extent_count of 0 means we were only counting the extents
4792 * so there are no structs to copy
4794 if (fm
->fm_extent_count
!= 0) {
4795 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4797 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4799 ret
= -TARGET_EFAULT
;
4801 /* Convert the struct fiemap */
4802 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4803 if (fm
->fm_extent_count
!= 0) {
4804 p
= argptr
+ target_size_in
;
4805 /* ...and then all the struct fiemap_extents */
4806 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4807 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4812 unlock_user(argptr
, arg
, target_size_out
);
4822 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4823 int fd
, int cmd
, abi_long arg
)
4825 const argtype
*arg_type
= ie
->arg_type
;
4829 struct ifconf
*host_ifconf
;
4831 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4832 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4833 int target_ifreq_size
;
4838 abi_long target_ifc_buf
;
4842 assert(arg_type
[0] == TYPE_PTR
);
4843 assert(ie
->access
== IOC_RW
);
4846 target_size
= thunk_type_size(arg_type
, 0);
4848 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4850 return -TARGET_EFAULT
;
4851 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4852 unlock_user(argptr
, arg
, 0);
4854 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4855 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4856 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4858 if (target_ifc_buf
!= 0) {
4859 target_ifc_len
= host_ifconf
->ifc_len
;
4860 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4861 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4863 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4864 if (outbufsz
> MAX_STRUCT_SIZE
) {
4866 * We can't fit all the extents into the fixed size buffer.
4867 * Allocate one that is large enough and use it instead.
4869 host_ifconf
= malloc(outbufsz
);
4871 return -TARGET_ENOMEM
;
4873 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4876 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4878 host_ifconf
->ifc_len
= host_ifc_len
;
4880 host_ifc_buf
= NULL
;
4882 host_ifconf
->ifc_buf
= host_ifc_buf
;
4884 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4885 if (!is_error(ret
)) {
4886 /* convert host ifc_len to target ifc_len */
4888 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4889 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4890 host_ifconf
->ifc_len
= target_ifc_len
;
4892 /* restore target ifc_buf */
4894 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4896 /* copy struct ifconf to target user */
4898 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4900 return -TARGET_EFAULT
;
4901 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4902 unlock_user(argptr
, arg
, target_size
);
4904 if (target_ifc_buf
!= 0) {
4905 /* copy ifreq[] to target user */
4906 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4907 for (i
= 0; i
< nb_ifreq
; i
++) {
4908 thunk_convert(argptr
+ i
* target_ifreq_size
,
4909 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4910 ifreq_arg_type
, THUNK_TARGET
);
4912 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4923 #if defined(CONFIG_USBFS)
4924 #if HOST_LONG_BITS > 64
4925 #error USBDEVFS thunks do not support >64 bit hosts yet.
4928 uint64_t target_urb_adr
;
4929 uint64_t target_buf_adr
;
4930 char *target_buf_ptr
;
4931 struct usbdevfs_urb host_urb
;
4934 static GHashTable
*usbdevfs_urb_hashtable(void)
4936 static GHashTable
*urb_hashtable
;
4938 if (!urb_hashtable
) {
4939 urb_hashtable
= g_hash_table_new(g_int64_hash
, g_int64_equal
);
4941 return urb_hashtable
;
4944 static void urb_hashtable_insert(struct live_urb
*urb
)
4946 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4947 g_hash_table_insert(urb_hashtable
, urb
, urb
);
4950 static struct live_urb
*urb_hashtable_lookup(uint64_t target_urb_adr
)
4952 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4953 return g_hash_table_lookup(urb_hashtable
, &target_urb_adr
);
4956 static void urb_hashtable_remove(struct live_urb
*urb
)
4958 GHashTable
*urb_hashtable
= usbdevfs_urb_hashtable();
4959 g_hash_table_remove(urb_hashtable
, urb
);
4963 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4964 int fd
, int cmd
, abi_long arg
)
4966 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4967 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4968 struct live_urb
*lurb
;
4972 uintptr_t target_urb_adr
;
4975 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4977 memset(buf_temp
, 0, sizeof(uint64_t));
4978 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4979 if (is_error(ret
)) {
4983 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4984 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4985 if (!lurb
->target_urb_adr
) {
4986 return -TARGET_EFAULT
;
4988 urb_hashtable_remove(lurb
);
4989 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4990 lurb
->host_urb
.buffer_length
);
4991 lurb
->target_buf_ptr
= NULL
;
4993 /* restore the guest buffer pointer */
4994 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4996 /* update the guest urb struct */
4997 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5000 return -TARGET_EFAULT
;
5002 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5003 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5005 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5006 /* write back the urb handle */
5007 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5010 return -TARGET_EFAULT
;
5013 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5014 target_urb_adr
= lurb
->target_urb_adr
;
5015 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5016 unlock_user(argptr
, arg
, target_size
);
5023 do_ioctl_usbdevfs_discardurb(const IOCTLEntry
*ie
,
5024 uint8_t *buf_temp
__attribute__((unused
)),
5025 int fd
, int cmd
, abi_long arg
)
5027 struct live_urb
*lurb
;
5029 /* map target address back to host URB with metadata. */
5030 lurb
= urb_hashtable_lookup(arg
);
5032 return -TARGET_EFAULT
;
5034 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5038 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5039 int fd
, int cmd
, abi_long arg
)
5041 const argtype
*arg_type
= ie
->arg_type
;
5046 struct live_urb
*lurb
;
5049 * each submitted URB needs to map to a unique ID for the
5050 * kernel, and that unique ID needs to be a pointer to
5051 * host memory. hence, we need to malloc for each URB.
5052 * isochronous transfers have a variable length struct.
5055 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5057 /* construct host copy of urb and metadata */
5058 lurb
= g_try_malloc0(sizeof(struct live_urb
));
5060 return -TARGET_ENOMEM
;
5063 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5066 return -TARGET_EFAULT
;
5068 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5069 unlock_user(argptr
, arg
, 0);
5071 lurb
->target_urb_adr
= arg
;
5072 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5074 /* buffer space used depends on endpoint type so lock the entire buffer */
5075 /* control type urbs should check the buffer contents for true direction */
5076 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5077 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5078 lurb
->host_urb
.buffer_length
, 1);
5079 if (lurb
->target_buf_ptr
== NULL
) {
5081 return -TARGET_EFAULT
;
5084 /* update buffer pointer in host copy */
5085 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5087 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5088 if (is_error(ret
)) {
5089 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5092 urb_hashtable_insert(lurb
);
5097 #endif /* CONFIG_USBFS */
5099 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5100 int cmd
, abi_long arg
)
5103 struct dm_ioctl
*host_dm
;
5104 abi_long guest_data
;
5105 uint32_t guest_data_size
;
5107 const argtype
*arg_type
= ie
->arg_type
;
5109 void *big_buf
= NULL
;
5113 target_size
= thunk_type_size(arg_type
, 0);
5114 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5116 ret
= -TARGET_EFAULT
;
5119 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5120 unlock_user(argptr
, arg
, 0);
5122 /* buf_temp is too small, so fetch things into a bigger buffer */
5123 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5124 memcpy(big_buf
, buf_temp
, target_size
);
5128 guest_data
= arg
+ host_dm
->data_start
;
5129 if ((guest_data
- arg
) < 0) {
5130 ret
= -TARGET_EINVAL
;
5133 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5134 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5136 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5138 ret
= -TARGET_EFAULT
;
5142 switch (ie
->host_cmd
) {
5144 case DM_LIST_DEVICES
:
5147 case DM_DEV_SUSPEND
:
5150 case DM_TABLE_STATUS
:
5151 case DM_TABLE_CLEAR
:
5153 case DM_LIST_VERSIONS
:
5157 case DM_DEV_SET_GEOMETRY
:
5158 /* data contains only strings */
5159 memcpy(host_data
, argptr
, guest_data_size
);
5162 memcpy(host_data
, argptr
, guest_data_size
);
5163 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5167 void *gspec
= argptr
;
5168 void *cur_data
= host_data
;
5169 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5170 int spec_size
= thunk_type_size(arg_type
, 0);
5173 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5174 struct dm_target_spec
*spec
= cur_data
;
5178 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5179 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5181 spec
->next
= sizeof(*spec
) + slen
;
5182 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5184 cur_data
+= spec
->next
;
5189 ret
= -TARGET_EINVAL
;
5190 unlock_user(argptr
, guest_data
, 0);
5193 unlock_user(argptr
, guest_data
, 0);
5195 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5196 if (!is_error(ret
)) {
5197 guest_data
= arg
+ host_dm
->data_start
;
5198 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5199 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5200 switch (ie
->host_cmd
) {
5205 case DM_DEV_SUSPEND
:
5208 case DM_TABLE_CLEAR
:
5210 case DM_DEV_SET_GEOMETRY
:
5211 /* no return data */
5213 case DM_LIST_DEVICES
:
5215 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5216 uint32_t remaining_data
= guest_data_size
;
5217 void *cur_data
= argptr
;
5218 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5219 int nl_size
= 12; /* can't use thunk_size due to alignment */
5222 uint32_t next
= nl
->next
;
5224 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5226 if (remaining_data
< nl
->next
) {
5227 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5230 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5231 strcpy(cur_data
+ nl_size
, nl
->name
);
5232 cur_data
+= nl
->next
;
5233 remaining_data
-= nl
->next
;
5237 nl
= (void*)nl
+ next
;
5242 case DM_TABLE_STATUS
:
5244 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5245 void *cur_data
= argptr
;
5246 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5247 int spec_size
= thunk_type_size(arg_type
, 0);
5250 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5251 uint32_t next
= spec
->next
;
5252 int slen
= strlen((char*)&spec
[1]) + 1;
5253 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5254 if (guest_data_size
< spec
->next
) {
5255 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5258 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5259 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5260 cur_data
= argptr
+ spec
->next
;
5261 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5267 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5268 int count
= *(uint32_t*)hdata
;
5269 uint64_t *hdev
= hdata
+ 8;
5270 uint64_t *gdev
= argptr
+ 8;
5273 *(uint32_t*)argptr
= tswap32(count
);
5274 for (i
= 0; i
< count
; i
++) {
5275 *gdev
= tswap64(*hdev
);
5281 case DM_LIST_VERSIONS
:
5283 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5284 uint32_t remaining_data
= guest_data_size
;
5285 void *cur_data
= argptr
;
5286 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5287 int vers_size
= thunk_type_size(arg_type
, 0);
5290 uint32_t next
= vers
->next
;
5292 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5294 if (remaining_data
< vers
->next
) {
5295 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5298 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5299 strcpy(cur_data
+ vers_size
, vers
->name
);
5300 cur_data
+= vers
->next
;
5301 remaining_data
-= vers
->next
;
5305 vers
= (void*)vers
+ next
;
5310 unlock_user(argptr
, guest_data
, 0);
5311 ret
= -TARGET_EINVAL
;
5314 unlock_user(argptr
, guest_data
, guest_data_size
);
5316 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5318 ret
= -TARGET_EFAULT
;
5321 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5322 unlock_user(argptr
, arg
, target_size
);
5329 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5330 int cmd
, abi_long arg
)
5334 const argtype
*arg_type
= ie
->arg_type
;
5335 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5338 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5339 struct blkpg_partition host_part
;
5341 /* Read and convert blkpg */
5343 target_size
= thunk_type_size(arg_type
, 0);
5344 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5346 ret
= -TARGET_EFAULT
;
5349 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5350 unlock_user(argptr
, arg
, 0);
5352 switch (host_blkpg
->op
) {
5353 case BLKPG_ADD_PARTITION
:
5354 case BLKPG_DEL_PARTITION
:
5355 /* payload is struct blkpg_partition */
5358 /* Unknown opcode */
5359 ret
= -TARGET_EINVAL
;
5363 /* Read and convert blkpg->data */
5364 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5365 target_size
= thunk_type_size(part_arg_type
, 0);
5366 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5368 ret
= -TARGET_EFAULT
;
5371 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5372 unlock_user(argptr
, arg
, 0);
5374 /* Swizzle the data pointer to our local copy and call! */
5375 host_blkpg
->data
= &host_part
;
5376 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5382 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5383 int fd
, int cmd
, abi_long arg
)
5385 const argtype
*arg_type
= ie
->arg_type
;
5386 const StructEntry
*se
;
5387 const argtype
*field_types
;
5388 const int *dst_offsets
, *src_offsets
;
5391 abi_ulong
*target_rt_dev_ptr
= NULL
;
5392 unsigned long *host_rt_dev_ptr
= NULL
;
5396 assert(ie
->access
== IOC_W
);
5397 assert(*arg_type
== TYPE_PTR
);
5399 assert(*arg_type
== TYPE_STRUCT
);
5400 target_size
= thunk_type_size(arg_type
, 0);
5401 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5403 return -TARGET_EFAULT
;
5406 assert(*arg_type
== (int)STRUCT_rtentry
);
5407 se
= struct_entries
+ *arg_type
++;
5408 assert(se
->convert
[0] == NULL
);
5409 /* convert struct here to be able to catch rt_dev string */
5410 field_types
= se
->field_types
;
5411 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5412 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5413 for (i
= 0; i
< se
->nb_fields
; i
++) {
5414 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5415 assert(*field_types
== TYPE_PTRVOID
);
5416 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5417 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5418 if (*target_rt_dev_ptr
!= 0) {
5419 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5420 tswapal(*target_rt_dev_ptr
));
5421 if (!*host_rt_dev_ptr
) {
5422 unlock_user(argptr
, arg
, 0);
5423 return -TARGET_EFAULT
;
5426 *host_rt_dev_ptr
= 0;
5431 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5432 argptr
+ src_offsets
[i
],
5433 field_types
, THUNK_HOST
);
5435 unlock_user(argptr
, arg
, 0);
5437 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5439 assert(host_rt_dev_ptr
!= NULL
);
5440 assert(target_rt_dev_ptr
!= NULL
);
5441 if (*host_rt_dev_ptr
!= 0) {
5442 unlock_user((void *)*host_rt_dev_ptr
,
5443 *target_rt_dev_ptr
, 0);
5448 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5449 int fd
, int cmd
, abi_long arg
)
5451 int sig
= target_to_host_signal(arg
);
5452 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5455 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5456 int fd
, int cmd
, abi_long arg
)
5461 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5462 if (is_error(ret
)) {
5466 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5467 if (copy_to_user_timeval(arg
, &tv
)) {
5468 return -TARGET_EFAULT
;
5471 if (copy_to_user_timeval64(arg
, &tv
)) {
5472 return -TARGET_EFAULT
;
5479 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5480 int fd
, int cmd
, abi_long arg
)
5485 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5486 if (is_error(ret
)) {
5490 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5491 if (host_to_target_timespec(arg
, &ts
)) {
5492 return -TARGET_EFAULT
;
5495 if (host_to_target_timespec64(arg
, &ts
)) {
5496 return -TARGET_EFAULT
;
5504 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5505 int fd
, int cmd
, abi_long arg
)
5507 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5508 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5514 static void unlock_drm_version(struct drm_version
*host_ver
,
5515 struct target_drm_version
*target_ver
,
5518 unlock_user(host_ver
->name
, target_ver
->name
,
5519 copy
? host_ver
->name_len
: 0);
5520 unlock_user(host_ver
->date
, target_ver
->date
,
5521 copy
? host_ver
->date_len
: 0);
5522 unlock_user(host_ver
->desc
, target_ver
->desc
,
5523 copy
? host_ver
->desc_len
: 0);
5526 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5527 struct target_drm_version
*target_ver
)
5529 memset(host_ver
, 0, sizeof(*host_ver
));
5531 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5532 if (host_ver
->name_len
) {
5533 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5534 target_ver
->name_len
, 0);
5535 if (!host_ver
->name
) {
5540 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5541 if (host_ver
->date_len
) {
5542 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5543 target_ver
->date_len
, 0);
5544 if (!host_ver
->date
) {
5549 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5550 if (host_ver
->desc_len
) {
5551 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5552 target_ver
->desc_len
, 0);
5553 if (!host_ver
->desc
) {
5560 unlock_drm_version(host_ver
, target_ver
, false);
5564 static inline void host_to_target_drmversion(
5565 struct target_drm_version
*target_ver
,
5566 struct drm_version
*host_ver
)
5568 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5569 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5570 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5571 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5572 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5573 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5574 unlock_drm_version(host_ver
, target_ver
, true);
5577 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5578 int fd
, int cmd
, abi_long arg
)
5580 struct drm_version
*ver
;
5581 struct target_drm_version
*target_ver
;
5584 switch (ie
->host_cmd
) {
5585 case DRM_IOCTL_VERSION
:
5586 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5587 return -TARGET_EFAULT
;
5589 ver
= (struct drm_version
*)buf_temp
;
5590 ret
= target_to_host_drmversion(ver
, target_ver
);
5591 if (!is_error(ret
)) {
5592 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5593 if (is_error(ret
)) {
5594 unlock_drm_version(ver
, target_ver
, false);
5596 host_to_target_drmversion(target_ver
, ver
);
5599 unlock_user_struct(target_ver
, arg
, 0);
5602 return -TARGET_ENOSYS
;
5605 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5606 struct drm_i915_getparam
*gparam
,
5607 int fd
, abi_long arg
)
5611 struct target_drm_i915_getparam
*target_gparam
;
5613 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5614 return -TARGET_EFAULT
;
5617 __get_user(gparam
->param
, &target_gparam
->param
);
5618 gparam
->value
= &value
;
5619 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5620 put_user_s32(value
, target_gparam
->value
);
5622 unlock_user_struct(target_gparam
, arg
, 0);
5626 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5627 int fd
, int cmd
, abi_long arg
)
5629 switch (ie
->host_cmd
) {
5630 case DRM_IOCTL_I915_GETPARAM
:
5631 return do_ioctl_drm_i915_getparam(ie
,
5632 (struct drm_i915_getparam
*)buf_temp
,
5635 return -TARGET_ENOSYS
;
5641 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5642 int fd
, int cmd
, abi_long arg
)
5644 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5645 struct tun_filter
*target_filter
;
5648 assert(ie
->access
== IOC_W
);
5650 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5651 if (!target_filter
) {
5652 return -TARGET_EFAULT
;
5654 filter
->flags
= tswap16(target_filter
->flags
);
5655 filter
->count
= tswap16(target_filter
->count
);
5656 unlock_user(target_filter
, arg
, 0);
5658 if (filter
->count
) {
5659 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5661 return -TARGET_EFAULT
;
5664 target_addr
= lock_user(VERIFY_READ
,
5665 arg
+ offsetof(struct tun_filter
, addr
),
5666 filter
->count
* ETH_ALEN
, 1);
5668 return -TARGET_EFAULT
;
5670 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5671 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5674 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5677 IOCTLEntry ioctl_entries
[] = {
5678 #define IOCTL(cmd, access, ...) \
5679 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5680 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5681 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5682 #define IOCTL_IGNORE(cmd) \
5683 { TARGET_ ## cmd, 0, #cmd },
5688 /* ??? Implement proper locking for ioctls. */
5689 /* do_ioctl() Must return target values and target errnos. */
5690 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5692 const IOCTLEntry
*ie
;
5693 const argtype
*arg_type
;
5695 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5701 if (ie
->target_cmd
== 0) {
5703 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5704 return -TARGET_ENOSYS
;
5706 if (ie
->target_cmd
== cmd
)
5710 arg_type
= ie
->arg_type
;
5712 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5713 } else if (!ie
->host_cmd
) {
5714 /* Some architectures define BSD ioctls in their headers
5715 that are not implemented in Linux. */
5716 return -TARGET_ENOSYS
;
5719 switch(arg_type
[0]) {
5722 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5728 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5732 target_size
= thunk_type_size(arg_type
, 0);
5733 switch(ie
->access
) {
5735 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5736 if (!is_error(ret
)) {
5737 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5739 return -TARGET_EFAULT
;
5740 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5741 unlock_user(argptr
, arg
, target_size
);
5745 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5747 return -TARGET_EFAULT
;
5748 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5749 unlock_user(argptr
, arg
, 0);
5750 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5754 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5756 return -TARGET_EFAULT
;
5757 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5758 unlock_user(argptr
, arg
, 0);
5759 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5760 if (!is_error(ret
)) {
5761 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5763 return -TARGET_EFAULT
;
5764 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5765 unlock_user(argptr
, arg
, target_size
);
5771 qemu_log_mask(LOG_UNIMP
,
5772 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5773 (long)cmd
, arg_type
[0]);
5774 ret
= -TARGET_ENOSYS
;
5780 static const bitmask_transtbl iflag_tbl
[] = {
5781 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5782 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5783 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5784 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5785 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5786 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5787 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5788 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5789 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5790 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5791 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5792 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5793 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5794 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5795 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5799 static const bitmask_transtbl oflag_tbl
[] = {
5800 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5801 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5802 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5803 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5804 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5805 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5806 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5807 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5808 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5809 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5810 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5811 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5812 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5813 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5814 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5815 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5816 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5817 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5818 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5819 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5820 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5821 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5822 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5823 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5827 static const bitmask_transtbl cflag_tbl
[] = {
5828 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5829 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5830 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5831 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5832 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5833 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5834 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5835 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5836 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5837 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5838 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5839 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5840 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5841 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5842 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5843 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5844 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5845 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5846 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5847 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5848 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5849 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5850 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5851 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5852 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5853 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5854 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5855 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5856 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5857 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5858 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5862 static const bitmask_transtbl lflag_tbl
[] = {
5863 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5864 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5865 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5866 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5867 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5868 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5869 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5870 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5871 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5872 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5873 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5874 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5875 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5876 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5877 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5878 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5882 static void target_to_host_termios (void *dst
, const void *src
)
5884 struct host_termios
*host
= dst
;
5885 const struct target_termios
*target
= src
;
5888 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5890 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5892 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5894 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5895 host
->c_line
= target
->c_line
;
5897 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5898 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5899 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5900 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5901 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5902 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5903 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5904 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5905 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5906 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5907 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5908 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5909 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5910 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5911 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5912 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5913 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5914 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5917 static void host_to_target_termios (void *dst
, const void *src
)
5919 struct target_termios
*target
= dst
;
5920 const struct host_termios
*host
= src
;
5923 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5925 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5927 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5929 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5930 target
->c_line
= host
->c_line
;
5932 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5933 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5934 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5935 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5936 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5937 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5938 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5939 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5940 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5941 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5942 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5943 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5944 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5945 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5946 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5947 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5948 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5949 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5952 static const StructEntry struct_termios_def
= {
5953 .convert
= { host_to_target_termios
, target_to_host_termios
},
5954 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5955 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5956 .print
= print_termios
,
5959 static const bitmask_transtbl mmap_flags_tbl
[] = {
5960 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5961 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5962 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5963 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5964 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5965 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5966 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5967 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5968 MAP_DENYWRITE
, MAP_DENYWRITE
},
5969 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5970 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5971 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5972 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5973 MAP_NORESERVE
, MAP_NORESERVE
},
5974 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5975 /* MAP_STACK had been ignored by the kernel for quite some time.
5976 Recognize it for the target insofar as we do not want to pass
5977 it through to the host. */
5978 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5983 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5984 * TARGET_I386 is defined if TARGET_X86_64 is defined
5986 #if defined(TARGET_I386)
5988 /* NOTE: there is really one LDT for all the threads */
5989 static uint8_t *ldt_table
;
5991 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5998 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5999 if (size
> bytecount
)
6001 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6003 return -TARGET_EFAULT
;
6004 /* ??? Should this by byteswapped? */
6005 memcpy(p
, ldt_table
, size
);
6006 unlock_user(p
, ptr
, size
);
6010 /* XXX: add locking support */
6011 static abi_long
write_ldt(CPUX86State
*env
,
6012 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6014 struct target_modify_ldt_ldt_s ldt_info
;
6015 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6016 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6017 int seg_not_present
, useable
, lm
;
6018 uint32_t *lp
, entry_1
, entry_2
;
6020 if (bytecount
!= sizeof(ldt_info
))
6021 return -TARGET_EINVAL
;
6022 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6023 return -TARGET_EFAULT
;
6024 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6025 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6026 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6027 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6028 unlock_user_struct(target_ldt_info
, ptr
, 0);
6030 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6031 return -TARGET_EINVAL
;
6032 seg_32bit
= ldt_info
.flags
& 1;
6033 contents
= (ldt_info
.flags
>> 1) & 3;
6034 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6035 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6036 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6037 useable
= (ldt_info
.flags
>> 6) & 1;
6041 lm
= (ldt_info
.flags
>> 7) & 1;
6043 if (contents
== 3) {
6045 return -TARGET_EINVAL
;
6046 if (seg_not_present
== 0)
6047 return -TARGET_EINVAL
;
6049 /* allocate the LDT */
6051 env
->ldt
.base
= target_mmap(0,
6052 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6053 PROT_READ
|PROT_WRITE
,
6054 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6055 if (env
->ldt
.base
== -1)
6056 return -TARGET_ENOMEM
;
6057 memset(g2h_untagged(env
->ldt
.base
), 0,
6058 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6059 env
->ldt
.limit
= 0xffff;
6060 ldt_table
= g2h_untagged(env
->ldt
.base
);
6063 /* NOTE: same code as Linux kernel */
6064 /* Allow LDTs to be cleared by the user. */
6065 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6068 read_exec_only
== 1 &&
6070 limit_in_pages
== 0 &&
6071 seg_not_present
== 1 &&
6079 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6080 (ldt_info
.limit
& 0x0ffff);
6081 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6082 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6083 (ldt_info
.limit
& 0xf0000) |
6084 ((read_exec_only
^ 1) << 9) |
6086 ((seg_not_present
^ 1) << 15) |
6088 (limit_in_pages
<< 23) |
6092 entry_2
|= (useable
<< 20);
6094 /* Install the new entry ... */
6096 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6097 lp
[0] = tswap32(entry_1
);
6098 lp
[1] = tswap32(entry_2
);
6102 /* specific and weird i386 syscalls */
6103 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6104 unsigned long bytecount
)
6110 ret
= read_ldt(ptr
, bytecount
);
6113 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6116 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6119 ret
= -TARGET_ENOSYS
;
6125 #if defined(TARGET_ABI32)
6126 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6128 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6129 struct target_modify_ldt_ldt_s ldt_info
;
6130 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6131 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6132 int seg_not_present
, useable
, lm
;
6133 uint32_t *lp
, entry_1
, entry_2
;
6136 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6137 if (!target_ldt_info
)
6138 return -TARGET_EFAULT
;
6139 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6140 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6141 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6142 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6143 if (ldt_info
.entry_number
== -1) {
6144 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6145 if (gdt_table
[i
] == 0) {
6146 ldt_info
.entry_number
= i
;
6147 target_ldt_info
->entry_number
= tswap32(i
);
6152 unlock_user_struct(target_ldt_info
, ptr
, 1);
6154 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6155 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6156 return -TARGET_EINVAL
;
6157 seg_32bit
= ldt_info
.flags
& 1;
6158 contents
= (ldt_info
.flags
>> 1) & 3;
6159 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6160 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6161 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6162 useable
= (ldt_info
.flags
>> 6) & 1;
6166 lm
= (ldt_info
.flags
>> 7) & 1;
6169 if (contents
== 3) {
6170 if (seg_not_present
== 0)
6171 return -TARGET_EINVAL
;
6174 /* NOTE: same code as Linux kernel */
6175 /* Allow LDTs to be cleared by the user. */
6176 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6177 if ((contents
== 0 &&
6178 read_exec_only
== 1 &&
6180 limit_in_pages
== 0 &&
6181 seg_not_present
== 1 &&
6189 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6190 (ldt_info
.limit
& 0x0ffff);
6191 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6192 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6193 (ldt_info
.limit
& 0xf0000) |
6194 ((read_exec_only
^ 1) << 9) |
6196 ((seg_not_present
^ 1) << 15) |
6198 (limit_in_pages
<< 23) |
6203 /* Install the new entry ... */
6205 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6206 lp
[0] = tswap32(entry_1
);
6207 lp
[1] = tswap32(entry_2
);
6211 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6213 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6214 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6215 uint32_t base_addr
, limit
, flags
;
6216 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6217 int seg_not_present
, useable
, lm
;
6218 uint32_t *lp
, entry_1
, entry_2
;
6220 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6221 if (!target_ldt_info
)
6222 return -TARGET_EFAULT
;
6223 idx
= tswap32(target_ldt_info
->entry_number
);
6224 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6225 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6226 unlock_user_struct(target_ldt_info
, ptr
, 1);
6227 return -TARGET_EINVAL
;
6229 lp
= (uint32_t *)(gdt_table
+ idx
);
6230 entry_1
= tswap32(lp
[0]);
6231 entry_2
= tswap32(lp
[1]);
6233 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6234 contents
= (entry_2
>> 10) & 3;
6235 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6236 seg_32bit
= (entry_2
>> 22) & 1;
6237 limit_in_pages
= (entry_2
>> 23) & 1;
6238 useable
= (entry_2
>> 20) & 1;
6242 lm
= (entry_2
>> 21) & 1;
6244 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6245 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6246 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6247 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6248 base_addr
= (entry_1
>> 16) |
6249 (entry_2
& 0xff000000) |
6250 ((entry_2
& 0xff) << 16);
6251 target_ldt_info
->base_addr
= tswapal(base_addr
);
6252 target_ldt_info
->limit
= tswap32(limit
);
6253 target_ldt_info
->flags
= tswap32(flags
);
6254 unlock_user_struct(target_ldt_info
, ptr
, 1);
6258 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6260 return -TARGET_ENOSYS
;
6263 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6270 case TARGET_ARCH_SET_GS
:
6271 case TARGET_ARCH_SET_FS
:
6272 if (code
== TARGET_ARCH_SET_GS
)
6276 cpu_x86_load_seg(env
, idx
, 0);
6277 env
->segs
[idx
].base
= addr
;
6279 case TARGET_ARCH_GET_GS
:
6280 case TARGET_ARCH_GET_FS
:
6281 if (code
== TARGET_ARCH_GET_GS
)
6285 val
= env
->segs
[idx
].base
;
6286 if (put_user(val
, addr
, abi_ulong
))
6287 ret
= -TARGET_EFAULT
;
6290 ret
= -TARGET_EINVAL
;
6295 #endif /* defined(TARGET_ABI32 */
6297 #endif /* defined(TARGET_I386) */
6299 #define NEW_STACK_SIZE 0x40000
6302 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6305 pthread_mutex_t mutex
;
6306 pthread_cond_t cond
;
6309 abi_ulong child_tidptr
;
6310 abi_ulong parent_tidptr
;
6314 static void *clone_func(void *arg
)
6316 new_thread_info
*info
= arg
;
6321 rcu_register_thread();
6322 tcg_register_thread();
6326 ts
= (TaskState
*)cpu
->opaque
;
6327 info
->tid
= sys_gettid();
6329 if (info
->child_tidptr
)
6330 put_user_u32(info
->tid
, info
->child_tidptr
);
6331 if (info
->parent_tidptr
)
6332 put_user_u32(info
->tid
, info
->parent_tidptr
);
6333 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
6334 /* Enable signals. */
6335 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6336 /* Signal to the parent that we're ready. */
6337 pthread_mutex_lock(&info
->mutex
);
6338 pthread_cond_broadcast(&info
->cond
);
6339 pthread_mutex_unlock(&info
->mutex
);
6340 /* Wait until the parent has finished initializing the tls state. */
6341 pthread_mutex_lock(&clone_lock
);
6342 pthread_mutex_unlock(&clone_lock
);
6348 /* do_fork() Must return host values and target errnos (unlike most
6349 do_*() functions). */
6350 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6351 abi_ulong parent_tidptr
, target_ulong newtls
,
6352 abi_ulong child_tidptr
)
6354 CPUState
*cpu
= env_cpu(env
);
6358 CPUArchState
*new_env
;
6361 flags
&= ~CLONE_IGNORED_FLAGS
;
6363 /* Emulate vfork() with fork() */
6364 if (flags
& CLONE_VFORK
)
6365 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6367 if (flags
& CLONE_VM
) {
6368 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6369 new_thread_info info
;
6370 pthread_attr_t attr
;
6372 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6373 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6374 return -TARGET_EINVAL
;
6377 ts
= g_new0(TaskState
, 1);
6378 init_task_state(ts
);
6380 /* Grab a mutex so that thread setup appears atomic. */
6381 pthread_mutex_lock(&clone_lock
);
6384 * If this is our first additional thread, we need to ensure we
6385 * generate code for parallel execution and flush old translations.
6386 * Do this now so that the copy gets CF_PARALLEL too.
6388 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
6389 cpu
->tcg_cflags
|= CF_PARALLEL
;
6393 /* we create a new CPU instance. */
6394 new_env
= cpu_copy(env
);
6395 /* Init regs that differ from the parent. */
6396 cpu_clone_regs_child(new_env
, newsp
, flags
);
6397 cpu_clone_regs_parent(env
, flags
);
6398 new_cpu
= env_cpu(new_env
);
6399 new_cpu
->opaque
= ts
;
6400 ts
->bprm
= parent_ts
->bprm
;
6401 ts
->info
= parent_ts
->info
;
6402 ts
->signal_mask
= parent_ts
->signal_mask
;
6404 if (flags
& CLONE_CHILD_CLEARTID
) {
6405 ts
->child_tidptr
= child_tidptr
;
6408 if (flags
& CLONE_SETTLS
) {
6409 cpu_set_tls (new_env
, newtls
);
6412 memset(&info
, 0, sizeof(info
));
6413 pthread_mutex_init(&info
.mutex
, NULL
);
6414 pthread_mutex_lock(&info
.mutex
);
6415 pthread_cond_init(&info
.cond
, NULL
);
6417 if (flags
& CLONE_CHILD_SETTID
) {
6418 info
.child_tidptr
= child_tidptr
;
6420 if (flags
& CLONE_PARENT_SETTID
) {
6421 info
.parent_tidptr
= parent_tidptr
;
6424 ret
= pthread_attr_init(&attr
);
6425 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6426 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6427 /* It is not safe to deliver signals until the child has finished
6428 initializing, so temporarily block all signals. */
6429 sigfillset(&sigmask
);
6430 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6431 cpu
->random_seed
= qemu_guest_random_seed_thread_part1();
6433 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6434 /* TODO: Free new CPU state if thread creation failed. */
6436 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6437 pthread_attr_destroy(&attr
);
6439 /* Wait for the child to initialize. */
6440 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6445 pthread_mutex_unlock(&info
.mutex
);
6446 pthread_cond_destroy(&info
.cond
);
6447 pthread_mutex_destroy(&info
.mutex
);
6448 pthread_mutex_unlock(&clone_lock
);
6450 /* if no CLONE_VM, we consider it is a fork */
6451 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6452 return -TARGET_EINVAL
;
6455 /* We can't support custom termination signals */
6456 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6457 return -TARGET_EINVAL
;
6460 if (block_signals()) {
6461 return -TARGET_ERESTARTSYS
;
6467 /* Child Process. */
6468 cpu_clone_regs_child(env
, newsp
, flags
);
6470 /* There is a race condition here. The parent process could
6471 theoretically read the TID in the child process before the child
6472 tid is set. This would require using either ptrace
6473 (not implemented) or having *_tidptr to point at a shared memory
6474 mapping. We can't repeat the spinlock hack used above because
6475 the child process gets its own copy of the lock. */
6476 if (flags
& CLONE_CHILD_SETTID
)
6477 put_user_u32(sys_gettid(), child_tidptr
);
6478 if (flags
& CLONE_PARENT_SETTID
)
6479 put_user_u32(sys_gettid(), parent_tidptr
);
6480 ts
= (TaskState
*)cpu
->opaque
;
6481 if (flags
& CLONE_SETTLS
)
6482 cpu_set_tls (env
, newtls
);
6483 if (flags
& CLONE_CHILD_CLEARTID
)
6484 ts
->child_tidptr
= child_tidptr
;
6486 cpu_clone_regs_parent(env
, flags
);

/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
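
/*
 * Worked example of the PPC64 fixup above: glibc defines F_GETLK64,
 * F_SETLK64 and F_SETLKW64 as 12, 13 and 14, so a translated ret of
 * 12 becomes 12 - (12 - 5) = 5, which is the F_GETLK value the
 * kernel accepts when we bypass glibc and issue the syscall directly.
 */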

#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
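
/*
 * The single FLOCK_TRANSTBL list serves both directions: under the
 * first TRANSTBL_CONVERT definition the F_RDLCK entry expands to
 * "case TARGET_F_RDLCK: return F_RDLCK;", and under the second to
 * "case F_RDLCK: return TARGET_F_RDLCK;".
 */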

static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif

static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return id;
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
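
/*
 * With USE_UID16 the guest ABI only has 16 bits for IDs, so for
 * example high2lowuid(100000) clamps to 65534 (the traditional
 * overflow ID) before put_user_id() stores it with put_user_u16().
 */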

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)

void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
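
/*
 * Example of the size patching in syscall_init() above: an IOCTLEntry
 * whose target_cmd was declared with every size bit set has that field
 * replaced by thunk_type_size() of the pointed-to type, so subsequent
 * ioctl lookups match on the real target structure size.
 */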

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif

#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif

#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif

#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif

#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif

static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
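
/*
 * Host call selection above, summarized: a 64-bit host always has a
 * 64-bit time_t and uses __NR_futex; a 32-bit host prefers
 * __NR_futex_time64 when its struct timespec carries a 64-bit tv_sec,
 * and otherwise falls back to the legacy __NR_futex.
 */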

/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif

#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif

#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif

#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif

#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
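
/*
 * Example: if a child is killed by host SIGUSR1, WTERMSIG(status) is
 * remapped through host_to_target_signal(), so a guest architecture
 * whose SIGUSR1 number differs from the host's still reads its own
 * value from the low bits of the wait status.
 */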

static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}

static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}

static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}

static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}

static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif

#if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif

static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
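
/*
 * The fake_open table in do_openat() above is consulted before the
 * real openat(): a guest open of "/proc/self/maps", for instance,
 * matches the "maps" entry via is_proc_myself(), and the synthesized
 * contents are served from an unlinked temporary file.
 */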

#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
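
/*
 * Example: a guest-visible timer ID of 0x0caf0002 passes the magic
 * check and maps to internal index 2, while any value without the
 * 0x0caf upper half is rejected with -TARGET_EINVAL.
 */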

static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
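
/*
 * Example of the remapping done by the converters above: with a
 * 32-bit guest (abi_ulong is 32 bits) on a 64-bit host, bit 5 of
 * guest word 1 is global bit 37, which lands in host word 0 as
 * 1UL << 37; the reverse conversion splits host words back the
 * same way.
 */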

#ifdef TARGET_NR_getdents
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
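
/*
 * Example of the reclen computation in do_getdents() above: a
 * 5-character name gives treclen = offsetof(d_name) + 5 + 2 (one
 * byte for the NUL, one for the trailing d_type byte), rounded up
 * to the alignment of struct target_dirent; the type byte is then
 * stored at tde + treclen - 1.
 */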

#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */

#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch(num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more then one thread.  */

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = cpu->opaque;

            object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
            object_unref(OBJECT(cpu));
            /*
             * At this point the CPU should be unrealized and removed
             * from cpu lists. We can clean-up the rest of the thread
             * data without the lock held.
             */

            pthread_mutex_unlock(&clone_lock);

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(cpu, ts->child_tidptr),
                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
            }
            thread_cpu = NULL;
            g_free(ts);
            rcu_unregister_thread();
            pthread_exit(NULL);
        }

        pthread_mutex_unlock(&clone_lock);
        preexit_cleanup(cpu_env, arg1);
        _exit(arg1);
        return 0; /* avoid warning */
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        } else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(safe_read(arg1, p, arg3));
            if (ret >= 0 &&
                fd_trans_host_to_target_data(arg1)) {
                ret = fd_trans_host_to_target_data(arg1)(p, ret);
            }
            unlock_user(p, arg2, ret);
        }
        return ret;
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
            if (ret >= 0) {
                ret = get_errno(safe_write(arg1, copy, ret));
            }
            g_free(copy);
        } else {
            ret = get_errno(safe_write(arg1, p, arg3));
        }
        unlock_user(p, arg2, 0);
        return ret;
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
    case TARGET_NR_close:
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));

    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            info.si_pid = 0;
            ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        {
            void * p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
        {
            void * p2 = NULL;
            if (!arg2 || !arg4)
                return -TARGET_EFAULT;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_execve:
        {
            char **argp, **envp;
            int argc, envc;
            abi_ulong gp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;
            abi_ulong addr;
            char **q;

            argc = 0;
            guest_argp = arg2;
            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                argc++;
            }
            envc = 0;
            guest_envp = arg3;
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
                    return -TARGET_EFAULT;
                if (!addr)
                    break;
                envc++;
            }

            argp = g_new0(char *, argc + 1);
            envp = g_new0(char *, envc + 1);

            for (gp = guest_argp, q = argp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            for (gp = guest_envp, q = envp; gp;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                    goto execve_efault;
                if (!addr)
                    break;
                if (!(*q = lock_user_string(addr)))
                    goto execve_efault;
            }
            *q = NULL;

            if (!(p = lock_user_string(arg1)))
                goto execve_efault;
            /* Although execve() is not an interruptible syscall it is
             * a special case where we must use the safe_syscall wrapper:
             * if we allow a signal to happen before we make the host
             * syscall then we will 'lose' it, because at the point of
             * execve the process leaves QEMU's control. So we use the
             * safe syscall wrapper to ensure that we either take the
             * signal as a guest signal, or else it does not happen
             * before the execve completes and makes it the other
             * program's problem.
             */
            ret = get_errno(safe_execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            goto execve_end;

        execve_efault:
            ret = -TARGET_EFAULT;

        execve_end:
            for (gp = guest_argp, q = argp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }
            for (gp = guest_envp, q = envp; *q;
                  gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                    || !addr)
                    break;
                unlock_user(*q, addr, 0);
            }

            g_free(argp);
            g_free(envp);
        }
        return ret;
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
8762 #ifdef TARGET_NR_utime
8763 case TARGET_NR_utime
:
8765 struct utimbuf tbuf
, *host_tbuf
;
8766 struct target_utimbuf
*target_tbuf
;
8768 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8769 return -TARGET_EFAULT
;
8770 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8771 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8772 unlock_user_struct(target_tbuf
, arg2
, 0);
8777 if (!(p
= lock_user_string(arg1
)))
8778 return -TARGET_EFAULT
;
8779 ret
= get_errno(utime(p
, host_tbuf
));
8780 unlock_user(p
, arg1
, 0);
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
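    /*
     * Note: dup, dup2 and dup3 (below) all call fd_trans_dup() on
     * success so that any fd translator registered for the old
     * descriptor (e.g. for netlink or signalfd fds) also covers the
     * duplicate.
     */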
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            /* 'None' turns acct off */
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        {
            int host_flags;

            if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
                return -EINVAL;
            }
            host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
            ret = get_errno(dup3(arg1, arg2, host_flags));
            if (ret >= 0) {
                fd_trans_dup(arg1, arg2);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
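    /*
     * The sgetmask/ssetmask and sigprocmask cases below use the "old"
     * single-word sigset layout, while the rt_* variants use the full
     * target_sigset_t; hence the old_sigset conversion helpers.
     */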
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch (how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
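            /*
             * The suspend mask is stashed in the TaskState so the signal
             * delivery code can record the pre-suspend mask in the signal
             * frame; in_sigsuspend (set below) tells it to do so.
             */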
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }
            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }
            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
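    /*
     * For both readlink and readlinkat, reads of /proc/self/exe (or
     * /proc/<own pid>/exe) are intercepted via is_proc_myself() and
     * answered with QEMU's record of the guest executable path
     * (exec_path), since the host's /proc would name qemu itself.
     */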
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real) ;
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
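        /*
         * mmap2 takes its file offset in units of 1 << MMAP_SHIFT bytes
         * (4096 unless the target headers define a different shift)
         * rather than in bytes, hence the shift applied to arg6 below.
         */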
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
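    /*
     * For the memory-management calls above and below, guest addresses
     * are first stripped of any top-byte/MTE tag bits with
     * cpu_untagged_addr(), mirroring the kernel's tagged-address-ABI
     * rule that pointers passed to memory syscalls are untagged.
     */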
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
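        /*
         * The raw kernel syscall returns 20 - nice so the result is
         * always positive (glibc undoes the bias); we apply the same
         * bias as the kernel here.
         */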
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                    return ret;
                }
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
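        /*
         * The host wait status cannot be copied to the guest verbatim:
         * host_to_target_waitstatus() below rewrites the signal numbers
         * embedded in it to the target's numbering.
         */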
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage) {
                rusage_ptr = &rusage;
            } else {
                rusage_ptr = NULL;
            }
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#if defined(TARGET_I386)
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
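        /*
         * safe_nanosleep is used so a guest signal arriving during the
         * sleep is taken as a guest signal; the remaining time is copied
         * back only when the call fails (e.g. EINTR), matching Linux
         * semantics.
         */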
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_s32(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#endif
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
        case TARGET_PR_SET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                            TARGET_PR_FP_MODE_FRE;

            /* If nothing to change, return right away, successfully. */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
            /* Check the value is valid */
            if (arg2 & ~known_bits) {
                return -TARGET_EOPNOTSUPP;
            }
            /* Setting FRE without FR is not supported. */
            if (new_fre && !new_fr) {
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                /* FR1 is not supported */
                return -TARGET_EOPNOTSUPP;
            }
            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                /* cannot set FR=0 */
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                /* Cannot set FRE=1 */
                return -TARGET_EOPNOTSUPP;
            }
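            /*
             * With FR=0 the 32 single-precision registers overlay the
             * halves of 16 double registers; with FR=1 each has its own
             * 64-bit register. The loop below repacks the register file
             * between the two layouts when the mode changes.
             */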
            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32 ; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
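        /*
         * The AArch64-specific prctls below operate directly on the
         * emulated CPU state (SVE vector length, PAC keys, MTE tag-check
         * control) rather than calling the host prctl, since the host
         * kernel cannot manage the guest CPU's registers.
         */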
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT. Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto. The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
        case TARGET_PR_SET_TAGGED_ADDR_CTRL:
            {
                abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    valid_mask |= TARGET_PR_MTE_TCF_MASK;
                    valid_mask |= TARGET_PR_MTE_TAG_MASK;
                }

                if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
                    case TARGET_PR_MTE_TCF_NONE:
                    case TARGET_PR_MTE_TCF_SYNC:
                    case TARGET_PR_MTE_TCF_ASYNC:
                        break;
                    default:
                        return -EINVAL;
                    }

                    /*
                     * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
                     * Note that the syscall values are consistent with hw.
                     */
                    env->cp15.sctlr_el[1] =
                        deposit64(env->cp15.sctlr_el[1], 38, 2,
                                  arg2 >> TARGET_PR_MTE_TCF_SHIFT);

                    /*
                     * Write PR_MTE_TAG to GCR_EL1[Exclude].
                     * Note that the syscall uses an include mask,
                     * and hardware uses an exclude mask -- invert.
                     */
                    env->cp15.gcr_el1 =
                        deposit64(env->cp15.gcr_el1, 0, 16,
                                  ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
                    arm_rebuild_hflags(env);
                }
                return 0;
            }
        case TARGET_PR_GET_TAGGED_ADDR_CTRL:
            {
                abi_long ret = 0;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg2 || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (env->tagged_addr_enable) {
                    ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
                }
                if (cpu_isar_feature(aa64_mte, cpu)) {
                    /* See above. */
                    ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
                            << TARGET_PR_MTE_TCF_SHIFT);
                    ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
                                    ~env->cp15.gcr_el1);
                }
                return ret;
            }
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
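            /*
             * This ABI passes the 64-bit file offset in an aligned
             * register pair with a padding register before it, so the
             * offset halves live one argument slot later than usual.
             */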
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
10962 case TARGET_NR_getcwd
:
10963 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10964 return -TARGET_EFAULT
;
10965 ret
= get_errno(sys_getcwd1(p
, arg2
));
10966 unlock_user(p
, arg1
, ret
);
10968 case TARGET_NR_capget
:
10969 case TARGET_NR_capset
:
10971 struct target_user_cap_header
*target_header
;
10972 struct target_user_cap_data
*target_data
= NULL
;
10973 struct __user_cap_header_struct header
;
10974 struct __user_cap_data_struct data
[2];
10975 struct __user_cap_data_struct
*dataptr
= NULL
;
10976 int i
, target_datalen
;
10977 int data_items
= 1;
10979 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10980 return -TARGET_EFAULT
;
10982 header
.version
= tswap32(target_header
->version
);
10983 header
.pid
= tswap32(target_header
->pid
);
10985 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10986 /* Version 2 and up takes pointer to two user_data structs */
10990 target_datalen
= sizeof(*target_data
) * data_items
;
10993 if (num
== TARGET_NR_capget
) {
10994 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10996 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10998 if (!target_data
) {
10999 unlock_user_struct(target_header
, arg1
, 0);
11000 return -TARGET_EFAULT
;
11003 if (num
== TARGET_NR_capset
) {
11004 for (i
= 0; i
< data_items
; i
++) {
11005 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11006 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11007 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11014 if (num
== TARGET_NR_capget
) {
11015 ret
= get_errno(capget(&header
, dataptr
));
11017 ret
= get_errno(capset(&header
, dataptr
));
11020 /* The kernel always updates version for both capget and capset */
11021 target_header
->version
= tswap32(header
.version
);
11022 unlock_user_struct(target_header
, arg1
, 1);
11025 if (num
== TARGET_NR_capget
) {
11026 for (i
= 0; i
< data_items
; i
++) {
11027 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11028 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11029 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11031 unlock_user(target_data
, arg2
, target_datalen
);
11033 unlock_user(target_data
, arg2
, 0);
11038 case TARGET_NR_sigaltstack
:
11039 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11041 #ifdef CONFIG_SENDFILE
11042 #ifdef TARGET_NR_sendfile
11043 case TARGET_NR_sendfile
:
11045 off_t
*offp
= NULL
;
11048 ret
= get_user_sal(off
, arg3
);
11049 if (is_error(ret
)) {
11054 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11055 if (!is_error(ret
) && arg3
) {
11056 abi_long ret2
= put_user_sal(off
, arg3
);
11057 if (is_error(ret2
)) {
11064 #ifdef TARGET_NR_sendfile64
11065 case TARGET_NR_sendfile64
:
11067 off_t
*offp
= NULL
;
11070 ret
= get_user_s64(off
, arg3
);
11071 if (is_error(ret
)) {
11076 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11077 if (!is_error(ret
) && arg3
) {
11078 abi_long ret2
= put_user_s64(off
, arg3
);
11079 if (is_error(ret2
)) {
11087 #ifdef TARGET_NR_vfork
11088 case TARGET_NR_vfork
:
11089 return get_errno(do_fork(cpu_env
,
11090 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11093 #ifdef TARGET_NR_ugetrlimit
11094 case TARGET_NR_ugetrlimit
:
11096 struct rlimit rlim
;
11097 int resource
= target_to_host_resource(arg1
);
11098 ret
= get_errno(getrlimit(resource
, &rlim
));
11099 if (!is_error(ret
)) {
11100 struct target_rlimit
*target_rlim
;
11101 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11102 return -TARGET_EFAULT
;
11103 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11104 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11105 unlock_user_struct(target_rlim
, arg2
, 1);
11110 #ifdef TARGET_NR_truncate64
11111 case TARGET_NR_truncate64
:
11112 if (!(p
= lock_user_string(arg1
)))
11113 return -TARGET_EFAULT
;
11114 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11115 unlock_user(p
, arg1
, 0);
11118 #ifdef TARGET_NR_ftruncate64
11119 case TARGET_NR_ftruncate64
:
11120 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11122 #ifdef TARGET_NR_stat64
11123 case TARGET_NR_stat64
:
11124 if (!(p
= lock_user_string(arg1
))) {
11125 return -TARGET_EFAULT
;
11127 ret
= get_errno(stat(path(p
), &st
));
11128 unlock_user(p
, arg1
, 0);
11129 if (!is_error(ret
))
11130 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11133 #ifdef TARGET_NR_lstat64
11134 case TARGET_NR_lstat64
:
11135 if (!(p
= lock_user_string(arg1
))) {
11136 return -TARGET_EFAULT
;
11138 ret
= get_errno(lstat(path(p
), &st
));
11139 unlock_user(p
, arg1
, 0);
11140 if (!is_error(ret
))
11141 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11144 #ifdef TARGET_NR_fstat64
11145 case TARGET_NR_fstat64
:
11146 ret
= get_errno(fstat(arg1
, &st
));
11147 if (!is_error(ret
))
11148 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11151 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11152 #ifdef TARGET_NR_fstatat64
11153 case TARGET_NR_fstatat64
:
11155 #ifdef TARGET_NR_newfstatat
11156 case TARGET_NR_newfstatat
:
11158 if (!(p
= lock_user_string(arg2
))) {
11159 return -TARGET_EFAULT
;
11161 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11162 unlock_user(p
, arg2
, 0);
11163 if (!is_error(ret
))
11164 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11167 #if defined(TARGET_NR_statx)
11168 case TARGET_NR_statx
:
11170 struct target_statx
*target_stx
;
11174 p
= lock_user_string(arg2
);
11176 return -TARGET_EFAULT
;
11178 #if defined(__NR_statx)
11181 * It is assumed that struct statx is architecture independent.
11183 struct target_statx host_stx
;
11186 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11187 if (!is_error(ret
)) {
11188 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11189 unlock_user(p
, arg2
, 0);
11190 return -TARGET_EFAULT
;
11194 if (ret
!= -TARGET_ENOSYS
) {
11195 unlock_user(p
, arg2
, 0);
11200 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11201 unlock_user(p
, arg2
, 0);
11203 if (!is_error(ret
)) {
11204 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11205 return -TARGET_EFAULT
;
11207 memset(target_stx
, 0, sizeof(*target_stx
));
11208 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11209 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11210 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11211 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11212 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11213 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11214 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11215 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11216 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11217 __put_user(st
.st_size
, &target_stx
->stx_size
);
11218 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11219 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11220 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11221 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11222 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11223 unlock_user_struct(target_stx
, arg5
, 1);
11228 #ifdef TARGET_NR_lchown
11229 case TARGET_NR_lchown
:
11230 if (!(p
= lock_user_string(arg1
)))
11231 return -TARGET_EFAULT
;
11232 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11233 unlock_user(p
, arg1
, 0);
11236 #ifdef TARGET_NR_getuid
11237 case TARGET_NR_getuid
:
11238 return get_errno(high2lowuid(getuid()));
11240 #ifdef TARGET_NR_getgid
11241 case TARGET_NR_getgid
:
11242 return get_errno(high2lowgid(getgid()));
11244 #ifdef TARGET_NR_geteuid
11245 case TARGET_NR_geteuid
:
11246 return get_errno(high2lowuid(geteuid()));
11248 #ifdef TARGET_NR_getegid
11249 case TARGET_NR_getegid
:
11250 return get_errno(high2lowgid(getegid()));
11252 case TARGET_NR_setreuid
:
11253 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11254 case TARGET_NR_setregid
:
11255 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11256 case TARGET_NR_getgroups
:
11258 int gidsetsize
= arg1
;
11259 target_id
*target_grouplist
;
11263 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11264 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11265 if (gidsetsize
== 0)
11267 if (!is_error(ret
)) {
11268 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11269 if (!target_grouplist
)
11270 return -TARGET_EFAULT
;
11271 for(i
= 0;i
< ret
; i
++)
11272 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11273 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11277 case TARGET_NR_setgroups
:
11279 int gidsetsize
= arg1
;
11280 target_id
*target_grouplist
;
11281 gid_t
*grouplist
= NULL
;
11284 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11285 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11286 if (!target_grouplist
) {
11287 return -TARGET_EFAULT
;
11289 for (i
= 0; i
< gidsetsize
; i
++) {
11290 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11292 unlock_user(target_grouplist
, arg2
, 0);
11294 return get_errno(setgroups(gidsetsize
, grouplist
));
11296 case TARGET_NR_fchown
:
11297 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11298 #if defined(TARGET_NR_fchownat)
11299 case TARGET_NR_fchownat
:
11300 if (!(p
= lock_user_string(arg2
)))
11301 return -TARGET_EFAULT
;
11302 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11303 low2highgid(arg4
), arg5
));
11304 unlock_user(p
, arg2
, 0);
11307 #ifdef TARGET_NR_setresuid
11308 case TARGET_NR_setresuid
:
11309 return get_errno(sys_setresuid(low2highuid(arg1
),
11311 low2highuid(arg3
)));
11313 #ifdef TARGET_NR_getresuid
11314 case TARGET_NR_getresuid
:
11316 uid_t ruid
, euid
, suid
;
11317 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11318 if (!is_error(ret
)) {
11319 if (put_user_id(high2lowuid(ruid
), arg1
)
11320 || put_user_id(high2lowuid(euid
), arg2
)
11321 || put_user_id(high2lowuid(suid
), arg3
))
11322 return -TARGET_EFAULT
;
11327 #ifdef TARGET_NR_getresgid
11328 case TARGET_NR_setresgid
:
11329 return get_errno(sys_setresgid(low2highgid(arg1
),
11331 low2highgid(arg3
)));
11333 #ifdef TARGET_NR_getresgid
11334 case TARGET_NR_getresgid
:
11336 gid_t rgid
, egid
, sgid
;
11337 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11338 if (!is_error(ret
)) {
11339 if (put_user_id(high2lowgid(rgid
), arg1
)
11340 || put_user_id(high2lowgid(egid
), arg2
)
11341 || put_user_id(high2lowgid(sgid
), arg3
))
11342 return -TARGET_EFAULT
;
11347 #ifdef TARGET_NR_chown
11348 case TARGET_NR_chown
:
11349 if (!(p
= lock_user_string(arg1
)))
11350 return -TARGET_EFAULT
;
11351 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11352 unlock_user(p
, arg1
, 0);
11355 case TARGET_NR_setuid
:
11356 return get_errno(sys_setuid(low2highuid(arg1
)));
11357 case TARGET_NR_setgid
:
11358 return get_errno(sys_setgid(low2highgid(arg1
)));
11359 case TARGET_NR_setfsuid
:
11360 return get_errno(setfsuid(arg1
));
11361 case TARGET_NR_setfsgid
:
11362 return get_errno(setfsgid(arg1
));
11364 #ifdef TARGET_NR_lchown32
11365 case TARGET_NR_lchown32
:
11366 if (!(p
= lock_user_string(arg1
)))
11367 return -TARGET_EFAULT
;
11368 ret
= get_errno(lchown(p
, arg2
, arg3
));
11369 unlock_user(p
, arg1
, 0);
11372 #ifdef TARGET_NR_getuid32
11373 case TARGET_NR_getuid32
:
11374 return get_errno(getuid());
11377 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11378 /* Alpha specific */
11379 case TARGET_NR_getxuid
:
11383 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11385 return get_errno(getuid());
11387 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11388 /* Alpha specific */
11389 case TARGET_NR_getxgid
:
11393 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11395 return get_errno(getgid());
11397 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11398 /* Alpha specific */
11399 case TARGET_NR_osf_getsysinfo
:
11400 ret
= -TARGET_EOPNOTSUPP
;
11402 case TARGET_GSI_IEEE_FP_CONTROL
:
11404 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11405 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11407 swcr
&= ~SWCR_STATUS_MASK
;
11408 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11410 if (put_user_u64 (swcr
, arg2
))
11411 return -TARGET_EFAULT
;
11416 /* case GSI_IEEE_STATE_AT_SIGNAL:
11417 -- Not implemented in linux kernel.
11419 -- Retrieves current unaligned access state; not much used.
11420 case GSI_PROC_TYPE:
11421 -- Retrieves implver information; surely not used.
11422 case GSI_GET_HWRPB:
11423 -- Grabs a copy of the HWRPB; surely not used.
11428 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11429 /* Alpha specific */
11430 case TARGET_NR_osf_setsysinfo
:
11431 ret
= -TARGET_EOPNOTSUPP
;
11433 case TARGET_SSI_IEEE_FP_CONTROL
:
11435 uint64_t swcr
, fpcr
;
11437 if (get_user_u64 (swcr
, arg2
)) {
11438 return -TARGET_EFAULT
;
11442 * The kernel calls swcr_update_status to update the
11443 * status bits from the fpcr at every point that it
11444 * could be queried. Therefore, we store the status
11445 * bits only in FPCR.
11447 ((CPUAlphaState
*)cpu_env
)->swcr
11448 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11450 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11451 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11452 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11453 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11458 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11460 uint64_t exc
, fpcr
, fex
;
11462 if (get_user_u64(exc
, arg2
)) {
11463 return -TARGET_EFAULT
;
11465 exc
&= SWCR_STATUS_MASK
;
11466 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11468 /* Old exceptions are not signaled. */
11469 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11471 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11472 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11474 /* Update the hardware fpcr. */
11475 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11476 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11479 int si_code
= TARGET_FPE_FLTUNK
;
11480 target_siginfo_t info
;
11482 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11483 si_code
= TARGET_FPE_FLTUND
;
11485 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11486 si_code
= TARGET_FPE_FLTRES
;
11488 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11489 si_code
= TARGET_FPE_FLTUND
;
11491 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11492 si_code
= TARGET_FPE_FLTOVF
;
11494 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11495 si_code
= TARGET_FPE_FLTDIV
;
11497 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11498 si_code
= TARGET_FPE_FLTINV
;
11501 info
.si_signo
= SIGFPE
;
11503 info
.si_code
= si_code
;
11504 info
._sifields
._sigfault
._addr
11505 = ((CPUArchState
*)cpu_env
)->pc
;
11506 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11507 QEMU_SI_FAULT
, &info
);
11513 /* case SSI_NVPAIRS:
11514 -- Used with SSIN_UACPROC to enable unaligned accesses.
11515 case SSI_IEEE_STATE_AT_SIGNAL:
11516 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11517 -- Not implemented in linux kernel
11522 #ifdef TARGET_NR_osf_sigprocmask
11523 /* Alpha specific. */
11524 case TARGET_NR_osf_sigprocmask
:
11528 sigset_t set
, oldset
;
11531 case TARGET_SIG_BLOCK
:
11534 case TARGET_SIG_UNBLOCK
:
11537 case TARGET_SIG_SETMASK
:
11541 return -TARGET_EINVAL
;
11544 target_to_host_old_sigset(&set
, &mask
);
11545 ret
= do_sigprocmask(how
, &set
, &oldset
);
11547 host_to_target_old_sigset(&mask
, &oldset
);
11554 #ifdef TARGET_NR_getgid32
11555 case TARGET_NR_getgid32
:
11556 return get_errno(getgid());
11558 #ifdef TARGET_NR_geteuid32
11559 case TARGET_NR_geteuid32
:
11560 return get_errno(geteuid());
11562 #ifdef TARGET_NR_getegid32
11563 case TARGET_NR_getegid32
:
11564 return get_errno(getegid());
11566 #ifdef TARGET_NR_setreuid32
11567 case TARGET_NR_setreuid32
:
11568 return get_errno(setreuid(arg1
, arg2
));
11570 #ifdef TARGET_NR_setregid32
11571 case TARGET_NR_setregid32
:
11572 return get_errno(setregid(arg1
, arg2
));
11574 #ifdef TARGET_NR_getgroups32
11575 case TARGET_NR_getgroups32
:
11577 int gidsetsize
= arg1
;
11578 uint32_t *target_grouplist
;
11582 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11583 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11584 if (gidsetsize
== 0)
11586 if (!is_error(ret
)) {
11587 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11588 if (!target_grouplist
) {
11589 return -TARGET_EFAULT
;
11591 for(i
= 0;i
< ret
; i
++)
11592 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11593 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11598 #ifdef TARGET_NR_setgroups32
11599 case TARGET_NR_setgroups32
:
11601 int gidsetsize
= arg1
;
11602 uint32_t *target_grouplist
;
11606 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11607 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11608 if (!target_grouplist
) {
11609 return -TARGET_EFAULT
;
11611 for(i
= 0;i
< gidsetsize
; i
++)
11612 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11613 unlock_user(target_grouplist
, arg2
, 0);
11614 return get_errno(setgroups(gidsetsize
, grouplist
));
11617 #ifdef TARGET_NR_fchown32
11618 case TARGET_NR_fchown32
:
11619 return get_errno(fchown(arg1
, arg2
, arg3
));
11621 #ifdef TARGET_NR_setresuid32
11622 case TARGET_NR_setresuid32
:
11623 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11625 #ifdef TARGET_NR_getresuid32
11626 case TARGET_NR_getresuid32
:
11628 uid_t ruid
, euid
, suid
;
11629 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11630 if (!is_error(ret
)) {
11631 if (put_user_u32(ruid
, arg1
)
11632 || put_user_u32(euid
, arg2
)
11633 || put_user_u32(suid
, arg3
))
11634 return -TARGET_EFAULT
;
11639 #ifdef TARGET_NR_setresgid32
11640 case TARGET_NR_setresgid32
:
11641 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11643 #ifdef TARGET_NR_getresgid32
11644 case TARGET_NR_getresgid32
:
11646 gid_t rgid
, egid
, sgid
;
11647 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11648 if (!is_error(ret
)) {
11649 if (put_user_u32(rgid
, arg1
)
11650 || put_user_u32(egid
, arg2
)
11651 || put_user_u32(sgid
, arg3
))
11652 return -TARGET_EFAULT
;
11657 #ifdef TARGET_NR_chown32
11658 case TARGET_NR_chown32
:
11659 if (!(p
= lock_user_string(arg1
)))
11660 return -TARGET_EFAULT
;
11661 ret
= get_errno(chown(p
, arg2
, arg3
));
11662 unlock_user(p
, arg1
, 0);
11665 #ifdef TARGET_NR_setuid32
11666 case TARGET_NR_setuid32
:
11667 return get_errno(sys_setuid(arg1
));
11669 #ifdef TARGET_NR_setgid32
11670 case TARGET_NR_setgid32
:
11671 return get_errno(sys_setgid(arg1
));
11673 #ifdef TARGET_NR_setfsuid32
11674 case TARGET_NR_setfsuid32
:
11675 return get_errno(setfsuid(arg1
));
11677 #ifdef TARGET_NR_setfsgid32
11678 case TARGET_NR_setfsgid32
:
11679 return get_errno(setfsgid(arg1
));
11681 #ifdef TARGET_NR_mincore
11682 case TARGET_NR_mincore
:
11684 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11686 return -TARGET_ENOMEM
;
11688 p
= lock_user_string(arg3
);
11690 ret
= -TARGET_EFAULT
;
11692 ret
= get_errno(mincore(a
, arg2
, p
));
11693 unlock_user(p
, arg3
, ret
);
11695 unlock_user(a
, arg1
, 0);
11699 #ifdef TARGET_NR_arm_fadvise64_64
11700 case TARGET_NR_arm_fadvise64_64
:
11701 /* arm_fadvise64_64 looks like fadvise64_64 but
11702 * with different argument order: fd, advice, offset, len
11703 * rather than the usual fd, offset, len, advice.
11704 * Note that offset and len are both 64-bit so appear as
11705 * pairs of 32-bit registers.
11707 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11708 target_offset64(arg5
, arg6
), arg2
);
11709 return -host_to_target_errno(ret
);
11712 #if TARGET_ABI_BITS == 32
11714 #ifdef TARGET_NR_fadvise64_64
11715 case TARGET_NR_fadvise64_64
:
11716 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11717 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11725 /* 6 args: fd, offset (high, low), len (high, low), advice */
11726 if (regpairs_aligned(cpu_env
, num
)) {
11727 /* offset is in (3,4), len in (5,6) and advice in 7 */
11735 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11736 target_offset64(arg4
, arg5
), arg6
);
11737 return -host_to_target_errno(ret
);
11740 #ifdef TARGET_NR_fadvise64
11741 case TARGET_NR_fadvise64
:
11742 /* 5 args: fd, offset (high, low), len, advice */
11743 if (regpairs_aligned(cpu_env
, num
)) {
11744 /* offset is in (3,4), len in 5 and advice in 6 */
11750 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11751 return -host_to_target_errno(ret
);
11754 #else /* not a 32-bit ABI */
11755 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11756 #ifdef TARGET_NR_fadvise64_64
11757 case TARGET_NR_fadvise64_64
:
11759 #ifdef TARGET_NR_fadvise64
11760 case TARGET_NR_fadvise64
:
11762 #ifdef TARGET_S390X
11764 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11765 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11766 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11767 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11771 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11773 #endif /* end of 64-bit ABI fadvise handling */
11775 #ifdef TARGET_NR_madvise
11776 case TARGET_NR_madvise
:
11777 /* A straight passthrough may not be safe because qemu sometimes
11778 turns private file-backed mappings into anonymous mappings.
11779 This will break MADV_DONTNEED.
11780 This is a hint, so ignoring and returning success is ok. */
11783 #ifdef TARGET_NR_fcntl64
11784 case TARGET_NR_fcntl64
:
11788 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11789 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11792 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11793 copyfrom
= copy_from_user_oabi_flock64
;
11794 copyto
= copy_to_user_oabi_flock64
;
11798 cmd
= target_to_host_fcntl_cmd(arg2
);
11799 if (cmd
== -TARGET_EINVAL
) {
11804 case TARGET_F_GETLK64
:
11805 ret
= copyfrom(&fl
, arg3
);
11809 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11811 ret
= copyto(arg3
, &fl
);
11815 case TARGET_F_SETLK64
:
11816 case TARGET_F_SETLKW64
:
11817 ret
= copyfrom(&fl
, arg3
);
11821 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11824 ret
= do_fcntl(arg1
, arg2
, arg3
);
11830 #ifdef TARGET_NR_cacheflush
11831 case TARGET_NR_cacheflush
:
11832 /* self-modifying code is handled automatically, so nothing needed */
11835 #ifdef TARGET_NR_getpagesize
11836 case TARGET_NR_getpagesize
:
11837 return TARGET_PAGE_SIZE
;
11839 case TARGET_NR_gettid
:
11840 return get_errno(sys_gettid());
11841 #ifdef TARGET_NR_readahead
11842 case TARGET_NR_readahead
:
11843 #if TARGET_ABI_BITS == 32
11844 if (regpairs_aligned(cpu_env
, num
)) {
11849 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11851 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11856 #ifdef TARGET_NR_setxattr
11857 case TARGET_NR_listxattr
:
11858 case TARGET_NR_llistxattr
:
11862 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11864 return -TARGET_EFAULT
;
11867 p
= lock_user_string(arg1
);
11869 if (num
== TARGET_NR_listxattr
) {
11870 ret
= get_errno(listxattr(p
, b
, arg3
));
11872 ret
= get_errno(llistxattr(p
, b
, arg3
));
11875 ret
= -TARGET_EFAULT
;
11877 unlock_user(p
, arg1
, 0);
11878 unlock_user(b
, arg2
, arg3
);
11881 case TARGET_NR_flistxattr
:
11885 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11887 return -TARGET_EFAULT
;
11890 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11891 unlock_user(b
, arg2
, arg3
);
11894 case TARGET_NR_setxattr
:
11895 case TARGET_NR_lsetxattr
:
11897 void *p
, *n
, *v
= 0;
11899 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11901 return -TARGET_EFAULT
;
11904 p
= lock_user_string(arg1
);
11905 n
= lock_user_string(arg2
);
11907 if (num
== TARGET_NR_setxattr
) {
11908 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11910 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11913 ret
= -TARGET_EFAULT
;
11915 unlock_user(p
, arg1
, 0);
11916 unlock_user(n
, arg2
, 0);
11917 unlock_user(v
, arg3
, 0);
11920 case TARGET_NR_fsetxattr
:
11924 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11926 return -TARGET_EFAULT
;
11929 n
= lock_user_string(arg2
);
11931 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11933 ret
= -TARGET_EFAULT
;
11935 unlock_user(n
, arg2
, 0);
11936 unlock_user(v
, arg3
, 0);
11939 case TARGET_NR_getxattr
:
11940 case TARGET_NR_lgetxattr
:
11942 void *p
, *n
, *v
= 0;
11944 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11946 return -TARGET_EFAULT
;
11949 p
= lock_user_string(arg1
);
11950 n
= lock_user_string(arg2
);
11952 if (num
== TARGET_NR_getxattr
) {
11953 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11955 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11958 ret
= -TARGET_EFAULT
;
11960 unlock_user(p
, arg1
, 0);
11961 unlock_user(n
, arg2
, 0);
11962 unlock_user(v
, arg3
, arg4
);
11965 case TARGET_NR_fgetxattr
:
11969 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11971 return -TARGET_EFAULT
;
11974 n
= lock_user_string(arg2
);
11976 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11978 ret
= -TARGET_EFAULT
;
11980 unlock_user(n
, arg2
, 0);
11981 unlock_user(v
, arg3
, arg4
);
11984 case TARGET_NR_removexattr
:
11985 case TARGET_NR_lremovexattr
:
11988 p
= lock_user_string(arg1
);
11989 n
= lock_user_string(arg2
);
11991 if (num
== TARGET_NR_removexattr
) {
11992 ret
= get_errno(removexattr(p
, n
));
11994 ret
= get_errno(lremovexattr(p
, n
));
11997 ret
= -TARGET_EFAULT
;
11999 unlock_user(p
, arg1
, 0);
12000 unlock_user(n
, arg2
, 0);
12003 case TARGET_NR_fremovexattr
:
12006 n
= lock_user_string(arg2
);
12008 ret
= get_errno(fremovexattr(arg1
, n
));
12010 ret
= -TARGET_EFAULT
;
12012 unlock_user(n
, arg2
, 0);
12016 #endif /* CONFIG_ATTR */
12017 #ifdef TARGET_NR_set_thread_area
12018 case TARGET_NR_set_thread_area
:
12019 #if defined(TARGET_MIPS)
12020 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12022 #elif defined(TARGET_CRIS)
12024 ret
= -TARGET_EINVAL
;
12026 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12030 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12031 return do_set_thread_area(cpu_env
, arg1
);
12032 #elif defined(TARGET_M68K)
12034 TaskState
*ts
= cpu
->opaque
;
12035 ts
->tp_value
= arg1
;
12039 return -TARGET_ENOSYS
;
12042 #ifdef TARGET_NR_get_thread_area
12043 case TARGET_NR_get_thread_area
:
12044 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12045 return do_get_thread_area(cpu_env
, arg1
);
12046 #elif defined(TARGET_M68K)
12048 TaskState
*ts
= cpu
->opaque
;
12049 return ts
->tp_value
;
12052 return -TARGET_ENOSYS
;
12055 #ifdef TARGET_NR_getdomainname
12056 case TARGET_NR_getdomainname
:
12057 return -TARGET_ENOSYS
;
12060 #ifdef TARGET_NR_clock_settime
12061 case TARGET_NR_clock_settime
:
12063 struct timespec ts
;
12065 ret
= target_to_host_timespec(&ts
, arg2
);
12066 if (!is_error(ret
)) {
12067 ret
= get_errno(clock_settime(arg1
, &ts
));
12072 #ifdef TARGET_NR_clock_settime64
12073 case TARGET_NR_clock_settime64
:
12075 struct timespec ts
;
12077 ret
= target_to_host_timespec64(&ts
, arg2
);
12078 if (!is_error(ret
)) {
12079 ret
= get_errno(clock_settime(arg1
, &ts
));
12084 #ifdef TARGET_NR_clock_gettime
12085 case TARGET_NR_clock_gettime
:
12087 struct timespec ts
;
12088 ret
= get_errno(clock_gettime(arg1
, &ts
));
12089 if (!is_error(ret
)) {
12090 ret
= host_to_target_timespec(arg2
, &ts
);
12095 #ifdef TARGET_NR_clock_gettime64
12096 case TARGET_NR_clock_gettime64
:
12098 struct timespec ts
;
12099 ret
= get_errno(clock_gettime(arg1
, &ts
));
12100 if (!is_error(ret
)) {
12101 ret
= host_to_target_timespec64(arg2
, &ts
);
12106 #ifdef TARGET_NR_clock_getres
12107 case TARGET_NR_clock_getres
:
12109 struct timespec ts
;
12110 ret
= get_errno(clock_getres(arg1
, &ts
));
12111 if (!is_error(ret
)) {
12112 host_to_target_timespec(arg2
, &ts
);
12117 #ifdef TARGET_NR_clock_getres_time64
12118 case TARGET_NR_clock_getres_time64
:
12120 struct timespec ts
;
12121 ret
= get_errno(clock_getres(arg1
, &ts
));
12122 if (!is_error(ret
)) {
12123 host_to_target_timespec64(arg2
, &ts
);
12128 #ifdef TARGET_NR_clock_nanosleep
12129 case TARGET_NR_clock_nanosleep
:
12131 struct timespec ts
;
12132 if (target_to_host_timespec(&ts
, arg3
)) {
12133 return -TARGET_EFAULT
;
12135 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12136 &ts
, arg4
? &ts
: NULL
));
12138 * if the call is interrupted by a signal handler, it fails
12139 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12140 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12142 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12143 host_to_target_timespec(arg4
, &ts
)) {
12144 return -TARGET_EFAULT
;
12150 #ifdef TARGET_NR_clock_nanosleep_time64
12151 case TARGET_NR_clock_nanosleep_time64
:
12153 struct timespec ts
;
12155 if (target_to_host_timespec64(&ts
, arg3
)) {
12156 return -TARGET_EFAULT
;
12159 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12160 &ts
, arg4
? &ts
: NULL
));
12162 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12163 host_to_target_timespec64(arg4
, &ts
)) {
12164 return -TARGET_EFAULT
;
12170 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12171 case TARGET_NR_set_tid_address
:
12172 return get_errno(set_tid_address((int *)g2h(cpu
, arg1
)));
12175 case TARGET_NR_tkill
:
12176 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12178 case TARGET_NR_tgkill
:
12179 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12180 target_to_host_signal(arg3
)));
12182 #ifdef TARGET_NR_set_robust_list
12183 case TARGET_NR_set_robust_list
:
12184 case TARGET_NR_get_robust_list
:
12185 /* The ABI for supporting robust futexes has userspace pass
12186 * the kernel a pointer to a linked list which is updated by
12187 * userspace after the syscall; the list is walked by the kernel
12188 * when the thread exits. Since the linked list in QEMU guest
12189 * memory isn't a valid linked list for the host and we have
12190 * no way to reliably intercept the thread-death event, we can't
12191 * support these. Silently return ENOSYS so that guest userspace
12192 * falls back to a non-robust futex implementation (which should
12193 * be OK except in the corner case of the guest crashing while
12194 * holding a mutex that is shared with another process via
12197 return -TARGET_ENOSYS
;
12200 #if defined(TARGET_NR_utimensat)
12201 case TARGET_NR_utimensat
:
12203 struct timespec
*tsp
, ts
[2];
12207 if (target_to_host_timespec(ts
, arg3
)) {
12208 return -TARGET_EFAULT
;
12210 if (target_to_host_timespec(ts
+ 1, arg3
+
12211 sizeof(struct target_timespec
))) {
12212 return -TARGET_EFAULT
;
12217 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12219 if (!(p
= lock_user_string(arg2
))) {
12220 return -TARGET_EFAULT
;
12222 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12223 unlock_user(p
, arg2
, 0);
12228 #ifdef TARGET_NR_utimensat_time64
12229 case TARGET_NR_utimensat_time64
:
12231 struct timespec
*tsp
, ts
[2];
12235 if (target_to_host_timespec64(ts
, arg3
)) {
12236 return -TARGET_EFAULT
;
12238 if (target_to_host_timespec64(ts
+ 1, arg3
+
12239 sizeof(struct target__kernel_timespec
))) {
12240 return -TARGET_EFAULT
;
12245 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12247 p
= lock_user_string(arg2
);
12249 return -TARGET_EFAULT
;
12251 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12252 unlock_user(p
, arg2
, 0);
12257 #ifdef TARGET_NR_futex
12258 case TARGET_NR_futex
:
12259 return do_futex(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12261 #ifdef TARGET_NR_futex_time64
12262 case TARGET_NR_futex_time64
:
12263 return do_futex_time64(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12265 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12266 case TARGET_NR_inotify_init
:
12267 ret
= get_errno(sys_inotify_init());
12269 fd_trans_register(ret
, &target_inotify_trans
);
12273 #ifdef CONFIG_INOTIFY1
12274 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12275 case TARGET_NR_inotify_init1
:
12276 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12277 fcntl_flags_tbl
)));
12279 fd_trans_register(ret
, &target_inotify_trans
);
12284 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12285 case TARGET_NR_inotify_add_watch
:
12286 p
= lock_user_string(arg2
);
12287 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12288 unlock_user(p
, arg2
, 0);
12291 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12292 case TARGET_NR_inotify_rm_watch
:
12293 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12296 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12297 case TARGET_NR_mq_open
:
12299 struct mq_attr posix_mq_attr
;
12300 struct mq_attr
*pposix_mq_attr
;
12303 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12304 pposix_mq_attr
= NULL
;
12306 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12307 return -TARGET_EFAULT
;
12309 pposix_mq_attr
= &posix_mq_attr
;
12311 p
= lock_user_string(arg1
- 1);
12313 return -TARGET_EFAULT
;
12315 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12316 unlock_user (p
, arg1
, 0);
12320 case TARGET_NR_mq_unlink
:
12321 p
= lock_user_string(arg1
- 1);
12323 return -TARGET_EFAULT
;
12325 ret
= get_errno(mq_unlink(p
));
12326 unlock_user (p
, arg1
, 0);
12329 #ifdef TARGET_NR_mq_timedsend
12330 case TARGET_NR_mq_timedsend
:
12332 struct timespec ts
;
12334 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12336 if (target_to_host_timespec(&ts
, arg5
)) {
12337 return -TARGET_EFAULT
;
12339 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12340 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12341 return -TARGET_EFAULT
;
12344 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12346 unlock_user (p
, arg2
, arg3
);
12350 #ifdef TARGET_NR_mq_timedsend_time64
12351 case TARGET_NR_mq_timedsend_time64
:
12353 struct timespec ts
;
12355 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12357 if (target_to_host_timespec64(&ts
, arg5
)) {
12358 return -TARGET_EFAULT
;
12360 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12361 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12362 return -TARGET_EFAULT
;
12365 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12367 unlock_user(p
, arg2
, arg3
);
12372 #ifdef TARGET_NR_mq_timedreceive
12373 case TARGET_NR_mq_timedreceive
:
12375 struct timespec ts
;
12378 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12380 if (target_to_host_timespec(&ts
, arg5
)) {
12381 return -TARGET_EFAULT
;
12383 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12385 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12386 return -TARGET_EFAULT
;
12389 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12392 unlock_user (p
, arg2
, arg3
);
12394 put_user_u32(prio
, arg4
);
12398 #ifdef TARGET_NR_mq_timedreceive_time64
12399 case TARGET_NR_mq_timedreceive_time64
:
12401 struct timespec ts
;
12404 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12406 if (target_to_host_timespec64(&ts
, arg5
)) {
12407 return -TARGET_EFAULT
;
12409 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12411 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12412 return -TARGET_EFAULT
;
12415 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12418 unlock_user(p
, arg2
, arg3
);
12420 put_user_u32(prio
, arg4
);
12426 /* Not implemented for now... */
12427 /* case TARGET_NR_mq_notify: */
12430 case TARGET_NR_mq_getsetattr
:
12432 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12435 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12436 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12437 &posix_mq_attr_out
));
12438 } else if (arg3
!= 0) {
12439 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12441 if (ret
== 0 && arg3
!= 0) {
12442 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12448 #ifdef CONFIG_SPLICE
12449 #ifdef TARGET_NR_tee
12450 case TARGET_NR_tee
:
12452 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12456 #ifdef TARGET_NR_splice
12457 case TARGET_NR_splice
:
12459 loff_t loff_in
, loff_out
;
12460 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12462 if (get_user_u64(loff_in
, arg2
)) {
12463 return -TARGET_EFAULT
;
12465 ploff_in
= &loff_in
;
12468 if (get_user_u64(loff_out
, arg4
)) {
12469 return -TARGET_EFAULT
;
12471 ploff_out
= &loff_out
;
12473 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12475 if (put_user_u64(loff_in
, arg2
)) {
12476 return -TARGET_EFAULT
;
12480 if (put_user_u64(loff_out
, arg4
)) {
12481 return -TARGET_EFAULT
;
12487 #ifdef TARGET_NR_vmsplice
12488 case TARGET_NR_vmsplice
:
12490 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12492 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12493 unlock_iovec(vec
, arg2
, arg3
, 0);
12495 ret
= -host_to_target_errno(errno
);
12500 #endif /* CONFIG_SPLICE */
12501 #ifdef CONFIG_EVENTFD
12502 #if defined(TARGET_NR_eventfd)
12503 case TARGET_NR_eventfd
:
12504 ret
= get_errno(eventfd(arg1
, 0));
12506 fd_trans_register(ret
, &target_eventfd_trans
);
12510 #if defined(TARGET_NR_eventfd2)
12511 case TARGET_NR_eventfd2
:
12513 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12514 if (arg2
& TARGET_O_NONBLOCK
) {
12515 host_flags
|= O_NONBLOCK
;
12517 if (arg2
& TARGET_O_CLOEXEC
) {
12518 host_flags
|= O_CLOEXEC
;
12520 ret
= get_errno(eventfd(arg1
, host_flags
));
12522 fd_trans_register(ret
, &target_eventfd_trans
);
12527 #endif /* CONFIG_EVENTFD */
12528 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12529 case TARGET_NR_fallocate
:
12530 #if TARGET_ABI_BITS == 32
12531 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12532 target_offset64(arg5
, arg6
)));
12534 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12538 #if defined(CONFIG_SYNC_FILE_RANGE)
12539 #if defined(TARGET_NR_sync_file_range)
12540 case TARGET_NR_sync_file_range
:
12541 #if TARGET_ABI_BITS == 32
12542 #if defined(TARGET_MIPS)
12543 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12544 target_offset64(arg5
, arg6
), arg7
));
12546 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12547 target_offset64(arg4
, arg5
), arg6
));
12548 #endif /* !TARGET_MIPS */
12550 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12554 #if defined(TARGET_NR_sync_file_range2) || \
12555 defined(TARGET_NR_arm_sync_file_range)
12556 #if defined(TARGET_NR_sync_file_range2)
12557 case TARGET_NR_sync_file_range2
:
12559 #if defined(TARGET_NR_arm_sync_file_range)
12560 case TARGET_NR_arm_sync_file_range
:
12562 /* This is like sync_file_range but the arguments are reordered */
12563 #if TARGET_ABI_BITS == 32
12564 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12565 target_offset64(arg5
, arg6
), arg2
));
12567 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12572 #if defined(TARGET_NR_signalfd4)
12573 case TARGET_NR_signalfd4
:
12574 return do_signalfd4(arg1
, arg2
, arg4
);
12576 #if defined(TARGET_NR_signalfd)
12577 case TARGET_NR_signalfd
:
12578 return do_signalfd4(arg1
, arg2
, 0);
12580 #if defined(CONFIG_EPOLL)
12581 #if defined(TARGET_NR_epoll_create)
12582 case TARGET_NR_epoll_create
:
12583 return get_errno(epoll_create(arg1
));
12585 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12586 case TARGET_NR_epoll_create1
:
12587 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12589 #if defined(TARGET_NR_epoll_ctl)
12590 case TARGET_NR_epoll_ctl
:
12592 struct epoll_event ep
;
12593 struct epoll_event
*epp
= 0;
12595 if (arg2
!= EPOLL_CTL_DEL
) {
12596 struct target_epoll_event
*target_ep
;
12597 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12598 return -TARGET_EFAULT
;
12600 ep
.events
= tswap32(target_ep
->events
);
12602 * The epoll_data_t union is just opaque data to the kernel,
12603 * so we transfer all 64 bits across and need not worry what
12604 * actual data type it is.
12606 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12607 unlock_user_struct(target_ep
, arg4
, 0);
12610 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12611 * non-null pointer, even though this argument is ignored.
12616 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12620 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12621 #if defined(TARGET_NR_epoll_wait)
12622 case TARGET_NR_epoll_wait
:
12624 #if defined(TARGET_NR_epoll_pwait)
12625 case TARGET_NR_epoll_pwait
:
12628 struct target_epoll_event
*target_ep
;
12629 struct epoll_event
*ep
;
12631 int maxevents
= arg3
;
12632 int timeout
= arg4
;
12634 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12635 return -TARGET_EINVAL
;
12638 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12639 maxevents
* sizeof(struct target_epoll_event
), 1);
12641 return -TARGET_EFAULT
;
12644 ep
= g_try_new(struct epoll_event
, maxevents
);
12646 unlock_user(target_ep
, arg2
, 0);
12647 return -TARGET_ENOMEM
;
12651 #if defined(TARGET_NR_epoll_pwait)
12652 case TARGET_NR_epoll_pwait
:
12654 target_sigset_t
*target_set
;
12655 sigset_t _set
, *set
= &_set
;
12658 if (arg6
!= sizeof(target_sigset_t
)) {
12659 ret
= -TARGET_EINVAL
;
12663 target_set
= lock_user(VERIFY_READ
, arg5
,
12664 sizeof(target_sigset_t
), 1);
12666 ret
= -TARGET_EFAULT
;
12669 target_to_host_sigset(set
, target_set
);
12670 unlock_user(target_set
, arg5
, 0);
12675 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12676 set
, SIGSET_T_SIZE
));
12680 #if defined(TARGET_NR_epoll_wait)
12681 case TARGET_NR_epoll_wait
:
12682 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12687 ret
= -TARGET_ENOSYS
;
12689 if (!is_error(ret
)) {
12691 for (i
= 0; i
< ret
; i
++) {
12692 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12693 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12695 unlock_user(target_ep
, arg2
,
12696 ret
* sizeof(struct target_epoll_event
));
12698 unlock_user(target_ep
, arg2
, 0);
12705 #ifdef TARGET_NR_prlimit64
12706 case TARGET_NR_prlimit64
:
12708 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12709 struct target_rlimit64
*target_rnew
, *target_rold
;
12710 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12711 int resource
= target_to_host_resource(arg2
);
12713 if (arg3
&& (resource
!= RLIMIT_AS
&&
12714 resource
!= RLIMIT_DATA
&&
12715 resource
!= RLIMIT_STACK
)) {
12716 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12717 return -TARGET_EFAULT
;
12719 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12720 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12721 unlock_user_struct(target_rnew
, arg3
, 0);
12725 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12726 if (!is_error(ret
) && arg4
) {
12727 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12728 return -TARGET_EFAULT
;
12730 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12731 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12732 unlock_user_struct(target_rold
, arg4
, 1);
12737 #ifdef TARGET_NR_gethostname
12738 case TARGET_NR_gethostname
:
12740 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12742 ret
= get_errno(gethostname(name
, arg2
));
12743 unlock_user(name
, arg1
, arg2
);
12745 ret
= -TARGET_EFAULT
;
12750 #ifdef TARGET_NR_atomic_cmpxchg_32
12751 case TARGET_NR_atomic_cmpxchg_32
:
12753 /* should use start_exclusive from main.c */
12754 abi_ulong mem_value
;
12755 if (get_user_u32(mem_value
, arg6
)) {
12756 target_siginfo_t info
;
12757 info
.si_signo
= SIGSEGV
;
12759 info
.si_code
= TARGET_SEGV_MAPERR
;
12760 info
._sifields
._sigfault
._addr
= arg6
;
12761 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12762 QEMU_SI_FAULT
, &info
);
12766 if (mem_value
== arg2
)
12767 put_user_u32(arg1
, arg6
);
12771 #ifdef TARGET_NR_atomic_barrier
12772 case TARGET_NR_atomic_barrier
:
12773 /* Like the kernel implementation and the
12774 qemu arm barrier, no-op this? */
12778 #ifdef TARGET_NR_timer_create
12779 case TARGET_NR_timer_create
:
12781 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12783 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12786 int timer_index
= next_free_host_timer();
12788 if (timer_index
< 0) {
12789 ret
= -TARGET_EAGAIN
;
12791 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12794 phost_sevp
= &host_sevp
;
12795 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12801 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12805 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12806 return -TARGET_EFAULT
;
12814 #ifdef TARGET_NR_timer_settime
12815 case TARGET_NR_timer_settime
:
12817 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12818 * struct itimerspec * old_value */
12819 target_timer_t timerid
= get_timer_id(arg1
);
12823 } else if (arg3
== 0) {
12824 ret
= -TARGET_EINVAL
;
12826 timer_t htimer
= g_posix_timers
[timerid
];
12827 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12829 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12830 return -TARGET_EFAULT
;
12833 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12834 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12835 return -TARGET_EFAULT
;
12842 #ifdef TARGET_NR_timer_settime64
12843 case TARGET_NR_timer_settime64
:
12845 target_timer_t timerid
= get_timer_id(arg1
);
12849 } else if (arg3
== 0) {
12850 ret
= -TARGET_EINVAL
;
12852 timer_t htimer
= g_posix_timers
[timerid
];
12853 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12855 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12856 return -TARGET_EFAULT
;
12859 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12860 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12861 return -TARGET_EFAULT
;
12868 #ifdef TARGET_NR_timer_gettime
12869 case TARGET_NR_timer_gettime
:
12871 /* args: timer_t timerid, struct itimerspec *curr_value */
12872 target_timer_t timerid
= get_timer_id(arg1
);
12876 } else if (!arg2
) {
12877 ret
= -TARGET_EFAULT
;
12879 timer_t htimer
= g_posix_timers
[timerid
];
12880 struct itimerspec hspec
;
12881 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12883 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12884 ret
= -TARGET_EFAULT
;
12891 #ifdef TARGET_NR_timer_gettime64
12892 case TARGET_NR_timer_gettime64
:
12894 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12895 target_timer_t timerid
= get_timer_id(arg1
);
12899 } else if (!arg2
) {
12900 ret
= -TARGET_EFAULT
;
12902 timer_t htimer
= g_posix_timers
[timerid
];
12903 struct itimerspec hspec
;
12904 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12906 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12907 ret
= -TARGET_EFAULT
;
12914 #ifdef TARGET_NR_timer_getoverrun
12915 case TARGET_NR_timer_getoverrun
:
12917 /* args: timer_t timerid */
12918 target_timer_t timerid
= get_timer_id(arg1
);
12923 timer_t htimer
= g_posix_timers
[timerid
];
12924 ret
= get_errno(timer_getoverrun(htimer
));
12930 #ifdef TARGET_NR_timer_delete
12931 case TARGET_NR_timer_delete
:
12933 /* args: timer_t timerid */
12934 target_timer_t timerid
= get_timer_id(arg1
);
12939 timer_t htimer
= g_posix_timers
[timerid
];
12940 ret
= get_errno(timer_delete(htimer
));
12941 g_posix_timers
[timerid
] = 0;
12947 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12948 case TARGET_NR_timerfd_create
:
12949 return get_errno(timerfd_create(arg1
,
12950 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12953 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12954 case TARGET_NR_timerfd_gettime
:
12956 struct itimerspec its_curr
;
12958 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12960 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12961 return -TARGET_EFAULT
;
12967 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12968 case TARGET_NR_timerfd_gettime64
:
12970 struct itimerspec its_curr
;
12972 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12974 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
12975 return -TARGET_EFAULT
;
12981 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12982 case TARGET_NR_timerfd_settime
:
12984 struct itimerspec its_new
, its_old
, *p_new
;
12987 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12988 return -TARGET_EFAULT
;
12995 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12997 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12998 return -TARGET_EFAULT
;
13004 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13005 case TARGET_NR_timerfd_settime64
:
13007 struct itimerspec its_new
, its_old
, *p_new
;
13010 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13011 return -TARGET_EFAULT
;
13018 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13020 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13021 return -TARGET_EFAULT
;
13027 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13028 case TARGET_NR_ioprio_get
:
13029 return get_errno(ioprio_get(arg1
, arg2
));
13032 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13033 case TARGET_NR_ioprio_set
:
13034 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13037 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13038 case TARGET_NR_setns
:
13039 return get_errno(setns(arg1
, arg2
));
13041 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13042 case TARGET_NR_unshare
:
13043 return get_errno(unshare(arg1
));
13045 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13046 case TARGET_NR_kcmp
:
13047 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13049 #ifdef TARGET_NR_swapcontext
13050 case TARGET_NR_swapcontext
:
13051 /* PowerPC specific. */
13052 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13054 #ifdef TARGET_NR_memfd_create
13055 case TARGET_NR_memfd_create
:
13056 p
= lock_user_string(arg1
);
13058 return -TARGET_EFAULT
;
13060 ret
= get_errno(memfd_create(p
, arg2
));
13061 fd_trans_unregister(ret
);
13062 unlock_user(p
, arg1
, 0);
13065 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13066 case TARGET_NR_membarrier
:
13067 return get_errno(membarrier(arg1
, arg2
));
13070 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13071 case TARGET_NR_copy_file_range
:
13073 loff_t inoff
, outoff
;
13074 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13077 if (get_user_u64(inoff
, arg2
)) {
13078 return -TARGET_EFAULT
;
13083 if (get_user_u64(outoff
, arg4
)) {
13084 return -TARGET_EFAULT
;
13088 /* Do not sign-extend the count parameter. */
13089 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13090 (abi_ulong
)arg5
, arg6
));
13091 if (!is_error(ret
) && ret
> 0) {
13093 if (put_user_u64(inoff
, arg2
)) {
13094 return -TARGET_EFAULT
;
13098 if (put_user_u64(outoff
, arg4
)) {
13099 return -TARGET_EFAULT
;
13107 #if defined(TARGET_NR_pivot_root)
13108 case TARGET_NR_pivot_root
:
13111 p
= lock_user_string(arg1
); /* new_root */
13112 p2
= lock_user_string(arg2
); /* put_old */
13114 ret
= -TARGET_EFAULT
;
13116 ret
= get_errno(pivot_root(p
, p2
));
13118 unlock_user(p2
, arg2
, 0);
13119 unlock_user(p
, arg1
, 0);
13125 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13126 return -TARGET_ENOSYS
;
13131 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13132 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13133 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13136 CPUState
*cpu
= env_cpu(cpu_env
);
13139 #ifdef DEBUG_ERESTARTSYS
13140 /* Debug-only code for exercising the syscall-restart code paths
13141 * in the per-architecture cpu main loops: restart every syscall
13142 * the guest makes once before letting it through.
13148 return -TARGET_ERESTARTSYS
;
13153 record_syscall_start(cpu
, num
, arg1
,
13154 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13156 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13157 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13160 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13161 arg5
, arg6
, arg7
, arg8
);
13163 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13164 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13165 arg3
, arg4
, arg5
, arg6
);
13168 record_syscall_return(cpu
, num
, ret
);