4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
120 #include <linux/btrfs.h>
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
126 #include "linux_loop.h"
130 #include "user-internals.h"
132 #include "signal-common.h"
134 #include "user-mmap.h"
135 #include "safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "qapi/error.h"
140 #include "fd-trans.h"
144 #define CLONE_IO 0x80000000 /* Clone io context */
147 /* We can't directly call the host clone syscall, because this will
148 * badly confuse libc (breaking mutexes, for example). So we must
149 * divide clone flags into:
150 * * flag combinations that look like pthread_create()
151 * * flag combinations that look like fork()
152 * * flags we can implement within QEMU itself
153 * * flags we can't support and will return an error for
155 /* For thread creation, all these flags must be present; for
156 * fork, none must be present.
158 #define CLONE_THREAD_FLAGS \
159 (CLONE_VM | CLONE_FS | CLONE_FILES | \
160 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 /* These flags are ignored:
163 * CLONE_DETACHED is now ignored by the kernel;
164 * CLONE_IO is just an optimisation hint to the I/O scheduler
166 #define CLONE_IGNORED_FLAGS \
167 (CLONE_DETACHED | CLONE_IO)
169 /* Flags for fork which we can implement within QEMU itself */
170 #define CLONE_OPTIONAL_FORK_FLAGS \
171 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
172 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 /* Flags for thread creation which we can implement within QEMU itself */
175 #define CLONE_OPTIONAL_THREAD_FLAGS \
176 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
177 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 #define CLONE_INVALID_FORK_FLAGS \
180 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 #define CLONE_INVALID_THREAD_FLAGS \
183 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
184 CLONE_IGNORED_FLAGS))
186 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
187 * have almost all been allocated. We cannot support any of
188 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
189 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
190 * The checks against the invalid thread masks above will catch these.
191 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
194 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
195 * once. This exercises the codepaths for restart.
197 //#define DEBUG_ERESTARTSYS
199 //#include <linux/msdos_fs.h>
200 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
201 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
211 #define _syscall0(type,name) \
212 static type name (void) \
214 return syscall(__NR_##name); \
217 #define _syscall1(type,name,type1,arg1) \
218 static type name (type1 arg1) \
220 return syscall(__NR_##name, arg1); \
223 #define _syscall2(type,name,type1,arg1,type2,arg2) \
224 static type name (type1 arg1,type2 arg2) \
226 return syscall(__NR_##name, arg1, arg2); \
229 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
230 static type name (type1 arg1,type2 arg2,type3 arg3) \
232 return syscall(__NR_##name, arg1, arg2, arg3); \
235 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
236 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
238 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
241 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
243 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
245 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
249 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
250 type5,arg5,type6,arg6) \
251 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
254 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
258 #define __NR_sys_uname __NR_uname
259 #define __NR_sys_getcwd1 __NR_getcwd
260 #define __NR_sys_getdents __NR_getdents
261 #define __NR_sys_getdents64 __NR_getdents64
262 #define __NR_sys_getpriority __NR_getpriority
263 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
264 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
265 #define __NR_sys_syslog __NR_syslog
266 #if defined(__NR_futex)
267 # define __NR_sys_futex __NR_futex
269 #if defined(__NR_futex_time64)
270 # define __NR_sys_futex_time64 __NR_futex_time64
272 #define __NR_sys_inotify_init __NR_inotify_init
273 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
274 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
275 #define __NR_sys_statx __NR_statx
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid
)
294 /* For the 64-bit guest on 32-bit host case we must emulate
295 * getdents using getdents64, because otherwise the host
296 * might hand us back more dirent records than we can fit
297 * into the guest buffer after structure format conversion.
298 * Otherwise we emulate getdents with getdents if the host has it.
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
307 #if (defined(TARGET_NR_getdents) && \
308 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
314 loff_t
*, res
, uint
, wh
);
316 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
317 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
319 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group
,int,error_code
)
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address
,int *,tidptr
)
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
328 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
332 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
336 unsigned long *, user_mask_ptr
);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
339 unsigned long *, user_mask_ptr
);
340 #define __NR_sys_getcpu __NR_getcpu
341 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
342 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
344 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
345 struct __user_cap_data_struct
*, data
);
346 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
347 struct __user_cap_data_struct
*, data
);
348 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
349 _syscall2(int, ioprio_get
, int, which
, int, who
)
351 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
352 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
354 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
355 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
358 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
359 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
360 unsigned long, idx1
, unsigned long, idx2
)
364 * It is assumed that struct statx is architecture independent.
366 #if defined(TARGET_NR_statx) && defined(__NR_statx)
367 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
368 unsigned int, mask
, struct target_statx
*, statxbuf
)
370 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
371 _syscall2(int, membarrier
, int, cmd
, int, flags
)
374 static const bitmask_transtbl fcntl_flags_tbl
[] = {
375 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
376 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
377 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
378 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
379 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
380 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
381 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
382 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
383 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
384 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
385 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
386 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
387 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
388 #if defined(O_DIRECT)
389 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
391 #if defined(O_NOATIME)
392 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
394 #if defined(O_CLOEXEC)
395 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
398 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
400 #if defined(O_TMPFILE)
401 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
403 /* Don't terminate the list prematurely on 64-bit host+guest. */
404 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
405 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
410 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
412 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
413 #if defined(__NR_utimensat)
414 #define __NR_sys_utimensat __NR_utimensat
415 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
416 const struct timespec
*,tsp
,int,flags
)
/* Fallback when the host has no utimensat syscall: fail with ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
425 #endif /* TARGET_NR_utimensat */
427 #ifdef TARGET_NR_renameat2
428 #if defined(__NR_renameat2)
429 #define __NR_sys_renameat2 __NR_renameat2
430 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
431 const char *, new, unsigned int, flags
)
/*
 * Fallback when the host lacks renameat2: a zero-flags call maps to
 * plain renameat(); any non-zero flags cannot be emulated, so fail
 * with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
443 #endif /* TARGET_NR_renameat2 */
445 #ifdef CONFIG_INOTIFY
446 #include <sys/inotify.h>
448 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall table can call inotify_init() directly. */
static int sys_inotify_init(void)
{
    return (inotify_init());
}
454 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around inotify_add_watch() for the syscall table. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
460 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around inotify_rm_watch() for the syscall table. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
466 #ifdef CONFIG_INOTIFY1
467 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around inotify_init1() for the syscall table. */
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
475 /* Userspace can usually survive runtime without inotify */
476 #undef TARGET_NR_inotify_init
477 #undef TARGET_NR_inotify_init1
478 #undef TARGET_NR_inotify_add_watch
479 #undef TARGET_NR_inotify_rm_watch
480 #endif /* CONFIG_INOTIFY */
482 #if defined(TARGET_NR_prlimit64)
483 #ifndef __NR_prlimit64
484 # define __NR_prlimit64 -1
486 #define __NR_sys_prlimit64 __NR_prlimit64
487 /* The glibc rlimit structure may not be that used by the underlying syscall */
488 struct host_rlimit64
{
492 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
493 const struct host_rlimit64
*, new_limit
,
494 struct host_rlimit64
*, old_limit
)
498 #if defined(TARGET_NR_timer_create)
499 /* Maximum of 32 active POSIX timers allowed at any one time. */
500 static timer_t g_posix_timers
[32] = { 0, } ;
502 static inline int next_free_host_timer(void)
505 /* FIXME: Does finding the next free slot require a lock? */
506 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
507 if (g_posix_timers
[k
] == 0) {
508 g_posix_timers
[k
] = (timer_t
) 1;
516 static inline int host_to_target_errno(int host_errno
)
518 switch (host_errno
) {
519 #define E(X) case X: return TARGET_##X;
520 #include "errnos.c.inc"
527 static inline int target_to_host_errno(int target_errno
)
529 switch (target_errno
) {
530 #define E(X) case TARGET_##X: return X;
531 #include "errnos.c.inc"
538 static inline abi_long
get_errno(abi_long ret
)
541 return -host_to_target_errno(errno
);
546 const char *target_strerror(int err
)
548 if (err
== TARGET_ERESTARTSYS
) {
549 return "To be restarted";
551 if (err
== TARGET_QEMU_ESIGRETURN
) {
552 return "Successful exit from sigreturn";
555 return strerror(target_to_host_errno(err
));
558 #define safe_syscall0(type, name) \
559 static type safe_##name(void) \
561 return safe_syscall(__NR_##name); \
564 #define safe_syscall1(type, name, type1, arg1) \
565 static type safe_##name(type1 arg1) \
567 return safe_syscall(__NR_##name, arg1); \
570 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
571 static type safe_##name(type1 arg1, type2 arg2) \
573 return safe_syscall(__NR_##name, arg1, arg2); \
576 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
577 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
579 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
582 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
584 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
586 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
589 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
590 type4, arg4, type5, arg5) \
591 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
594 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
597 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
598 type4, arg4, type5, arg5, type6, arg6) \
599 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
600 type5 arg5, type6 arg6) \
602 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
605 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
606 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
607 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
608 int, flags
, mode_t
, mode
)
609 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
610 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
611 struct rusage
*, rusage
)
613 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
614 int, options
, struct rusage
*, rusage
)
615 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
616 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
617 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
618 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
619 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
621 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
622 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
623 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
626 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
627 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
629 #if defined(__NR_futex)
630 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
631 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
633 #if defined(__NR_futex_time64)
634 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
635 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
637 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
638 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
639 safe_syscall2(int, tkill
, int, tid
, int, sig
)
640 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
641 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
642 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
643 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
644 unsigned long, pos_l
, unsigned long, pos_h
)
645 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
646 unsigned long, pos_l
, unsigned long, pos_h
)
647 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
649 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
650 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
651 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
652 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
653 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
654 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
655 safe_syscall2(int, flock
, int, fd
, int, operation
)
656 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
657 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
658 const struct timespec
*, uts
, size_t, sigsetsize
)
660 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
662 #if defined(TARGET_NR_nanosleep)
663 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
664 struct timespec
*, rem
)
666 #if defined(TARGET_NR_clock_nanosleep) || \
667 defined(TARGET_NR_clock_nanosleep_time64)
668 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
669 const struct timespec
*, req
, struct timespec
*, rem
)
673 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
676 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
677 void *, ptr
, long, fifth
)
681 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
685 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
686 long, msgtype
, int, flags
)
688 #ifdef __NR_semtimedop
689 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
690 unsigned, nsops
, const struct timespec
*, timeout
)
692 #if defined(TARGET_NR_mq_timedsend) || \
693 defined(TARGET_NR_mq_timedsend_time64)
694 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
695 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
697 #if defined(TARGET_NR_mq_timedreceive) || \
698 defined(TARGET_NR_mq_timedreceive_time64)
699 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
700 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
702 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
703 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
704 int, outfd
, loff_t
*, poutoff
, size_t, length
,
708 /* We do ioctl like this rather than via safe_syscall3 to preserve the
709 * "third argument might be integer or pointer or not present" behaviour of
712 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
713 /* Similarly for fcntl. Note that callers must always:
714 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
715 * use the flock64 struct rather than unsuffixed flock
716 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
719 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
721 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
724 static inline int host_to_target_sock_type(int host_type
)
728 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
730 target_type
= TARGET_SOCK_DGRAM
;
733 target_type
= TARGET_SOCK_STREAM
;
736 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
740 #if defined(SOCK_CLOEXEC)
741 if (host_type
& SOCK_CLOEXEC
) {
742 target_type
|= TARGET_SOCK_CLOEXEC
;
746 #if defined(SOCK_NONBLOCK)
747 if (host_type
& SOCK_NONBLOCK
) {
748 target_type
|= TARGET_SOCK_NONBLOCK
;
755 static abi_ulong target_brk
;
756 static abi_ulong target_original_brk
;
757 static abi_ulong brk_page
;
759 void target_set_brk(abi_ulong new_brk
)
761 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
762 brk_page
= HOST_PAGE_ALIGN(target_brk
);
765 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
766 #define DEBUGF_BRK(message, args...)
768 /* do_brk() must return target values and target errnos. */
769 abi_long
do_brk(abi_ulong new_brk
)
771 abi_long mapped_addr
;
772 abi_ulong new_alloc_size
;
774 /* brk pointers are always untagged */
776 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
779 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
782 if (new_brk
< target_original_brk
) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
788 /* If the new brk is less than the highest page reserved to the
789 * target heap allocation, set it and we're almost done... */
790 if (new_brk
<= brk_page
) {
791 /* Heap contents are initialized to zero, as for anonymous
793 if (new_brk
> target_brk
) {
794 memset(g2h_untagged(target_brk
), 0, new_brk
- target_brk
);
796 target_brk
= new_brk
;
797 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
801 /* We need to allocate more memory after the brk... Note that
802 * we don't use MAP_FIXED because that will map over the top of
803 * any existing mapping (like the one with the host libc or qemu
804 * itself); instead we treat "mapped but at wrong address" as
805 * a failure and unmap again.
807 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
808 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
809 PROT_READ
|PROT_WRITE
,
810 MAP_ANON
|MAP_PRIVATE
, 0, 0));
812 if (mapped_addr
== brk_page
) {
813 /* Heap contents are initialized to zero, as for anonymous
814 * mapped pages. Technically the new pages are already
815 * initialized to zero since they *are* anonymous mapped
816 * pages, however we have to take care with the contents that
817 * come from the remaining part of the previous page: it may
818 * contains garbage data due to a previous heap usage (grown
820 memset(g2h_untagged(target_brk
), 0, brk_page
- target_brk
);
822 target_brk
= new_brk
;
823 brk_page
= HOST_PAGE_ALIGN(target_brk
);
824 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
827 } else if (mapped_addr
!= -1) {
828 /* Mapped but at wrong address, meaning there wasn't actually
829 * enough space for this brk.
831 target_munmap(mapped_addr
, new_alloc_size
);
833 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
836 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
839 #if defined(TARGET_ALPHA)
840 /* We (partially) emulate OSF/1 on Alpha, which requires we
841 return a proper errno, not an unchanged brk value. */
842 return -TARGET_ENOMEM
;
844 /* For everything else, return the previous break. */
848 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
849 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
850 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
851 abi_ulong target_fds_addr
,
855 abi_ulong b
, *target_fds
;
857 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
858 if (!(target_fds
= lock_user(VERIFY_READ
,
860 sizeof(abi_ulong
) * nw
,
862 return -TARGET_EFAULT
;
866 for (i
= 0; i
< nw
; i
++) {
867 /* grab the abi_ulong */
868 __get_user(b
, &target_fds
[i
]);
869 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
870 /* check the bit inside the abi_ulong */
877 unlock_user(target_fds
, target_fds_addr
, 0);
882 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
883 abi_ulong target_fds_addr
,
886 if (target_fds_addr
) {
887 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
888 return -TARGET_EFAULT
;
896 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
902 abi_ulong
*target_fds
;
904 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
905 if (!(target_fds
= lock_user(VERIFY_WRITE
,
907 sizeof(abi_ulong
) * nw
,
909 return -TARGET_EFAULT
;
912 for (i
= 0; i
< nw
; i
++) {
914 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
915 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
918 __put_user(v
, &target_fds
[i
]);
921 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
927 #if defined(__alpha__)
933 static inline abi_long
host_to_target_clock_t(long ticks
)
935 #if HOST_HZ == TARGET_HZ
938 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
942 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
943 const struct rusage
*rusage
)
945 struct target_rusage
*target_rusage
;
947 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
948 return -TARGET_EFAULT
;
949 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
950 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
951 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
952 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
953 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
954 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
955 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
956 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
957 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
958 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
959 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
960 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
961 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
962 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
963 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
964 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
965 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
966 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
967 unlock_user_struct(target_rusage
, target_addr
, 1);
972 #ifdef TARGET_NR_setrlimit
973 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
975 abi_ulong target_rlim_swap
;
978 target_rlim_swap
= tswapal(target_rlim
);
979 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
980 return RLIM_INFINITY
;
982 result
= target_rlim_swap
;
983 if (target_rlim_swap
!= (rlim_t
)result
)
984 return RLIM_INFINITY
;
990 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
991 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
993 abi_ulong target_rlim_swap
;
996 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
997 target_rlim_swap
= TARGET_RLIM_INFINITY
;
999 target_rlim_swap
= rlim
;
1000 result
= tswapal(target_rlim_swap
);
1006 static inline int target_to_host_resource(int code
)
1009 case TARGET_RLIMIT_AS
:
1011 case TARGET_RLIMIT_CORE
:
1013 case TARGET_RLIMIT_CPU
:
1015 case TARGET_RLIMIT_DATA
:
1017 case TARGET_RLIMIT_FSIZE
:
1018 return RLIMIT_FSIZE
;
1019 case TARGET_RLIMIT_LOCKS
:
1020 return RLIMIT_LOCKS
;
1021 case TARGET_RLIMIT_MEMLOCK
:
1022 return RLIMIT_MEMLOCK
;
1023 case TARGET_RLIMIT_MSGQUEUE
:
1024 return RLIMIT_MSGQUEUE
;
1025 case TARGET_RLIMIT_NICE
:
1027 case TARGET_RLIMIT_NOFILE
:
1028 return RLIMIT_NOFILE
;
1029 case TARGET_RLIMIT_NPROC
:
1030 return RLIMIT_NPROC
;
1031 case TARGET_RLIMIT_RSS
:
1033 case TARGET_RLIMIT_RTPRIO
:
1034 return RLIMIT_RTPRIO
;
1035 case TARGET_RLIMIT_SIGPENDING
:
1036 return RLIMIT_SIGPENDING
;
1037 case TARGET_RLIMIT_STACK
:
1038 return RLIMIT_STACK
;
1044 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1045 abi_ulong target_tv_addr
)
1047 struct target_timeval
*target_tv
;
1049 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1050 return -TARGET_EFAULT
;
1053 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1054 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1056 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1061 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1062 const struct timeval
*tv
)
1064 struct target_timeval
*target_tv
;
1066 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1067 return -TARGET_EFAULT
;
1070 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1071 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1073 unlock_user_struct(target_tv
, target_tv_addr
, 1);
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/* Read a 64-bit struct target__kernel_sock_timeval from guest memory
 * into a host struct timeval.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif
1097 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1098 const struct timeval
*tv
)
1100 struct target__kernel_sock_timeval
*target_tv
;
1102 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1103 return -TARGET_EFAULT
;
1106 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1107 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1109 unlock_user_struct(target_tv
, target_tv_addr
, 1);
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Read a struct target_timespec from guest memory into a host
 * struct timespec.  Returns 0 on success, -TARGET_EFAULT if the guest
 * address cannot be accessed.
 *
 * Guard fix: the condition previously tested TARGET_NR_pselect6 twice;
 * the second term must be TARGET_NR_pselect6_time64, because
 * do_pselect6() is built when either syscall number is defined and
 * calls this helper on its non-time64 path.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/* Read a struct target__kernel_timespec (64-bit time) from guest memory
 * into a host struct timespec.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1166 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1167 struct timespec
*host_ts
)
1169 struct target_timespec
*target_ts
;
1171 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1172 return -TARGET_EFAULT
;
1174 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1175 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1176 unlock_user_struct(target_ts
, target_addr
, 1);
1180 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1181 struct timespec
*host_ts
)
1183 struct target__kernel_timespec
*target_ts
;
1185 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1186 return -TARGET_EFAULT
;
1188 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1189 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1190 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_gettimeofday)
/* Write a host struct timezone out to guest memory as a
 * struct target_timezone.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
/* Read a struct target_timezone from guest memory into a host
 * struct timezone.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
1232 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1235 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1236 abi_ulong target_mq_attr_addr
)
1238 struct target_mq_attr
*target_mq_attr
;
1240 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1241 target_mq_attr_addr
, 1))
1242 return -TARGET_EFAULT
;
1244 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1245 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1246 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1247 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1249 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1254 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1255 const struct mq_attr
*attr
)
1257 struct target_mq_attr
*target_mq_attr
;
1259 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1260 target_mq_attr_addr
, 0))
1261 return -TARGET_EFAULT
;
1263 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1264 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1265 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1266 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1268 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1274 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1275 /* do_select() must return target values and target errnos. */
1276 static abi_long
do_select(int n
,
1277 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1278 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1280 fd_set rfds
, wfds
, efds
;
1281 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1283 struct timespec ts
, *ts_ptr
;
1286 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1290 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1294 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1299 if (target_tv_addr
) {
1300 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1301 return -TARGET_EFAULT
;
1302 ts
.tv_sec
= tv
.tv_sec
;
1303 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1309 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1312 if (!is_error(ret
)) {
1313 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1314 return -TARGET_EFAULT
;
1315 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1316 return -TARGET_EFAULT
;
1317 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1318 return -TARGET_EFAULT
;
1320 if (target_tv_addr
) {
1321 tv
.tv_sec
= ts
.tv_sec
;
1322 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1323 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1324 return -TARGET_EFAULT
;
1332 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1333 static abi_long
do_old_select(abi_ulong arg1
)
1335 struct target_sel_arg_struct
*sel
;
1336 abi_ulong inp
, outp
, exp
, tvp
;
1339 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1340 return -TARGET_EFAULT
;
1343 nsel
= tswapal(sel
->n
);
1344 inp
= tswapal(sel
->inp
);
1345 outp
= tswapal(sel
->outp
);
1346 exp
= tswapal(sel
->exp
);
1347 tvp
= tswapal(sel
->tvp
);
1349 unlock_user_struct(sel
, arg1
, 0);
1351 return do_select(nsel
, inp
, outp
, exp
, tvp
);
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/* Emulate pselect6/pselect6_time64.  'time64' selects between the
 * 32-bit and 64-bit guest timespec layouts.  Returns a target errno.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else if (target_to_host_timespec(&ts, ts_addr)) {
            return -TARGET_EFAULT;
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/* Emulate poll/ppoll/ppoll_time64.  'ppoll' selects the ppoll variant
 * (timespec + sigmask); 'time64' selects the 64-bit guest timespec
 * layout.  Returns a target errno.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll,
                         bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
1587 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1590 return pipe2(host_pipe
, flags
);
1596 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1597 int flags
, int is_pipe2
)
1601 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1604 return get_errno(ret
);
1606 /* Several targets have special calling conventions for the original
1607 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1609 #if defined(TARGET_ALPHA)
1610 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1611 return host_pipe
[0];
1612 #elif defined(TARGET_MIPS)
1613 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1614 return host_pipe
[0];
1615 #elif defined(TARGET_SH4)
1616 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1617 return host_pipe
[0];
1618 #elif defined(TARGET_SPARC)
1619 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1620 return host_pipe
[0];
1624 if (put_user_s32(host_pipe
[0], pipedes
)
1625 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1626 return -TARGET_EFAULT
;
1627 return get_errno(ret
);
1630 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1631 abi_ulong target_addr
,
1634 struct target_ip_mreqn
*target_smreqn
;
1636 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1638 return -TARGET_EFAULT
;
1639 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1640 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1641 if (len
== sizeof(struct target_ip_mreqn
))
1642 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1643 unlock_user(target_smreqn
, target_addr
, 0);
1648 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1649 abi_ulong target_addr
,
1652 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1653 sa_family_t sa_family
;
1654 struct target_sockaddr
*target_saddr
;
1656 if (fd_trans_target_to_host_addr(fd
)) {
1657 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1660 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1662 return -TARGET_EFAULT
;
1664 sa_family
= tswap16(target_saddr
->sa_family
);
1666 /* Oops. The caller might send a incomplete sun_path; sun_path
1667 * must be terminated by \0 (see the manual page), but
1668 * unfortunately it is quite common to specify sockaddr_un
1669 * length as "strlen(x->sun_path)" while it should be
1670 * "strlen(...) + 1". We'll fix that here if needed.
1671 * Linux kernel has a similar feature.
1674 if (sa_family
== AF_UNIX
) {
1675 if (len
< unix_maxlen
&& len
> 0) {
1676 char *cp
= (char*)target_saddr
;
1678 if ( cp
[len
-1] && !cp
[len
] )
1681 if (len
> unix_maxlen
)
1685 memcpy(addr
, target_saddr
, len
);
1686 addr
->sa_family
= sa_family
;
1687 if (sa_family
== AF_NETLINK
) {
1688 struct sockaddr_nl
*nladdr
;
1690 nladdr
= (struct sockaddr_nl
*)addr
;
1691 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1692 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1693 } else if (sa_family
== AF_PACKET
) {
1694 struct target_sockaddr_ll
*lladdr
;
1696 lladdr
= (struct target_sockaddr_ll
*)addr
;
1697 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1698 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1700 unlock_user(target_saddr
, target_addr
, 0);
1705 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1706 struct sockaddr
*addr
,
1709 struct target_sockaddr
*target_saddr
;
1716 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1718 return -TARGET_EFAULT
;
1719 memcpy(target_saddr
, addr
, len
);
1720 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1721 sizeof(target_saddr
->sa_family
)) {
1722 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1724 if (addr
->sa_family
== AF_NETLINK
&&
1725 len
>= sizeof(struct target_sockaddr_nl
)) {
1726 struct target_sockaddr_nl
*target_nl
=
1727 (struct target_sockaddr_nl
*)target_saddr
;
1728 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1729 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1730 } else if (addr
->sa_family
== AF_PACKET
) {
1731 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1732 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1733 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1734 } else if (addr
->sa_family
== AF_INET6
&&
1735 len
>= sizeof(struct target_sockaddr_in6
)) {
1736 struct target_sockaddr_in6
*target_in6
=
1737 (struct target_sockaddr_in6
*)target_saddr
;
1738 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1740 unlock_user(target_saddr
, target_addr
, len
);
1745 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1746 struct target_msghdr
*target_msgh
)
1748 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1749 abi_long msg_controllen
;
1750 abi_ulong target_cmsg_addr
;
1751 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1752 socklen_t space
= 0;
1754 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1755 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1757 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1758 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1759 target_cmsg_start
= target_cmsg
;
1761 return -TARGET_EFAULT
;
1763 while (cmsg
&& target_cmsg
) {
1764 void *data
= CMSG_DATA(cmsg
);
1765 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1767 int len
= tswapal(target_cmsg
->cmsg_len
)
1768 - sizeof(struct target_cmsghdr
);
1770 space
+= CMSG_SPACE(len
);
1771 if (space
> msgh
->msg_controllen
) {
1772 space
-= CMSG_SPACE(len
);
1773 /* This is a QEMU bug, since we allocated the payload
1774 * area ourselves (unlike overflow in host-to-target
1775 * conversion, which is just the guest giving us a buffer
1776 * that's too small). It can't happen for the payload types
1777 * we currently support; if it becomes an issue in future
1778 * we would need to improve our allocation strategy to
1779 * something more intelligent than "twice the size of the
1780 * target buffer we're reading from".
1782 qemu_log_mask(LOG_UNIMP
,
1783 ("Unsupported ancillary data %d/%d: "
1784 "unhandled msg size\n"),
1785 tswap32(target_cmsg
->cmsg_level
),
1786 tswap32(target_cmsg
->cmsg_type
));
1790 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1791 cmsg
->cmsg_level
= SOL_SOCKET
;
1793 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1795 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1796 cmsg
->cmsg_len
= CMSG_LEN(len
);
1798 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1799 int *fd
= (int *)data
;
1800 int *target_fd
= (int *)target_data
;
1801 int i
, numfds
= len
/ sizeof(int);
1803 for (i
= 0; i
< numfds
; i
++) {
1804 __get_user(fd
[i
], target_fd
+ i
);
1806 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1807 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1808 struct ucred
*cred
= (struct ucred
*)data
;
1809 struct target_ucred
*target_cred
=
1810 (struct target_ucred
*)target_data
;
1812 __get_user(cred
->pid
, &target_cred
->pid
);
1813 __get_user(cred
->uid
, &target_cred
->uid
);
1814 __get_user(cred
->gid
, &target_cred
->gid
);
1816 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1817 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1818 memcpy(data
, target_data
, len
);
1821 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1822 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1825 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1827 msgh
->msg_controllen
= space
;
1831 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1832 struct msghdr
*msgh
)
1834 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1835 abi_long msg_controllen
;
1836 abi_ulong target_cmsg_addr
;
1837 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1838 socklen_t space
= 0;
1840 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1841 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1843 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1844 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1845 target_cmsg_start
= target_cmsg
;
1847 return -TARGET_EFAULT
;
1849 while (cmsg
&& target_cmsg
) {
1850 void *data
= CMSG_DATA(cmsg
);
1851 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1853 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1854 int tgt_len
, tgt_space
;
1856 /* We never copy a half-header but may copy half-data;
1857 * this is Linux's behaviour in put_cmsg(). Note that
1858 * truncation here is a guest problem (which we report
1859 * to the guest via the CTRUNC bit), unlike truncation
1860 * in target_to_host_cmsg, which is a QEMU bug.
1862 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1863 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1867 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1868 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1870 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1872 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1874 /* Payload types which need a different size of payload on
1875 * the target must adjust tgt_len here.
1878 switch (cmsg
->cmsg_level
) {
1880 switch (cmsg
->cmsg_type
) {
1882 tgt_len
= sizeof(struct target_timeval
);
1892 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1893 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1894 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1897 /* We must now copy-and-convert len bytes of payload
1898 * into tgt_len bytes of destination space. Bear in mind
1899 * that in both source and destination we may be dealing
1900 * with a truncated value!
1902 switch (cmsg
->cmsg_level
) {
1904 switch (cmsg
->cmsg_type
) {
1907 int *fd
= (int *)data
;
1908 int *target_fd
= (int *)target_data
;
1909 int i
, numfds
= tgt_len
/ sizeof(int);
1911 for (i
= 0; i
< numfds
; i
++) {
1912 __put_user(fd
[i
], target_fd
+ i
);
1918 struct timeval
*tv
= (struct timeval
*)data
;
1919 struct target_timeval
*target_tv
=
1920 (struct target_timeval
*)target_data
;
1922 if (len
!= sizeof(struct timeval
) ||
1923 tgt_len
!= sizeof(struct target_timeval
)) {
1927 /* copy struct timeval to target */
1928 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1929 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1932 case SCM_CREDENTIALS
:
1934 struct ucred
*cred
= (struct ucred
*)data
;
1935 struct target_ucred
*target_cred
=
1936 (struct target_ucred
*)target_data
;
1938 __put_user(cred
->pid
, &target_cred
->pid
);
1939 __put_user(cred
->uid
, &target_cred
->uid
);
1940 __put_user(cred
->gid
, &target_cred
->gid
);
1949 switch (cmsg
->cmsg_type
) {
1952 uint32_t *v
= (uint32_t *)data
;
1953 uint32_t *t_int
= (uint32_t *)target_data
;
1955 if (len
!= sizeof(uint32_t) ||
1956 tgt_len
!= sizeof(uint32_t)) {
1959 __put_user(*v
, t_int
);
1965 struct sock_extended_err ee
;
1966 struct sockaddr_in offender
;
1968 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1969 struct errhdr_t
*target_errh
=
1970 (struct errhdr_t
*)target_data
;
1972 if (len
!= sizeof(struct errhdr_t
) ||
1973 tgt_len
!= sizeof(struct errhdr_t
)) {
1976 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1977 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1978 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1979 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1980 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1981 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1982 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1983 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1984 (void *) &errh
->offender
, sizeof(errh
->offender
));
1993 switch (cmsg
->cmsg_type
) {
1996 uint32_t *v
= (uint32_t *)data
;
1997 uint32_t *t_int
= (uint32_t *)target_data
;
1999 if (len
!= sizeof(uint32_t) ||
2000 tgt_len
!= sizeof(uint32_t)) {
2003 __put_user(*v
, t_int
);
2009 struct sock_extended_err ee
;
2010 struct sockaddr_in6 offender
;
2012 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2013 struct errhdr6_t
*target_errh
=
2014 (struct errhdr6_t
*)target_data
;
2016 if (len
!= sizeof(struct errhdr6_t
) ||
2017 tgt_len
!= sizeof(struct errhdr6_t
)) {
2020 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2021 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2022 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2023 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2024 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2025 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2026 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2027 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2028 (void *) &errh
->offender
, sizeof(errh
->offender
));
2038 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2039 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2040 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2041 if (tgt_len
> len
) {
2042 memset(target_data
+ len
, 0, tgt_len
- len
);
2046 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2047 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2048 if (msg_controllen
< tgt_space
) {
2049 tgt_space
= msg_controllen
;
2051 msg_controllen
-= tgt_space
;
2053 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2054 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2057 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2059 target_msgh
->msg_controllen
= tswapal(space
);
2063 /* do_setsockopt() Must return target values and target errnos. */
2064 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2065 abi_ulong optval_addr
, socklen_t optlen
)
2069 struct ip_mreqn
*ip_mreq
;
2070 struct ip_mreq_source
*ip_mreq_source
;
2075 /* TCP and UDP options all take an 'int' value. */
2076 if (optlen
< sizeof(uint32_t))
2077 return -TARGET_EINVAL
;
2079 if (get_user_u32(val
, optval_addr
))
2080 return -TARGET_EFAULT
;
2081 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2088 case IP_ROUTER_ALERT
:
2092 case IP_MTU_DISCOVER
:
2099 case IP_MULTICAST_TTL
:
2100 case IP_MULTICAST_LOOP
:
2102 if (optlen
>= sizeof(uint32_t)) {
2103 if (get_user_u32(val
, optval_addr
))
2104 return -TARGET_EFAULT
;
2105 } else if (optlen
>= 1) {
2106 if (get_user_u8(val
, optval_addr
))
2107 return -TARGET_EFAULT
;
2109 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2111 case IP_ADD_MEMBERSHIP
:
2112 case IP_DROP_MEMBERSHIP
:
2113 if (optlen
< sizeof (struct target_ip_mreq
) ||
2114 optlen
> sizeof (struct target_ip_mreqn
))
2115 return -TARGET_EINVAL
;
2117 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2118 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2119 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2122 case IP_BLOCK_SOURCE
:
2123 case IP_UNBLOCK_SOURCE
:
2124 case IP_ADD_SOURCE_MEMBERSHIP
:
2125 case IP_DROP_SOURCE_MEMBERSHIP
:
2126 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2127 return -TARGET_EINVAL
;
2129 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2130 if (!ip_mreq_source
) {
2131 return -TARGET_EFAULT
;
2133 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2134 unlock_user (ip_mreq_source
, optval_addr
, 0);
2143 case IPV6_MTU_DISCOVER
:
2146 case IPV6_RECVPKTINFO
:
2147 case IPV6_UNICAST_HOPS
:
2148 case IPV6_MULTICAST_HOPS
:
2149 case IPV6_MULTICAST_LOOP
:
2151 case IPV6_RECVHOPLIMIT
:
2152 case IPV6_2292HOPLIMIT
:
2155 case IPV6_2292PKTINFO
:
2156 case IPV6_RECVTCLASS
:
2157 case IPV6_RECVRTHDR
:
2158 case IPV6_2292RTHDR
:
2159 case IPV6_RECVHOPOPTS
:
2160 case IPV6_2292HOPOPTS
:
2161 case IPV6_RECVDSTOPTS
:
2162 case IPV6_2292DSTOPTS
:
2164 case IPV6_ADDR_PREFERENCES
:
2165 #ifdef IPV6_RECVPATHMTU
2166 case IPV6_RECVPATHMTU
:
2168 #ifdef IPV6_TRANSPARENT
2169 case IPV6_TRANSPARENT
:
2171 #ifdef IPV6_FREEBIND
2174 #ifdef IPV6_RECVORIGDSTADDR
2175 case IPV6_RECVORIGDSTADDR
:
2178 if (optlen
< sizeof(uint32_t)) {
2179 return -TARGET_EINVAL
;
2181 if (get_user_u32(val
, optval_addr
)) {
2182 return -TARGET_EFAULT
;
2184 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2185 &val
, sizeof(val
)));
2189 struct in6_pktinfo pki
;
2191 if (optlen
< sizeof(pki
)) {
2192 return -TARGET_EINVAL
;
2195 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2196 return -TARGET_EFAULT
;
2199 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2201 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2202 &pki
, sizeof(pki
)));
2205 case IPV6_ADD_MEMBERSHIP
:
2206 case IPV6_DROP_MEMBERSHIP
:
2208 struct ipv6_mreq ipv6mreq
;
2210 if (optlen
< sizeof(ipv6mreq
)) {
2211 return -TARGET_EINVAL
;
2214 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2215 return -TARGET_EFAULT
;
2218 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2220 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2221 &ipv6mreq
, sizeof(ipv6mreq
)));
2232 struct icmp6_filter icmp6f
;
2234 if (optlen
> sizeof(icmp6f
)) {
2235 optlen
= sizeof(icmp6f
);
2238 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2239 return -TARGET_EFAULT
;
2242 for (val
= 0; val
< 8; val
++) {
2243 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2246 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2258 /* those take an u32 value */
2259 if (optlen
< sizeof(uint32_t)) {
2260 return -TARGET_EINVAL
;
2263 if (get_user_u32(val
, optval_addr
)) {
2264 return -TARGET_EFAULT
;
2266 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2267 &val
, sizeof(val
)));
2274 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2279 char *alg_key
= g_malloc(optlen
);
2282 return -TARGET_ENOMEM
;
2284 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2286 return -TARGET_EFAULT
;
2288 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2293 case ALG_SET_AEAD_AUTHSIZE
:
2295 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2304 case TARGET_SOL_SOCKET
:
2306 case TARGET_SO_RCVTIMEO
:
2310 optname
= SO_RCVTIMEO
;
2313 if (optlen
!= sizeof(struct target_timeval
)) {
2314 return -TARGET_EINVAL
;
2317 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2318 return -TARGET_EFAULT
;
2321 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2325 case TARGET_SO_SNDTIMEO
:
2326 optname
= SO_SNDTIMEO
;
2328 case TARGET_SO_ATTACH_FILTER
:
2330 struct target_sock_fprog
*tfprog
;
2331 struct target_sock_filter
*tfilter
;
2332 struct sock_fprog fprog
;
2333 struct sock_filter
*filter
;
2336 if (optlen
!= sizeof(*tfprog
)) {
2337 return -TARGET_EINVAL
;
2339 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2340 return -TARGET_EFAULT
;
2342 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2343 tswapal(tfprog
->filter
), 0)) {
2344 unlock_user_struct(tfprog
, optval_addr
, 1);
2345 return -TARGET_EFAULT
;
2348 fprog
.len
= tswap16(tfprog
->len
);
2349 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2350 if (filter
== NULL
) {
2351 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2352 unlock_user_struct(tfprog
, optval_addr
, 1);
2353 return -TARGET_ENOMEM
;
2355 for (i
= 0; i
< fprog
.len
; i
++) {
2356 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2357 filter
[i
].jt
= tfilter
[i
].jt
;
2358 filter
[i
].jf
= tfilter
[i
].jf
;
2359 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2361 fprog
.filter
= filter
;
2363 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2364 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2367 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2368 unlock_user_struct(tfprog
, optval_addr
, 1);
2371 case TARGET_SO_BINDTODEVICE
:
2373 char *dev_ifname
, *addr_ifname
;
2375 if (optlen
> IFNAMSIZ
- 1) {
2376 optlen
= IFNAMSIZ
- 1;
2378 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2380 return -TARGET_EFAULT
;
2382 optname
= SO_BINDTODEVICE
;
2383 addr_ifname
= alloca(IFNAMSIZ
);
2384 memcpy(addr_ifname
, dev_ifname
, optlen
);
2385 addr_ifname
[optlen
] = 0;
2386 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2387 addr_ifname
, optlen
));
2388 unlock_user (dev_ifname
, optval_addr
, 0);
2391 case TARGET_SO_LINGER
:
2394 struct target_linger
*tlg
;
2396 if (optlen
!= sizeof(struct target_linger
)) {
2397 return -TARGET_EINVAL
;
2399 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2400 return -TARGET_EFAULT
;
2402 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2403 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2404 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2406 unlock_user_struct(tlg
, optval_addr
, 0);
2409 /* Options with 'int' argument. */
2410 case TARGET_SO_DEBUG
:
2413 case TARGET_SO_REUSEADDR
:
2414 optname
= SO_REUSEADDR
;
2417 case TARGET_SO_REUSEPORT
:
2418 optname
= SO_REUSEPORT
;
2421 case TARGET_SO_TYPE
:
2424 case TARGET_SO_ERROR
:
2427 case TARGET_SO_DONTROUTE
:
2428 optname
= SO_DONTROUTE
;
2430 case TARGET_SO_BROADCAST
:
2431 optname
= SO_BROADCAST
;
2433 case TARGET_SO_SNDBUF
:
2434 optname
= SO_SNDBUF
;
2436 case TARGET_SO_SNDBUFFORCE
:
2437 optname
= SO_SNDBUFFORCE
;
2439 case TARGET_SO_RCVBUF
:
2440 optname
= SO_RCVBUF
;
2442 case TARGET_SO_RCVBUFFORCE
:
2443 optname
= SO_RCVBUFFORCE
;
2445 case TARGET_SO_KEEPALIVE
:
2446 optname
= SO_KEEPALIVE
;
2448 case TARGET_SO_OOBINLINE
:
2449 optname
= SO_OOBINLINE
;
2451 case TARGET_SO_NO_CHECK
:
2452 optname
= SO_NO_CHECK
;
2454 case TARGET_SO_PRIORITY
:
2455 optname
= SO_PRIORITY
;
2458 case TARGET_SO_BSDCOMPAT
:
2459 optname
= SO_BSDCOMPAT
;
2462 case TARGET_SO_PASSCRED
:
2463 optname
= SO_PASSCRED
;
2465 case TARGET_SO_PASSSEC
:
2466 optname
= SO_PASSSEC
;
2468 case TARGET_SO_TIMESTAMP
:
2469 optname
= SO_TIMESTAMP
;
2471 case TARGET_SO_RCVLOWAT
:
2472 optname
= SO_RCVLOWAT
;
2477 if (optlen
< sizeof(uint32_t))
2478 return -TARGET_EINVAL
;
2480 if (get_user_u32(val
, optval_addr
))
2481 return -TARGET_EFAULT
;
2482 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2487 case NETLINK_PKTINFO
:
2488 case NETLINK_ADD_MEMBERSHIP
:
2489 case NETLINK_DROP_MEMBERSHIP
:
2490 case NETLINK_BROADCAST_ERROR
:
2491 case NETLINK_NO_ENOBUFS
:
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2493 case NETLINK_LISTEN_ALL_NSID
:
2494 case NETLINK_CAP_ACK
:
2495 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2496 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2497 case NETLINK_EXT_ACK
:
2498 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2499 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2500 case NETLINK_GET_STRICT_CHK
:
2501 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2507 if (optlen
< sizeof(uint32_t)) {
2508 return -TARGET_EINVAL
;
2510 if (get_user_u32(val
, optval_addr
)) {
2511 return -TARGET_EFAULT
;
2513 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2516 #endif /* SOL_NETLINK */
2519 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2521 ret
= -TARGET_ENOPROTOOPT
;
2526 /* do_getsockopt() Must return target values and target errnos. */
2527 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2528 abi_ulong optval_addr
, abi_ulong optlen
)
2535 case TARGET_SOL_SOCKET
:
2538 /* These don't just return a single integer */
2539 case TARGET_SO_PEERNAME
:
2541 case TARGET_SO_RCVTIMEO
: {
2545 optname
= SO_RCVTIMEO
;
2548 if (get_user_u32(len
, optlen
)) {
2549 return -TARGET_EFAULT
;
2552 return -TARGET_EINVAL
;
2556 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2561 if (len
> sizeof(struct target_timeval
)) {
2562 len
= sizeof(struct target_timeval
);
2564 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2565 return -TARGET_EFAULT
;
2567 if (put_user_u32(len
, optlen
)) {
2568 return -TARGET_EFAULT
;
2572 case TARGET_SO_SNDTIMEO
:
2573 optname
= SO_SNDTIMEO
;
2575 case TARGET_SO_PEERCRED
: {
2578 struct target_ucred
*tcr
;
2580 if (get_user_u32(len
, optlen
)) {
2581 return -TARGET_EFAULT
;
2584 return -TARGET_EINVAL
;
2588 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2596 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2597 return -TARGET_EFAULT
;
2599 __put_user(cr
.pid
, &tcr
->pid
);
2600 __put_user(cr
.uid
, &tcr
->uid
);
2601 __put_user(cr
.gid
, &tcr
->gid
);
2602 unlock_user_struct(tcr
, optval_addr
, 1);
2603 if (put_user_u32(len
, optlen
)) {
2604 return -TARGET_EFAULT
;
2608 case TARGET_SO_PEERSEC
: {
2611 if (get_user_u32(len
, optlen
)) {
2612 return -TARGET_EFAULT
;
2615 return -TARGET_EINVAL
;
2617 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2619 return -TARGET_EFAULT
;
2622 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2624 if (put_user_u32(lv
, optlen
)) {
2625 ret
= -TARGET_EFAULT
;
2627 unlock_user(name
, optval_addr
, lv
);
2630 case TARGET_SO_LINGER
:
2634 struct target_linger
*tlg
;
2636 if (get_user_u32(len
, optlen
)) {
2637 return -TARGET_EFAULT
;
2640 return -TARGET_EINVAL
;
2644 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2652 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2653 return -TARGET_EFAULT
;
2655 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2656 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2657 unlock_user_struct(tlg
, optval_addr
, 1);
2658 if (put_user_u32(len
, optlen
)) {
2659 return -TARGET_EFAULT
;
2663 /* Options with 'int' argument. */
2664 case TARGET_SO_DEBUG
:
2667 case TARGET_SO_REUSEADDR
:
2668 optname
= SO_REUSEADDR
;
2671 case TARGET_SO_REUSEPORT
:
2672 optname
= SO_REUSEPORT
;
2675 case TARGET_SO_TYPE
:
2678 case TARGET_SO_ERROR
:
2681 case TARGET_SO_DONTROUTE
:
2682 optname
= SO_DONTROUTE
;
2684 case TARGET_SO_BROADCAST
:
2685 optname
= SO_BROADCAST
;
2687 case TARGET_SO_SNDBUF
:
2688 optname
= SO_SNDBUF
;
2690 case TARGET_SO_RCVBUF
:
2691 optname
= SO_RCVBUF
;
2693 case TARGET_SO_KEEPALIVE
:
2694 optname
= SO_KEEPALIVE
;
2696 case TARGET_SO_OOBINLINE
:
2697 optname
= SO_OOBINLINE
;
2699 case TARGET_SO_NO_CHECK
:
2700 optname
= SO_NO_CHECK
;
2702 case TARGET_SO_PRIORITY
:
2703 optname
= SO_PRIORITY
;
2706 case TARGET_SO_BSDCOMPAT
:
2707 optname
= SO_BSDCOMPAT
;
2710 case TARGET_SO_PASSCRED
:
2711 optname
= SO_PASSCRED
;
2713 case TARGET_SO_TIMESTAMP
:
2714 optname
= SO_TIMESTAMP
;
2716 case TARGET_SO_RCVLOWAT
:
2717 optname
= SO_RCVLOWAT
;
2719 case TARGET_SO_ACCEPTCONN
:
2720 optname
= SO_ACCEPTCONN
;
2722 case TARGET_SO_PROTOCOL
:
2723 optname
= SO_PROTOCOL
;
2725 case TARGET_SO_DOMAIN
:
2726 optname
= SO_DOMAIN
;
2734 /* TCP and UDP options all take an 'int' value. */
2736 if (get_user_u32(len
, optlen
))
2737 return -TARGET_EFAULT
;
2739 return -TARGET_EINVAL
;
2741 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2744 if (optname
== SO_TYPE
) {
2745 val
= host_to_target_sock_type(val
);
2750 if (put_user_u32(val
, optval_addr
))
2751 return -TARGET_EFAULT
;
2753 if (put_user_u8(val
, optval_addr
))
2754 return -TARGET_EFAULT
;
2756 if (put_user_u32(len
, optlen
))
2757 return -TARGET_EFAULT
;
2764 case IP_ROUTER_ALERT
:
2768 case IP_MTU_DISCOVER
:
2774 case IP_MULTICAST_TTL
:
2775 case IP_MULTICAST_LOOP
:
2776 if (get_user_u32(len
, optlen
))
2777 return -TARGET_EFAULT
;
2779 return -TARGET_EINVAL
;
2781 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2784 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2786 if (put_user_u32(len
, optlen
)
2787 || put_user_u8(val
, optval_addr
))
2788 return -TARGET_EFAULT
;
2790 if (len
> sizeof(int))
2792 if (put_user_u32(len
, optlen
)
2793 || put_user_u32(val
, optval_addr
))
2794 return -TARGET_EFAULT
;
2798 ret
= -TARGET_ENOPROTOOPT
;
2804 case IPV6_MTU_DISCOVER
:
2807 case IPV6_RECVPKTINFO
:
2808 case IPV6_UNICAST_HOPS
:
2809 case IPV6_MULTICAST_HOPS
:
2810 case IPV6_MULTICAST_LOOP
:
2812 case IPV6_RECVHOPLIMIT
:
2813 case IPV6_2292HOPLIMIT
:
2816 case IPV6_2292PKTINFO
:
2817 case IPV6_RECVTCLASS
:
2818 case IPV6_RECVRTHDR
:
2819 case IPV6_2292RTHDR
:
2820 case IPV6_RECVHOPOPTS
:
2821 case IPV6_2292HOPOPTS
:
2822 case IPV6_RECVDSTOPTS
:
2823 case IPV6_2292DSTOPTS
:
2825 case IPV6_ADDR_PREFERENCES
:
2826 #ifdef IPV6_RECVPATHMTU
2827 case IPV6_RECVPATHMTU
:
2829 #ifdef IPV6_TRANSPARENT
2830 case IPV6_TRANSPARENT
:
2832 #ifdef IPV6_FREEBIND
2835 #ifdef IPV6_RECVORIGDSTADDR
2836 case IPV6_RECVORIGDSTADDR
:
2838 if (get_user_u32(len
, optlen
))
2839 return -TARGET_EFAULT
;
2841 return -TARGET_EINVAL
;
2843 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2846 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2848 if (put_user_u32(len
, optlen
)
2849 || put_user_u8(val
, optval_addr
))
2850 return -TARGET_EFAULT
;
2852 if (len
> sizeof(int))
2854 if (put_user_u32(len
, optlen
)
2855 || put_user_u32(val
, optval_addr
))
2856 return -TARGET_EFAULT
;
2860 ret
= -TARGET_ENOPROTOOPT
;
2867 case NETLINK_PKTINFO
:
2868 case NETLINK_BROADCAST_ERROR
:
2869 case NETLINK_NO_ENOBUFS
:
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2871 case NETLINK_LISTEN_ALL_NSID
:
2872 case NETLINK_CAP_ACK
:
2873 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2874 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2875 case NETLINK_EXT_ACK
:
2876 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2878 case NETLINK_GET_STRICT_CHK
:
2879 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2880 if (get_user_u32(len
, optlen
)) {
2881 return -TARGET_EFAULT
;
2883 if (len
!= sizeof(val
)) {
2884 return -TARGET_EINVAL
;
2887 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2891 if (put_user_u32(lv
, optlen
)
2892 || put_user_u32(val
, optval_addr
)) {
2893 return -TARGET_EFAULT
;
2896 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2897 case NETLINK_LIST_MEMBERSHIPS
:
2901 if (get_user_u32(len
, optlen
)) {
2902 return -TARGET_EFAULT
;
2905 return -TARGET_EINVAL
;
2907 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2908 if (!results
&& len
> 0) {
2909 return -TARGET_EFAULT
;
2912 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2914 unlock_user(results
, optval_addr
, 0);
2917 /* swap host endianess to target endianess. */
2918 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2919 results
[i
] = tswap32(results
[i
]);
2921 if (put_user_u32(lv
, optlen
)) {
2922 return -TARGET_EFAULT
;
2924 unlock_user(results
, optval_addr
, 0);
2927 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2932 #endif /* SOL_NETLINK */
2935 qemu_log_mask(LOG_UNIMP
,
2936 "getsockopt level=%d optname=%d not yet supported\n",
2938 ret
= -TARGET_EOPNOTSUPP
;
2944 /* Convert target low/high pair representing file offset into the host
2945 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2946 * as the kernel doesn't handle them either.
2948 static void target_to_host_low_high(abi_ulong tlow
,
2950 unsigned long *hlow
,
2951 unsigned long *hhigh
)
2953 uint64_t off
= tlow
|
2954 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2955 TARGET_LONG_BITS
/ 2;
2958 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2961 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2962 abi_ulong count
, int copy
)
2964 struct target_iovec
*target_vec
;
2966 abi_ulong total_len
, max_len
;
2969 bool bad_address
= false;
2975 if (count
> IOV_MAX
) {
2980 vec
= g_try_new0(struct iovec
, count
);
2986 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2987 count
* sizeof(struct target_iovec
), 1);
2988 if (target_vec
== NULL
) {
2993 /* ??? If host page size > target page size, this will result in a
2994 value larger than what we can actually support. */
2995 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2998 for (i
= 0; i
< count
; i
++) {
2999 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3000 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3005 } else if (len
== 0) {
3006 /* Zero length pointer is ignored. */
3007 vec
[i
].iov_base
= 0;
3009 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3010 /* If the first buffer pointer is bad, this is a fault. But
3011 * subsequent bad buffers will result in a partial write; this
3012 * is realized by filling the vector with null pointers and
3014 if (!vec
[i
].iov_base
) {
3025 if (len
> max_len
- total_len
) {
3026 len
= max_len
- total_len
;
3029 vec
[i
].iov_len
= len
;
3033 unlock_user(target_vec
, target_addr
, 0);
3038 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3039 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3042 unlock_user(target_vec
, target_addr
, 0);
3049 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3050 abi_ulong count
, int copy
)
3052 struct target_iovec
*target_vec
;
3055 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3056 count
* sizeof(struct target_iovec
), 1);
3058 for (i
= 0; i
< count
; i
++) {
3059 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3060 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3064 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3066 unlock_user(target_vec
, target_addr
, 0);
3072 static inline int target_to_host_sock_type(int *type
)
3075 int target_type
= *type
;
3077 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3078 case TARGET_SOCK_DGRAM
:
3079 host_type
= SOCK_DGRAM
;
3081 case TARGET_SOCK_STREAM
:
3082 host_type
= SOCK_STREAM
;
3085 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3088 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3089 #if defined(SOCK_CLOEXEC)
3090 host_type
|= SOCK_CLOEXEC
;
3092 return -TARGET_EINVAL
;
3095 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3096 #if defined(SOCK_NONBLOCK)
3097 host_type
|= SOCK_NONBLOCK
;
3098 #elif !defined(O_NONBLOCK)
3099 return -TARGET_EINVAL
;
3106 /* Try to emulate socket type flags after socket creation. */
3107 static int sock_flags_fixup(int fd
, int target_type
)
3109 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3110 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3111 int flags
= fcntl(fd
, F_GETFL
);
3112 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3114 return -TARGET_EINVAL
;
3121 /* do_socket() Must return target values and target errnos. */
3122 static abi_long
do_socket(int domain
, int type
, int protocol
)
3124 int target_type
= type
;
3127 ret
= target_to_host_sock_type(&type
);
3132 if (domain
== PF_NETLINK
&& !(
3133 #ifdef CONFIG_RTNETLINK
3134 protocol
== NETLINK_ROUTE
||
3136 protocol
== NETLINK_KOBJECT_UEVENT
||
3137 protocol
== NETLINK_AUDIT
)) {
3138 return -TARGET_EPROTONOSUPPORT
;
3141 if (domain
== AF_PACKET
||
3142 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3143 protocol
= tswap16(protocol
);
3146 ret
= get_errno(socket(domain
, type
, protocol
));
3148 ret
= sock_flags_fixup(ret
, target_type
);
3149 if (type
== SOCK_PACKET
) {
3150 /* Manage an obsolete case :
3151 * if socket type is SOCK_PACKET, bind by name
3153 fd_trans_register(ret
, &target_packet_trans
);
3154 } else if (domain
== PF_NETLINK
) {
3156 #ifdef CONFIG_RTNETLINK
3158 fd_trans_register(ret
, &target_netlink_route_trans
);
3161 case NETLINK_KOBJECT_UEVENT
:
3162 /* nothing to do: messages are strings */
3165 fd_trans_register(ret
, &target_netlink_audit_trans
);
3168 g_assert_not_reached();
3175 /* do_bind() Must return target values and target errnos. */
3176 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3182 if ((int)addrlen
< 0) {
3183 return -TARGET_EINVAL
;
3186 addr
= alloca(addrlen
+1);
3188 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3192 return get_errno(bind(sockfd
, addr
, addrlen
));
3195 /* do_connect() Must return target values and target errnos. */
3196 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3202 if ((int)addrlen
< 0) {
3203 return -TARGET_EINVAL
;
3206 addr
= alloca(addrlen
+1);
3208 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3212 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3215 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3216 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3217 int flags
, int send
)
3223 abi_ulong target_vec
;
3225 if (msgp
->msg_name
) {
3226 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3227 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3228 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3229 tswapal(msgp
->msg_name
),
3231 if (ret
== -TARGET_EFAULT
) {
3232 /* For connected sockets msg_name and msg_namelen must
3233 * be ignored, so returning EFAULT immediately is wrong.
3234 * Instead, pass a bad msg_name to the host kernel, and
3235 * let it decide whether to return EFAULT or not.
3237 msg
.msg_name
= (void *)-1;
3242 msg
.msg_name
= NULL
;
3243 msg
.msg_namelen
= 0;
3245 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3246 msg
.msg_control
= alloca(msg
.msg_controllen
);
3247 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3249 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3251 count
= tswapal(msgp
->msg_iovlen
);
3252 target_vec
= tswapal(msgp
->msg_iov
);
3254 if (count
> IOV_MAX
) {
3255 /* sendrcvmsg returns a different errno for this condition than
3256 * readv/writev, so we must catch it here before lock_iovec() does.
3258 ret
= -TARGET_EMSGSIZE
;
3262 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3263 target_vec
, count
, send
);
3265 ret
= -host_to_target_errno(errno
);
3268 msg
.msg_iovlen
= count
;
3272 if (fd_trans_target_to_host_data(fd
)) {
3275 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3276 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3277 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3278 msg
.msg_iov
->iov_len
);
3280 msg
.msg_iov
->iov_base
= host_msg
;
3281 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3285 ret
= target_to_host_cmsg(&msg
, msgp
);
3287 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3291 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3292 if (!is_error(ret
)) {
3294 if (fd_trans_host_to_target_data(fd
)) {
3295 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3296 MIN(msg
.msg_iov
->iov_len
, len
));
3298 ret
= host_to_target_cmsg(msgp
, &msg
);
3300 if (!is_error(ret
)) {
3301 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3302 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3303 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3304 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3305 msg
.msg_name
, msg
.msg_namelen
);
3317 unlock_iovec(vec
, target_vec
, count
, !send
);
3322 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3323 int flags
, int send
)
3326 struct target_msghdr
*msgp
;
3328 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3332 return -TARGET_EFAULT
;
3334 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3335 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3339 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3340 * so it might not have this *mmsg-specific flag either.
3342 #ifndef MSG_WAITFORONE
3343 #define MSG_WAITFORONE 0x10000
3346 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3347 unsigned int vlen
, unsigned int flags
,
3350 struct target_mmsghdr
*mmsgp
;
3354 if (vlen
> UIO_MAXIOV
) {
3358 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3360 return -TARGET_EFAULT
;
3363 for (i
= 0; i
< vlen
; i
++) {
3364 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3365 if (is_error(ret
)) {
3368 mmsgp
[i
].msg_len
= tswap32(ret
);
3369 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3370 if (flags
& MSG_WAITFORONE
) {
3371 flags
|= MSG_DONTWAIT
;
3375 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3377 /* Return number of datagrams sent if we sent any at all;
3378 * otherwise return the error.
3386 /* do_accept4() Must return target values and target errnos. */
3387 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3388 abi_ulong target_addrlen_addr
, int flags
)
3390 socklen_t addrlen
, ret_addrlen
;
3395 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3397 if (target_addr
== 0) {
3398 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3401 /* linux returns EFAULT if addrlen pointer is invalid */
3402 if (get_user_u32(addrlen
, target_addrlen_addr
))
3403 return -TARGET_EFAULT
;
3405 if ((int)addrlen
< 0) {
3406 return -TARGET_EINVAL
;
3409 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3410 return -TARGET_EFAULT
;
3413 addr
= alloca(addrlen
);
3415 ret_addrlen
= addrlen
;
3416 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3417 if (!is_error(ret
)) {
3418 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3419 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3420 ret
= -TARGET_EFAULT
;
3426 /* do_getpeername() Must return target values and target errnos. */
3427 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3428 abi_ulong target_addrlen_addr
)
3430 socklen_t addrlen
, ret_addrlen
;
3434 if (get_user_u32(addrlen
, target_addrlen_addr
))
3435 return -TARGET_EFAULT
;
3437 if ((int)addrlen
< 0) {
3438 return -TARGET_EINVAL
;
3441 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3442 return -TARGET_EFAULT
;
3445 addr
= alloca(addrlen
);
3447 ret_addrlen
= addrlen
;
3448 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3449 if (!is_error(ret
)) {
3450 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3451 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3452 ret
= -TARGET_EFAULT
;
3458 /* do_getsockname() Must return target values and target errnos. */
3459 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3460 abi_ulong target_addrlen_addr
)
3462 socklen_t addrlen
, ret_addrlen
;
3466 if (get_user_u32(addrlen
, target_addrlen_addr
))
3467 return -TARGET_EFAULT
;
3469 if ((int)addrlen
< 0) {
3470 return -TARGET_EINVAL
;
3473 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3474 return -TARGET_EFAULT
;
3477 addr
= alloca(addrlen
);
3479 ret_addrlen
= addrlen
;
3480 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3481 if (!is_error(ret
)) {
3482 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3483 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3484 ret
= -TARGET_EFAULT
;
3490 /* do_socketpair() Must return target values and target errnos. */
3491 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3492 abi_ulong target_tab_addr
)
3497 target_to_host_sock_type(&type
);
3499 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3500 if (!is_error(ret
)) {
3501 if (put_user_s32(tab
[0], target_tab_addr
)
3502 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3503 ret
= -TARGET_EFAULT
;
3508 /* do_sendto() Must return target values and target errnos. */
3509 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3510 abi_ulong target_addr
, socklen_t addrlen
)
3514 void *copy_msg
= NULL
;
3517 if ((int)addrlen
< 0) {
3518 return -TARGET_EINVAL
;
3521 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3523 return -TARGET_EFAULT
;
3524 if (fd_trans_target_to_host_data(fd
)) {
3525 copy_msg
= host_msg
;
3526 host_msg
= g_malloc(len
);
3527 memcpy(host_msg
, copy_msg
, len
);
3528 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3534 addr
= alloca(addrlen
+1);
3535 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3539 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3541 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3546 host_msg
= copy_msg
;
3548 unlock_user(host_msg
, msg
, 0);
3552 /* do_recvfrom() Must return target values and target errnos. */
3553 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3554 abi_ulong target_addr
,
3555 abi_ulong target_addrlen
)
3557 socklen_t addrlen
, ret_addrlen
;
3565 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3567 return -TARGET_EFAULT
;
3571 if (get_user_u32(addrlen
, target_addrlen
)) {
3572 ret
= -TARGET_EFAULT
;
3575 if ((int)addrlen
< 0) {
3576 ret
= -TARGET_EINVAL
;
3579 addr
= alloca(addrlen
);
3580 ret_addrlen
= addrlen
;
3581 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3582 addr
, &ret_addrlen
));
3584 addr
= NULL
; /* To keep compiler quiet. */
3585 addrlen
= 0; /* To keep compiler quiet. */
3586 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3588 if (!is_error(ret
)) {
3589 if (fd_trans_host_to_target_data(fd
)) {
3591 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3592 if (is_error(trans
)) {
3598 host_to_target_sockaddr(target_addr
, addr
,
3599 MIN(addrlen
, ret_addrlen
));
3600 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3601 ret
= -TARGET_EFAULT
;
3605 unlock_user(host_msg
, msg
, len
);
3608 unlock_user(host_msg
, msg
, 0);
3613 #ifdef TARGET_NR_socketcall
3614 /* do_socketcall() must return target values and target errnos. */
3615 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3617 static const unsigned nargs
[] = { /* number of arguments per operation */
3618 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3619 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3620 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3621 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3622 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3623 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3624 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3625 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3626 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3627 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3628 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3629 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3630 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3631 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3632 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3633 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3634 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3635 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3636 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3637 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3639 abi_long a
[6]; /* max 6 args */
3642 /* check the range of the first argument num */
3643 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3644 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3645 return -TARGET_EINVAL
;
3647 /* ensure we have space for args */
3648 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3649 return -TARGET_EINVAL
;
3651 /* collect the arguments in a[] according to nargs[] */
3652 for (i
= 0; i
< nargs
[num
]; ++i
) {
3653 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3654 return -TARGET_EFAULT
;
3657 /* now when we have the args, invoke the appropriate underlying function */
3659 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3660 return do_socket(a
[0], a
[1], a
[2]);
3661 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3662 return do_bind(a
[0], a
[1], a
[2]);
3663 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3664 return do_connect(a
[0], a
[1], a
[2]);
3665 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3666 return get_errno(listen(a
[0], a
[1]));
3667 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3668 return do_accept4(a
[0], a
[1], a
[2], 0);
3669 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3670 return do_getsockname(a
[0], a
[1], a
[2]);
3671 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3672 return do_getpeername(a
[0], a
[1], a
[2]);
3673 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3674 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3675 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3676 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3677 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3678 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3679 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3680 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3681 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3682 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3683 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3684 return get_errno(shutdown(a
[0], a
[1]));
3685 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3686 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3687 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3688 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3689 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3690 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3691 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3692 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3693 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3694 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3695 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3696 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3697 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3698 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3700 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3701 return -TARGET_EINVAL
;
3706 #define N_SHM_REGIONS 32
3708 static struct shm_region
{
3712 } shm_regions
[N_SHM_REGIONS
];
3714 #ifndef TARGET_SEMID64_DS
3715 /* asm-generic version of this struct */
3716 struct target_semid64_ds
3718 struct target_ipc_perm sem_perm
;
3719 abi_ulong sem_otime
;
3720 #if TARGET_ABI_BITS == 32
3721 abi_ulong __unused1
;
3723 abi_ulong sem_ctime
;
3724 #if TARGET_ABI_BITS == 32
3725 abi_ulong __unused2
;
3727 abi_ulong sem_nsems
;
3728 abi_ulong __unused3
;
3729 abi_ulong __unused4
;
3733 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3734 abi_ulong target_addr
)
3736 struct target_ipc_perm
*target_ip
;
3737 struct target_semid64_ds
*target_sd
;
3739 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3740 return -TARGET_EFAULT
;
3741 target_ip
= &(target_sd
->sem_perm
);
3742 host_ip
->__key
= tswap32(target_ip
->__key
);
3743 host_ip
->uid
= tswap32(target_ip
->uid
);
3744 host_ip
->gid
= tswap32(target_ip
->gid
);
3745 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3746 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3747 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3748 host_ip
->mode
= tswap32(target_ip
->mode
);
3750 host_ip
->mode
= tswap16(target_ip
->mode
);
3752 #if defined(TARGET_PPC)
3753 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3755 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3757 unlock_user_struct(target_sd
, target_addr
, 0);
/* Inverse of target_to_host_ipc_perm(): write *host_ip into the guest
 * semid64_ds at target_addr with the per-target field-width swaps.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    /* these targets declare mode as a 32-bit field */
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Convert a guest semid64_ds at target_addr into *host_sd
 * (perm sub-struct plus the nsems/otime/ctime fields).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* re-locks target_addr internally; relies on lock_user nesting */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Write *host_sd back into the guest semid64_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest layout of struct seminfo (semctl IPC_INFO / SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

/* Copy a host struct seminfo out to the guest at target_addr,
 * field by field via __put_user (handles swapping).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/* Host-side semctl argument union (pointer members). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest-side equivalent: pointers are guest addresses (abi_ulong). */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/* Allocate *host_array and fill it from the guest semaphore-value array
 * at target_addr.  The element count is taken from the kernel via
 * semctl(IPC_STAT), not from the guest.  On success the caller owns
 * *host_array and must free it (host_to_target_semarray does so).
 * Returns 0, -TARGET_ENOMEM, -TARGET_EFAULT, or a semctl errno. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
/* Copy *host_array back to the guest semaphore-value array at
 * target_addr and free it (ownership taken from
 * target_to_host_semarray).  Element count again comes from
 * semctl(IPC_STAT).  Returns 0, -TARGET_EFAULT, or a semctl errno.
 * NOTE(review): the early error returns leave *host_array allocated;
 * the caller frees nothing, so those paths appear to leak — confirm
 * against upstream before changing. */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
/* Emulate semctl(2) for the guest.  target_arg is the guest's
 * union semun passed by value; each command class converts its
 * operand in/out around the host semctl call.
 * Returns host result or a -TARGET_* errno. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        /* copies results back and frees the host array */
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* no operand conversion needed for these commands */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
/* Guest layout of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

/* Copy nsops guest sembuf entries from target_addr into the
 * caller-allocated host_sembuf array.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif
/* Emulate semop()/semtimedop().  ptr is the guest sembuf array,
 * timeout the guest timespec address (0 = block forever); time64
 * selects 64-bit vs 32-bit guest timespec layout.  Falls back to
 * the multiplexed ipc(2) syscall when __NR_semtimedop is absent. */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        /* argument order differs on s390x; see SEMTIMEDOP_IPC_ARGS */
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
/* Guest layout of struct msqid_ds (msgctl IPC_STAT/IPC_SET). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;   /* pads 32-bit time fields to 64 bits */
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
/* Convert a guest msqid_ds at target_addr into *host_md.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
/* Write *host_md back into the guest msqid_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
/* Guest layout of struct msginfo (msgctl IPC_INFO / MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

/* Copy a host struct msginfo out to the guest at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/* Emulate msgctl(2): convert the per-command operand in/out around
 * the host call.  Returns host result or a -TARGET_* errno. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* kernel takes a msqid_ds* here but fills a msginfo */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest layout of struct msgbuf; mtext is variable length. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

/* Emulate msgsnd(2): copy the guest message into a host msgbuf and
 * send it, falling back to the multiplexed ipc(2) syscall if needed.
 * Returns host result or a -TARGET_* errno. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding mtext */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#if defined(__sparc__)
        /* sparc ipc(2) does not take the trailing dummy argument */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
/* Emulate msgrcv(2): receive into a host msgbuf, then copy mtype and
 * the received bytes back to the guest message at msgp.  Falls back
 * to the multiplexed ipc(2) syscall when __NR_msgrcv is absent.
 * Returns bytes received or a -TARGET_* errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* mtext lives right after the abi_ulong mtype in guest memory */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Convert a guest shmid_ds at target_addr into *host_sd.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Write *host_sd back into the guest shmid_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest layout of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

/* Copy a host struct shminfo out to the guest at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest layout of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

/* Copy a host struct shm_info out to the guest at target_addr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2): convert the per-command operand in/out around
 * the host call.  Returns host result or a -TARGET_* errno. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* kernel takes shmid_ds* but fills a shminfo here */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2).  Validates guest alignment against the target
 * SHMLBA, finds a guest address if none was given, maps via host
 * shmat(), updates guest page flags and records the mapping in
 * shm_regions[] for do_shmdt().  Returns the guest address or a
 * -TARGET_* errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
                                   MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* remember the mapping so do_shmdt can undo the page flags */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Emulate shmdt(2): clear the recorded region's page flags and detach
 * the host mapping.  Returns 0 or a -TARGET_* errno. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/* Dispatcher for the multiplexed ipc(2) syscall: decodes the call
 * number (low 16 bits) and interface version (high 16 bits) and
 * forwards to the per-operation do_* helpers above. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style (version 0) msgrcv packs msgp/msgtyp in a
                 * kludge struct pointed to by ptr */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4712 /* kernel structure types definitions */
4714 #define STRUCT(name, ...) STRUCT_ ## name,
4715 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4717 #include "syscall_types.h"
4721 #undef STRUCT_SPECIAL
4723 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4724 #define STRUCT_SPECIAL(name)
4725 #include "syscall_types.h"
4727 #undef STRUCT_SPECIAL
4729 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Thunk FS_IOC_FIEMAP: convert the guest fiemap header in, run the
 * ioctl, then convert the header plus fm_mapped_extents extent
 * records back out.  Large extent arrays get a temporary buffer. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4820 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4821 int fd
, int cmd
, abi_long arg
)
4823 const argtype
*arg_type
= ie
->arg_type
;
4827 struct ifconf
*host_ifconf
;
4829 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4830 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4831 int target_ifreq_size
;
4836 abi_long target_ifc_buf
;
4840 assert(arg_type
[0] == TYPE_PTR
);
4841 assert(ie
->access
== IOC_RW
);
4844 target_size
= thunk_type_size(arg_type
, 0);
4846 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4848 return -TARGET_EFAULT
;
4849 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4850 unlock_user(argptr
, arg
, 0);
4852 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4853 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4854 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4856 if (target_ifc_buf
!= 0) {
4857 target_ifc_len
= host_ifconf
->ifc_len
;
4858 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4859 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4861 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4862 if (outbufsz
> MAX_STRUCT_SIZE
) {
4864 * We can't fit all the extents into the fixed size buffer.
4865 * Allocate one that is large enough and use it instead.
4867 host_ifconf
= malloc(outbufsz
);
4869 return -TARGET_ENOMEM
;
4871 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4874 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4876 host_ifconf
->ifc_len
= host_ifc_len
;
4878 host_ifc_buf
= NULL
;
4880 host_ifconf
->ifc_buf
= host_ifc_buf
;
4882 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4883 if (!is_error(ret
)) {
4884 /* convert host ifc_len to target ifc_len */
4886 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4887 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4888 host_ifconf
->ifc_len
= target_ifc_len
;
4890 /* restore target ifc_buf */
4892 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4894 /* copy struct ifconf to target user */
4896 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4898 return -TARGET_EFAULT
;
4899 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4900 unlock_user(argptr
, arg
, target_size
);
4902 if (target_ifc_buf
!= 0) {
4903 /* copy ifreq[] to target user */
4904 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4905 for (i
= 0; i
< nb_ifreq
; i
++) {
4906 thunk_convert(argptr
+ i
* target_ifreq_size
,
4907 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4908 ifreq_arg_type
, THUNK_TARGET
);
4910 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/* Per-URB bookkeeping: pairs the guest URB/buffer addresses with the
 * host urb actually handed to the kernel, so reap/discard can map the
 * kernel's returned pointer back to guest state. */
struct live_urb {
    uint64_t target_urb_adr;    /* guest address of the guest URB */
    uint64_t target_buf_adr;    /* guest address of the data buffer */
    char *target_buf_ptr;       /* locked host view of that buffer */
    struct usbdevfs_urb host_urb;
};

/* Lazily-created singleton table of in-flight URBs, keyed by the
 * 64-bit guest URB address stored at the start of live_urb. */
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}
/* Register an in-flight URB; keyed by its leading target_urb_adr. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}
/* Find the live_urb for a guest URB address, or NULL if unknown. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}
/* Drop a completed/cancelled URB from the in-flight table. */
static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
/* Thunk USBDEVFS_REAPURB{,NDELAY}: the kernel returns a host urb
 * pointer; recover the owning live_urb, unlock the guest data buffer,
 * copy the completed urb back to the guest, and write the guest URB
 * handle to *arg.  Frees the live_urb. */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* host_urb is embedded in live_urb; step back to the container */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
/* Thunk USBDEVFS_DISCARDURB: look up the in-flight URB by its guest
 * address and pass the corresponding host urb to the kernel.  The
 * live_urb stays registered; reap cleans it up. */
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
/* Thunk USBDEVFS_SUBMITURB: build a host urb (plus metadata) from the
 * guest URB at arg, lock the guest data buffer for the transfer's
 * direction, submit, and register the URB for later reap/discard. */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
5097 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5098 int cmd
, abi_long arg
)
5101 struct dm_ioctl
*host_dm
;
5102 abi_long guest_data
;
5103 uint32_t guest_data_size
;
5105 const argtype
*arg_type
= ie
->arg_type
;
5107 void *big_buf
= NULL
;
5111 target_size
= thunk_type_size(arg_type
, 0);
5112 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5114 ret
= -TARGET_EFAULT
;
5117 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5118 unlock_user(argptr
, arg
, 0);
5120 /* buf_temp is too small, so fetch things into a bigger buffer */
5121 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5122 memcpy(big_buf
, buf_temp
, target_size
);
5126 guest_data
= arg
+ host_dm
->data_start
;
5127 if ((guest_data
- arg
) < 0) {
5128 ret
= -TARGET_EINVAL
;
5131 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5132 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5134 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5136 ret
= -TARGET_EFAULT
;
5140 switch (ie
->host_cmd
) {
5142 case DM_LIST_DEVICES
:
5145 case DM_DEV_SUSPEND
:
5148 case DM_TABLE_STATUS
:
5149 case DM_TABLE_CLEAR
:
5151 case DM_LIST_VERSIONS
:
5155 case DM_DEV_SET_GEOMETRY
:
5156 /* data contains only strings */
5157 memcpy(host_data
, argptr
, guest_data_size
);
5160 memcpy(host_data
, argptr
, guest_data_size
);
5161 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5165 void *gspec
= argptr
;
5166 void *cur_data
= host_data
;
5167 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5168 int spec_size
= thunk_type_size(arg_type
, 0);
5171 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5172 struct dm_target_spec
*spec
= cur_data
;
5176 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5177 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5179 spec
->next
= sizeof(*spec
) + slen
;
5180 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5182 cur_data
+= spec
->next
;
5187 ret
= -TARGET_EINVAL
;
5188 unlock_user(argptr
, guest_data
, 0);
5191 unlock_user(argptr
, guest_data
, 0);
5193 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5194 if (!is_error(ret
)) {
5195 guest_data
= arg
+ host_dm
->data_start
;
5196 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5197 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5198 switch (ie
->host_cmd
) {
5203 case DM_DEV_SUSPEND
:
5206 case DM_TABLE_CLEAR
:
5208 case DM_DEV_SET_GEOMETRY
:
5209 /* no return data */
5211 case DM_LIST_DEVICES
:
5213 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5214 uint32_t remaining_data
= guest_data_size
;
5215 void *cur_data
= argptr
;
5216 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5217 int nl_size
= 12; /* can't use thunk_size due to alignment */
5220 uint32_t next
= nl
->next
;
5222 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5224 if (remaining_data
< nl
->next
) {
5225 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5228 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5229 strcpy(cur_data
+ nl_size
, nl
->name
);
5230 cur_data
+= nl
->next
;
5231 remaining_data
-= nl
->next
;
5235 nl
= (void*)nl
+ next
;
5240 case DM_TABLE_STATUS
:
5242 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5243 void *cur_data
= argptr
;
5244 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5245 int spec_size
= thunk_type_size(arg_type
, 0);
5248 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5249 uint32_t next
= spec
->next
;
5250 int slen
= strlen((char*)&spec
[1]) + 1;
5251 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5252 if (guest_data_size
< spec
->next
) {
5253 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5256 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5257 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5258 cur_data
= argptr
+ spec
->next
;
5259 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5265 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5266 int count
= *(uint32_t*)hdata
;
5267 uint64_t *hdev
= hdata
+ 8;
5268 uint64_t *gdev
= argptr
+ 8;
5271 *(uint32_t*)argptr
= tswap32(count
);
5272 for (i
= 0; i
< count
; i
++) {
5273 *gdev
= tswap64(*hdev
);
5279 case DM_LIST_VERSIONS
:
5281 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5282 uint32_t remaining_data
= guest_data_size
;
5283 void *cur_data
= argptr
;
5284 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5285 int vers_size
= thunk_type_size(arg_type
, 0);
5288 uint32_t next
= vers
->next
;
5290 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5292 if (remaining_data
< vers
->next
) {
5293 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5296 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5297 strcpy(cur_data
+ vers_size
, vers
->name
);
5298 cur_data
+= vers
->next
;
5299 remaining_data
-= vers
->next
;
5303 vers
= (void*)vers
+ next
;
5308 unlock_user(argptr
, guest_data
, 0);
5309 ret
= -TARGET_EINVAL
;
5312 unlock_user(argptr
, guest_data
, guest_data_size
);
5314 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5316 ret
= -TARGET_EFAULT
;
5319 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5320 unlock_user(argptr
, arg
, target_size
);
5327 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5328 int cmd
, abi_long arg
)
5332 const argtype
*arg_type
= ie
->arg_type
;
5333 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5336 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5337 struct blkpg_partition host_part
;
5339 /* Read and convert blkpg */
5341 target_size
= thunk_type_size(arg_type
, 0);
5342 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5344 ret
= -TARGET_EFAULT
;
5347 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5348 unlock_user(argptr
, arg
, 0);
5350 switch (host_blkpg
->op
) {
5351 case BLKPG_ADD_PARTITION
:
5352 case BLKPG_DEL_PARTITION
:
5353 /* payload is struct blkpg_partition */
5356 /* Unknown opcode */
5357 ret
= -TARGET_EINVAL
;
5361 /* Read and convert blkpg->data */
5362 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5363 target_size
= thunk_type_size(part_arg_type
, 0);
5364 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5366 ret
= -TARGET_EFAULT
;
5369 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5370 unlock_user(argptr
, arg
, 0);
5372 /* Swizzle the data pointer to our local copy and call! */
5373 host_blkpg
->data
= &host_part
;
5374 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5380 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5381 int fd
, int cmd
, abi_long arg
)
5383 const argtype
*arg_type
= ie
->arg_type
;
5384 const StructEntry
*se
;
5385 const argtype
*field_types
;
5386 const int *dst_offsets
, *src_offsets
;
5389 abi_ulong
*target_rt_dev_ptr
= NULL
;
5390 unsigned long *host_rt_dev_ptr
= NULL
;
5394 assert(ie
->access
== IOC_W
);
5395 assert(*arg_type
== TYPE_PTR
);
5397 assert(*arg_type
== TYPE_STRUCT
);
5398 target_size
= thunk_type_size(arg_type
, 0);
5399 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5401 return -TARGET_EFAULT
;
5404 assert(*arg_type
== (int)STRUCT_rtentry
);
5405 se
= struct_entries
+ *arg_type
++;
5406 assert(se
->convert
[0] == NULL
);
5407 /* convert struct here to be able to catch rt_dev string */
5408 field_types
= se
->field_types
;
5409 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5410 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5411 for (i
= 0; i
< se
->nb_fields
; i
++) {
5412 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5413 assert(*field_types
== TYPE_PTRVOID
);
5414 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5415 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5416 if (*target_rt_dev_ptr
!= 0) {
5417 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5418 tswapal(*target_rt_dev_ptr
));
5419 if (!*host_rt_dev_ptr
) {
5420 unlock_user(argptr
, arg
, 0);
5421 return -TARGET_EFAULT
;
5424 *host_rt_dev_ptr
= 0;
5429 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5430 argptr
+ src_offsets
[i
],
5431 field_types
, THUNK_HOST
);
5433 unlock_user(argptr
, arg
, 0);
5435 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5437 assert(host_rt_dev_ptr
!= NULL
);
5438 assert(target_rt_dev_ptr
!= NULL
);
5439 if (*host_rt_dev_ptr
!= 0) {
5440 unlock_user((void *)*host_rt_dev_ptr
,
5441 *target_rt_dev_ptr
, 0);
5446 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5447 int fd
, int cmd
, abi_long arg
)
5449 int sig
= target_to_host_signal(arg
);
5450 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5453 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5454 int fd
, int cmd
, abi_long arg
)
5459 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5460 if (is_error(ret
)) {
5464 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5465 if (copy_to_user_timeval(arg
, &tv
)) {
5466 return -TARGET_EFAULT
;
5469 if (copy_to_user_timeval64(arg
, &tv
)) {
5470 return -TARGET_EFAULT
;
5477 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5478 int fd
, int cmd
, abi_long arg
)
5483 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5484 if (is_error(ret
)) {
5488 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5489 if (host_to_target_timespec(arg
, &ts
)) {
5490 return -TARGET_EFAULT
;
5493 if (host_to_target_timespec64(arg
, &ts
)) {
5494 return -TARGET_EFAULT
;
5502 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5503 int fd
, int cmd
, abi_long arg
)
5505 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5506 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5512 static void unlock_drm_version(struct drm_version
*host_ver
,
5513 struct target_drm_version
*target_ver
,
5516 unlock_user(host_ver
->name
, target_ver
->name
,
5517 copy
? host_ver
->name_len
: 0);
5518 unlock_user(host_ver
->date
, target_ver
->date
,
5519 copy
? host_ver
->date_len
: 0);
5520 unlock_user(host_ver
->desc
, target_ver
->desc
,
5521 copy
? host_ver
->desc_len
: 0);
5524 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5525 struct target_drm_version
*target_ver
)
5527 memset(host_ver
, 0, sizeof(*host_ver
));
5529 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5530 if (host_ver
->name_len
) {
5531 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5532 target_ver
->name_len
, 0);
5533 if (!host_ver
->name
) {
5538 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5539 if (host_ver
->date_len
) {
5540 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5541 target_ver
->date_len
, 0);
5542 if (!host_ver
->date
) {
5547 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5548 if (host_ver
->desc_len
) {
5549 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5550 target_ver
->desc_len
, 0);
5551 if (!host_ver
->desc
) {
5558 unlock_drm_version(host_ver
, target_ver
, false);
5562 static inline void host_to_target_drmversion(
5563 struct target_drm_version
*target_ver
,
5564 struct drm_version
*host_ver
)
5566 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5567 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5568 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5569 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5570 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5571 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5572 unlock_drm_version(host_ver
, target_ver
, true);
5575 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5576 int fd
, int cmd
, abi_long arg
)
5578 struct drm_version
*ver
;
5579 struct target_drm_version
*target_ver
;
5582 switch (ie
->host_cmd
) {
5583 case DRM_IOCTL_VERSION
:
5584 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5585 return -TARGET_EFAULT
;
5587 ver
= (struct drm_version
*)buf_temp
;
5588 ret
= target_to_host_drmversion(ver
, target_ver
);
5589 if (!is_error(ret
)) {
5590 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5591 if (is_error(ret
)) {
5592 unlock_drm_version(ver
, target_ver
, false);
5594 host_to_target_drmversion(target_ver
, ver
);
5597 unlock_user_struct(target_ver
, arg
, 0);
5600 return -TARGET_ENOSYS
;
5603 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5604 struct drm_i915_getparam
*gparam
,
5605 int fd
, abi_long arg
)
5609 struct target_drm_i915_getparam
*target_gparam
;
5611 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5612 return -TARGET_EFAULT
;
5615 __get_user(gparam
->param
, &target_gparam
->param
);
5616 gparam
->value
= &value
;
5617 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5618 put_user_s32(value
, target_gparam
->value
);
5620 unlock_user_struct(target_gparam
, arg
, 0);
5624 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5625 int fd
, int cmd
, abi_long arg
)
5627 switch (ie
->host_cmd
) {
5628 case DRM_IOCTL_I915_GETPARAM
:
5629 return do_ioctl_drm_i915_getparam(ie
,
5630 (struct drm_i915_getparam
*)buf_temp
,
5633 return -TARGET_ENOSYS
;
5639 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5640 int fd
, int cmd
, abi_long arg
)
5642 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5643 struct tun_filter
*target_filter
;
5646 assert(ie
->access
== IOC_W
);
5648 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5649 if (!target_filter
) {
5650 return -TARGET_EFAULT
;
5652 filter
->flags
= tswap16(target_filter
->flags
);
5653 filter
->count
= tswap16(target_filter
->count
);
5654 unlock_user(target_filter
, arg
, 0);
5656 if (filter
->count
) {
5657 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5659 return -TARGET_EFAULT
;
5662 target_addr
= lock_user(VERIFY_READ
,
5663 arg
+ offsetof(struct tun_filter
, addr
),
5664 filter
->count
* ETH_ALEN
, 1);
5666 return -TARGET_EFAULT
;
5668 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5669 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5672 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5675 IOCTLEntry ioctl_entries
[] = {
5676 #define IOCTL(cmd, access, ...) \
5677 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5678 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5679 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5680 #define IOCTL_IGNORE(cmd) \
5681 { TARGET_ ## cmd, 0, #cmd },
5686 /* ??? Implement proper locking for ioctls. */
5687 /* do_ioctl() Must return target values and target errnos. */
5688 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5690 const IOCTLEntry
*ie
;
5691 const argtype
*arg_type
;
5693 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5699 if (ie
->target_cmd
== 0) {
5701 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5702 return -TARGET_ENOSYS
;
5704 if (ie
->target_cmd
== cmd
)
5708 arg_type
= ie
->arg_type
;
5710 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5711 } else if (!ie
->host_cmd
) {
5712 /* Some architectures define BSD ioctls in their headers
5713 that are not implemented in Linux. */
5714 return -TARGET_ENOSYS
;
5717 switch(arg_type
[0]) {
5720 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5726 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5730 target_size
= thunk_type_size(arg_type
, 0);
5731 switch(ie
->access
) {
5733 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5734 if (!is_error(ret
)) {
5735 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5737 return -TARGET_EFAULT
;
5738 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5739 unlock_user(argptr
, arg
, target_size
);
5743 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5745 return -TARGET_EFAULT
;
5746 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5747 unlock_user(argptr
, arg
, 0);
5748 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5752 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5754 return -TARGET_EFAULT
;
5755 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5756 unlock_user(argptr
, arg
, 0);
5757 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5758 if (!is_error(ret
)) {
5759 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5761 return -TARGET_EFAULT
;
5762 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5763 unlock_user(argptr
, arg
, target_size
);
5769 qemu_log_mask(LOG_UNIMP
,
5770 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5771 (long)cmd
, arg_type
[0]);
5772 ret
= -TARGET_ENOSYS
;
5778 static const bitmask_transtbl iflag_tbl
[] = {
5779 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5780 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5781 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5782 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5783 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5784 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5785 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5786 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5787 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5788 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5789 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5790 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5791 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5792 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5793 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5797 static const bitmask_transtbl oflag_tbl
[] = {
5798 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5799 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5800 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5801 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5802 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5803 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5804 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5805 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5806 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5807 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5808 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5809 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5810 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5811 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5812 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5813 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5814 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5815 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5816 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5817 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5818 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5819 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5820 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5821 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5825 static const bitmask_transtbl cflag_tbl
[] = {
5826 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5827 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5828 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5829 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5830 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5831 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5832 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5833 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5834 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5835 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5836 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5837 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5838 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5839 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5840 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5841 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5842 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5843 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5844 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5845 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5846 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5847 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5848 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5849 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5850 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5851 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5852 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5853 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5854 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5855 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5856 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5860 static const bitmask_transtbl lflag_tbl
[] = {
5861 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5862 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5863 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5864 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5865 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5866 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5867 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5868 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5869 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5870 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5871 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5872 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5873 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5874 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5875 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5876 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5880 static void target_to_host_termios (void *dst
, const void *src
)
5882 struct host_termios
*host
= dst
;
5883 const struct target_termios
*target
= src
;
5886 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5888 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5890 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5892 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5893 host
->c_line
= target
->c_line
;
5895 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5896 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5897 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5898 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5899 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5900 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5901 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5902 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5903 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5904 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5905 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5906 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5907 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5908 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5909 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5910 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5911 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5912 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5915 static void host_to_target_termios (void *dst
, const void *src
)
5917 struct target_termios
*target
= dst
;
5918 const struct host_termios
*host
= src
;
5921 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5923 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5925 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5927 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5928 target
->c_line
= host
->c_line
;
5930 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5931 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5932 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5933 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5934 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5935 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5936 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5937 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5938 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5939 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5940 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5941 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5942 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5943 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5944 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5945 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5946 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5947 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5950 static const StructEntry struct_termios_def
= {
5951 .convert
= { host_to_target_termios
, target_to_host_termios
},
5952 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5953 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5954 .print
= print_termios
,
5957 static const bitmask_transtbl mmap_flags_tbl
[] = {
5958 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5959 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5960 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5961 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5962 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5963 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5964 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5965 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5966 MAP_DENYWRITE
, MAP_DENYWRITE
},
5967 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5968 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5969 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5970 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5971 MAP_NORESERVE
, MAP_NORESERVE
},
5972 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5973 /* MAP_STACK had been ignored by the kernel for quite some time.
5974 Recognize it for the target insofar as we do not want to pass
5975 it through to the host. */
5976 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5981 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5982 * TARGET_I386 is defined if TARGET_X86_64 is defined
5984 #if defined(TARGET_I386)
5986 /* NOTE: there is really one LDT for all the threads */
5987 static uint8_t *ldt_table
;
5989 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5996 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5997 if (size
> bytecount
)
5999 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6001 return -TARGET_EFAULT
;
6002 /* ??? Should this by byteswapped? */
6003 memcpy(p
, ldt_table
, size
);
6004 unlock_user(p
, ptr
, size
);
6008 /* XXX: add locking support */
6009 static abi_long
write_ldt(CPUX86State
*env
,
6010 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6012 struct target_modify_ldt_ldt_s ldt_info
;
6013 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6014 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6015 int seg_not_present
, useable
, lm
;
6016 uint32_t *lp
, entry_1
, entry_2
;
6018 if (bytecount
!= sizeof(ldt_info
))
6019 return -TARGET_EINVAL
;
6020 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6021 return -TARGET_EFAULT
;
6022 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6023 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6024 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6025 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6026 unlock_user_struct(target_ldt_info
, ptr
, 0);
6028 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6029 return -TARGET_EINVAL
;
6030 seg_32bit
= ldt_info
.flags
& 1;
6031 contents
= (ldt_info
.flags
>> 1) & 3;
6032 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6033 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6034 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6035 useable
= (ldt_info
.flags
>> 6) & 1;
6039 lm
= (ldt_info
.flags
>> 7) & 1;
6041 if (contents
== 3) {
6043 return -TARGET_EINVAL
;
6044 if (seg_not_present
== 0)
6045 return -TARGET_EINVAL
;
6047 /* allocate the LDT */
6049 env
->ldt
.base
= target_mmap(0,
6050 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6051 PROT_READ
|PROT_WRITE
,
6052 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6053 if (env
->ldt
.base
== -1)
6054 return -TARGET_ENOMEM
;
6055 memset(g2h_untagged(env
->ldt
.base
), 0,
6056 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6057 env
->ldt
.limit
= 0xffff;
6058 ldt_table
= g2h_untagged(env
->ldt
.base
);
6061 /* NOTE: same code as Linux kernel */
6062 /* Allow LDTs to be cleared by the user. */
6063 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6066 read_exec_only
== 1 &&
6068 limit_in_pages
== 0 &&
6069 seg_not_present
== 1 &&
6077 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6078 (ldt_info
.limit
& 0x0ffff);
6079 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6080 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6081 (ldt_info
.limit
& 0xf0000) |
6082 ((read_exec_only
^ 1) << 9) |
6084 ((seg_not_present
^ 1) << 15) |
6086 (limit_in_pages
<< 23) |
6090 entry_2
|= (useable
<< 20);
6092 /* Install the new entry ... */
6094 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6095 lp
[0] = tswap32(entry_1
);
6096 lp
[1] = tswap32(entry_2
);
6100 /* specific and weird i386 syscalls */
6101 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6102 unsigned long bytecount
)
6108 ret
= read_ldt(ptr
, bytecount
);
6111 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6114 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6117 ret
= -TARGET_ENOSYS
;
6123 #if defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor into the guest's
 * emulated GDT.  ptr points to a guest struct user_desc
 * (target_modify_ldt_ldt_s).  An entry_number of -1 asks us to pick a
 * free TLS slot and report it back to the guest.
 * Returns 0 or a target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE because we may write back the allocated entry_number */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot in the GDT and tell the guest which one */
        for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the Linux kernel) */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit halves of the descriptor (x86 GDT format) */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
          (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
          ((ldt_info.base_addr & 0x00ff0000) >> 16) |
          (ldt_info.limit & 0xf0000) |
          ((read_exec_only ^ 1) << 9) |
          (contents << 10) |
          ((seg_not_present ^ 1) << 15) |
          (seg_32bit << 22) |
          (limit_in_pages << 23) |
          (useable << 20) |
          (lm << 21) |
          0x7000;  /* present, DPL=3, non-system */

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * Emulate get_thread_area(2): read the TLS descriptor selected by the
 * guest's entry_number back out of the emulated GDT, unpacking the raw
 * descriptor words into user_desc fields (inverse of do_set_thread_area).
 * Returns 0 or a target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack descriptor bits; access bits are stored inverted in flags */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
/* arch_prctl(2) is only meaningful for 64-bit code; on 32-bit ABI
 * targets report it as unimplemented. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
/*
 * Emulate arch_prctl(2) (x86-64): get/set the FS or GS segment base.
 * SET_* stores addr as the segment base; GET_* writes the current base
 * to the guest pointer addr.  Returns 0 or a target errno.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch (code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        /* Load a null selector first, then set the base directly */
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */
6295 #endif /* defined(TARGET_I386) */
6297 #define NEW_STACK_SIZE 0x40000
6300 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6303 pthread_mutex_t mutex
;
6304 pthread_cond_t cond
;
6307 abi_ulong child_tidptr
;
6308 abi_ulong parent_tidptr
;
/*
 * Thread entry point for emulated clone(CLONE_VM) (see do_fork).
 * Registers the new thread with RCU/TCG, publishes its tid to the
 * requested guest locations, signals the parent that setup is done,
 * then waits for the parent to release clone_lock before entering
 * the guest CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).
 *
 * CLONE_VM requests become a host pthread running a copied CPU state
 * (thread creation is serialized by clone_lock and handed off through a
 * stack-local new_thread_info); anything else (including vfork, which is
 * downgraded) becomes a host fork().  Returns the new tid/pid on success
 * or a negative target errno.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    /* Drop flag bits we deliberately ignore */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a guest fcntl(2) command number into the host's.
 * Lock commands are mapped to the host's 64-bit variants so that a
 * single struct flock64 can be used everywhere.  Returns the host
 * command or -TARGET_EINVAL for commands we do not support.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch (cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
/* Shared switch body for converting flock lock types in either
 * direction; TRANSTBL_CONVERT is defined just before each use. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

/* Guest -> host flock l_type; -TARGET_EINVAL for unknown values. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

/* Host -> guest flock l_type. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
/*
 * Copy a guest struct flock (32/64-bit target layout) into a host
 * struct flock64, translating the lock type.  Returns 0 or a negative
 * target errno.
 */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/*
 * Copy a host struct flock64 back out to a guest struct flock,
 * translating the lock type.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Function-pointer signatures used to select the right flock64 copy
 * routine at runtime (e.g. OABI vs EABI layout on 32-bit ARM). */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6671 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * ARM OABI variant: copy a guest struct flock64 (OABI field layout)
 * into a host struct flock64.  Returns 0 or a negative target errno.
 */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/*
 * ARM OABI variant: copy a host struct flock64 out to a guest
 * struct flock64 (OABI field layout).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/*
 * Copy a guest struct flock64 into a host struct flock64, translating
 * the lock type.  Returns 0 or a negative target errno.
 */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/*
 * Copy a host struct flock64 out to a guest struct flock64,
 * translating the lock type.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/*
 * Emulate fcntl(2): translate the command and its argument (lock
 * structures, owner structures, flag bitmasks, signal numbers) between
 * guest and host representations, issue the host fcntl, and translate
 * the result back.  Returns the host result or a negative target errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch (cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate the O_* flag bits back to the guest's encoding */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* Plain integer argument; no translation needed */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6882 static inline int high2lowuid(int uid
)
6890 static inline int high2lowgid(int gid
)
6898 static inline int low2highuid(int uid
)
6900 if ((int16_t)uid
== -1)
6906 static inline int low2highgid(int gid
)
6908 if ((int16_t)gid
== -1)
6913 static inline int tswapid(int id
)
6918 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6920 #else /* !USE_UID16 */
6921 static inline int high2lowuid(int uid
)
6925 static inline int high2lowgid(int gid
)
6929 static inline int low2highuid(int uid
)
6933 static inline int low2highgid(int gid
)
6937 static inline int tswapid(int id
)
6942 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6944 #endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Per-thread [ug]id setters issued as raw syscalls (see comment above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * One-time initialization: register all thunk struct descriptions and
 * patch the size field of ioctl numbers whose size depends on the
 * target ABI (entries registered with TARGET_IOC_SIZEMASK as size).
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7025 #ifdef TARGET_NR_truncate64
/*
 * truncate64 helper: on targets that pass 64-bit values in aligned
 * register pairs, a padding argument shifts the offset halves up by
 * one slot, so use arg3/arg4 instead of arg2/arg3.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
7039 #ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64 helper: same register-pair alignment handling as
 * target_truncate64 above, but for a file descriptor.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
7053 #if defined(TARGET_NR_timer_settime) || \
7054 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Copy a guest struct itimerspec (32-bit timespec fields) into a host
 * struct itimerspec.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7071 #if defined(TARGET_NR_timer_settime64) || \
7072 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/*
 * Copy a guest struct __kernel_itimerspec (64-bit timespec fields)
 * into a host struct itimerspec.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7089 #if ((defined(TARGET_NR_timerfd_gettime) || \
7090 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7091 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * Copy a host struct itimerspec out to a guest struct itimerspec
 * (32-bit timespec fields).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7107 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7108 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7109 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/*
 * Copy a host struct itimerspec out to a guest struct
 * __kernel_itimerspec (64-bit timespec fields).  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7127 #if defined(TARGET_NR_adjtimex) || \
7128 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest struct timex (adjtimex/clock_adjtime) into a host
 * struct timex, field by field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/*
 * Copy a host struct timex back out to a guest struct timex, field by
 * field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7201 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Copy a guest struct __kernel_timex (time64 variant) into a host
 * struct timex.  The embedded time value uses a 64-bit timeval and is
 * copied separately before the scalar fields.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/*
 * Copy a host struct timex back out to a guest struct __kernel_timex
 * (time64 variant).  The embedded 64-bit timeval is copied separately
 * before the scalar fields.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7281 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7282 #define sigev_notify_thread_id _sigev_un._tid
/*
 * Copy a guest struct sigevent into a host struct sigevent,
 * translating the signal number.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7311 #if defined(TARGET_NR_mlockall)
/*
 * Translate guest mlockall(2) flag bits to the host's encoding.
 * MCL_ONFAULT is only translated when the host headers define it.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat into the guest's 64-bit stat layout at
 * target_addr.  ARM EABI guests use their own target_eabi_stat64;
 * everything else uses target_stat64 (or target_stat when the target
 * has no separate 64-bit variant).
 * Returns 0 on success, -TARGET_EFAULT if target_addr is unwritable.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Byte-swap a statx result (already in target_statx layout on the host
 * side) into guest memory at target_addr, field by field.
 * Returns 0 on success, -TARGET_EFAULT if target_addr is unwritable.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
/*
 * Dispatch to whichever raw futex syscall the host provides.
 * On 64-bit hosts there is only __NR_futex (64-bit time_t).  On 32-bit
 * hosts prefer __NR_futex_time64 when timespec carries a 64-bit tv_sec,
 * otherwise fall back to the legacy __NR_futex.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7475 static int do_safe_futex(int *uaddr
, int op
, int val
,
7476 const struct timespec
*timeout
, int *uaddr2
,
7479 #if HOST_LONG_BITS == 64
7480 #if defined(__NR_futex)
7481 /* always a 64-bit time_t, it doesn't define _time64 version */
7482 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7484 #else /* HOST_LONG_BITS == 64 */
7485 #if defined(__NR_futex_time64)
7486 if (sizeof(timeout
->tv_sec
) == 8) {
7487 /* _time64 function on 32bit arch */
7488 return get_errno(safe_futex_time64(uaddr
, op
, val
, timeout
, uaddr2
,
7492 #if defined(__NR_futex)
7493 /* old function on 32bit arch */
7494 return get_errno(safe_futex(uaddr
, op
, val
, timeout
, uaddr2
, val3
));
7496 #endif /* HOST_LONG_BITS == 64 */
7497 return -TARGET_ENOSYS
;
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
/*
 * Emulate the guest futex(2) syscall with a 32-bit/native timespec.
 * Returns a -TARGET_* errno on failure.
 */
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /* Fix: propagate a fault in the guest timespec instead of
             * silently ignoring it (matches do_futex_time64 below). */
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex_time64(2) syscall (64-bit guest timespec).
 * Returns a -TARGET_* errno on failure.
 */
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): resolve the guest path into a host
 * file_handle, byte-swap the header fields back into guest memory,
 * and store the mount id.  Returns a -TARGET_* errno on failure.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest file_handle into host
 * byte order and open it.  Returns the new fd or a -TARGET_* errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Emulate signalfd4(2): validate the flags, convert the guest signal
 * mask to host form, and register the resulting fd for siginfo
 * translation on reads.  Returns the fd or a -TARGET_* errno.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7737 static int open_self_cmdline(void *cpu_env
, int fd
)
7739 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7740 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7743 for (i
= 0; i
< bprm
->argc
; i
++) {
7744 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7746 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7754 static int open_self_maps(void *cpu_env
, int fd
)
7756 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7757 TaskState
*ts
= cpu
->opaque
;
7758 GSList
*map_info
= read_self_maps();
7762 for (s
= map_info
; s
; s
= g_slist_next(s
)) {
7763 MapInfo
*e
= (MapInfo
*) s
->data
;
7765 if (h2g_valid(e
->start
)) {
7766 unsigned long min
= e
->start
;
7767 unsigned long max
= e
->end
;
7768 int flags
= page_get_flags(h2g(min
));
7771 max
= h2g_valid(max
- 1) ?
7772 max
: (uintptr_t) g2h_untagged(GUEST_ADDR_MAX
) + 1;
7774 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7778 if (h2g(min
) == ts
->info
->stack_limit
) {
7784 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7785 " %c%c%c%c %08" PRIx64
" %s %"PRId64
,
7786 h2g(min
), h2g(max
- 1) + 1,
7787 (flags
& PAGE_READ
) ? 'r' : '-',
7788 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
7789 (flags
& PAGE_EXEC
) ? 'x' : '-',
7790 e
->is_priv
? 'p' : '-',
7791 (uint64_t) e
->offset
, e
->dev
, e
->inode
);
7793 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
7800 free_self_maps(map_info
);
7802 #ifdef TARGET_VSYSCALL_PAGE
7804 * We only support execution from the vsyscall page.
7805 * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7807 count
= dprintf(fd
, TARGET_FMT_lx
"-" TARGET_FMT_lx
7808 " --xp 00000000 00:00 0",
7809 TARGET_VSYSCALL_PAGE
, TARGET_VSYSCALL_PAGE
+ TARGET_PAGE_SIZE
);
7810 dprintf(fd
, "%*s%s\n", 73 - count
, "", "[vsyscall]");
7816 static int open_self_stat(void *cpu_env
, int fd
)
7818 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7819 TaskState
*ts
= cpu
->opaque
;
7820 g_autoptr(GString
) buf
= g_string_new(NULL
);
7823 for (i
= 0; i
< 44; i
++) {
7826 g_string_printf(buf
, FMT_pid
" ", getpid());
7827 } else if (i
== 1) {
7829 gchar
*bin
= g_strrstr(ts
->bprm
->argv
[0], "/");
7830 bin
= bin
? bin
+ 1 : ts
->bprm
->argv
[0];
7831 g_string_printf(buf
, "(%.15s) ", bin
);
7832 } else if (i
== 3) {
7834 g_string_printf(buf
, FMT_pid
" ", getppid());
7835 } else if (i
== 27) {
7837 g_string_printf(buf
, TARGET_ABI_FMT_ld
" ", ts
->info
->start_stack
);
7839 /* for the rest, there is MasterCard */
7840 g_string_printf(buf
, "0%c", i
== 43 ? '\n' : ' ');
7843 if (write(fd
, buf
->str
, buf
->len
) != buf
->len
) {
7851 static int open_self_auxv(void *cpu_env
, int fd
)
7853 CPUState
*cpu
= env_cpu((CPUArchState
*)cpu_env
);
7854 TaskState
*ts
= cpu
->opaque
;
7855 abi_ulong auxv
= ts
->info
->saved_auxv
;
7856 abi_ulong len
= ts
->info
->auxv_len
;
7860 * Auxiliary vector is stored in target process stack.
7861 * read in whole auxv vector and copy it to file
7863 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7867 r
= write(fd
, ptr
, len
);
7874 lseek(fd
, 0, SEEK_SET
);
7875 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if filename names the given entry of this process's /proc
 * directory — i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7905 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7906 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Return 1 when filename is exactly the given /proc entry path. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Synthesize /proc/net/route for a guest whose endianness differs from
 * the host's: copy the header line verbatim and byte-swap the address
 * fields (dest/gw/mask) of each route entry.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
/* Synthesize a minimal /proc/cpuinfo for SPARC guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
#if defined(TARGET_HPPA)
/* Synthesize a minimal /proc/cpuinfo for HPPA guests. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif
#if defined(TARGET_M68K)
/* Synthesize a minimal /proc/hardware for m68k guests. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
7986 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7989 const char *filename
;
7990 int (*fill
)(void *cpu_env
, int fd
);
7991 int (*cmp
)(const char *s1
, const char *s2
);
7993 const struct fake_open
*fake_open
;
7994 static const struct fake_open fakes
[] = {
7995 { "maps", open_self_maps
, is_proc_myself
},
7996 { "stat", open_self_stat
, is_proc_myself
},
7997 { "auxv", open_self_auxv
, is_proc_myself
},
7998 { "cmdline", open_self_cmdline
, is_proc_myself
},
7999 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8000 { "/proc/net/route", open_net_route
, is_proc
},
8002 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8003 { "/proc/cpuinfo", open_cpuinfo
, is_proc
},
8005 #if defined(TARGET_M68K)
8006 { "/proc/hardware", open_hardware
, is_proc
},
8008 { NULL
, NULL
, NULL
}
8011 if (is_proc_myself(pathname
, "exe")) {
8012 int execfd
= qemu_getauxval(AT_EXECFD
);
8013 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
8016 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
8017 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
8022 if (fake_open
->filename
) {
8024 char filename
[PATH_MAX
];
8027 /* create temporary file to map stat to */
8028 tmpdir
= getenv("TMPDIR");
8031 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
8032 fd
= mkstemp(filename
);
8038 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
8044 lseek(fd
, 0, SEEK_SET
);
8049 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
8052 #define TIMER_MAGIC 0x0caf0000
8053 #define TIMER_MAGIC_MASK 0xffff0000
8055 /* Convert QEMU provided timer ID back to internal 16bit index format */
8056 static target_timer_t
get_timer_id(abi_long arg
)
8058 target_timer_t timerid
= arg
;
8060 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
8061 return -TARGET_EINVAL
;
8066 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
8067 return -TARGET_EINVAL
;
8073 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8075 abi_ulong target_addr
,
8078 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8079 unsigned host_bits
= sizeof(*host_mask
) * 8;
8080 abi_ulong
*target_mask
;
8083 assert(host_size
>= target_size
);
8085 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8087 return -TARGET_EFAULT
;
8089 memset(host_mask
, 0, host_size
);
8091 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8092 unsigned bit
= i
* target_bits
;
8095 __get_user(val
, &target_mask
[i
]);
8096 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8097 if (val
& (1UL << j
)) {
8098 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8103 unlock_user(target_mask
, target_addr
, 0);
8107 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8109 abi_ulong target_addr
,
8112 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8113 unsigned host_bits
= sizeof(*host_mask
) * 8;
8114 abi_ulong
*target_mask
;
8117 assert(host_size
>= target_size
);
8119 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8121 return -TARGET_EFAULT
;
8124 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8125 unsigned bit
= i
* target_bits
;
8128 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8129 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8133 __put_user(val
, &target_mask
[i
]);
8136 unlock_user(target_mask
, target_addr
, target_size
);
#ifdef TARGET_NR_getdents
/*
 * Emulate getdents(2).  Three strategies depending on the build:
 *  - 32-bit target on 64-bit host with a native getdents: read into a
 *    bounce buffer and repack records into the (smaller) target layout;
 *  - matching ABI sizes: byte-swap the records in place;
 *  - no host getdents: emulate via getdents64 and repack in place.
 * Returns the number of bytes stored in the guest buffer or a
 * -TARGET_* errno.
 */
static int do_getdents(abi_long arg1, abi_long arg2, abi_long arg3)
{
    int ret;
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
# if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
    struct target_dirent *target_dirp;
    struct linux_dirent *dirp;
    abi_long count = arg3;

    dirp = g_try_malloc(count);
    if (!dirp) {
        return -TARGET_ENOMEM;
    }

    ret = get_errno(sys_getdents(arg1, dirp, count));
    if (!is_error(ret)) {
        struct linux_dirent *de;
        struct target_dirent *tde;
        int len = ret;
        int reclen, treclen;
        int count1, tnamelen;

        count1 = 0;
        de = dirp;
        target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
        if (!target_dirp) {
            return -TARGET_EFAULT;
        }
        tde = target_dirp;
        while (len > 0) {
            reclen = de->d_reclen;
            tnamelen = reclen - offsetof(struct linux_dirent, d_name);
            assert(tnamelen >= 0);
            treclen = tnamelen + offsetof(struct target_dirent, d_name);
            assert(count1 + treclen <= count);
            tde->d_reclen = tswap16(treclen);
            tde->d_ino = tswapal(de->d_ino);
            tde->d_off = tswapal(de->d_off);
            memcpy(tde->d_name, de->d_name, tnamelen);
            de = (struct linux_dirent *)((char *)de + reclen);
            len -= reclen;
            tde = (struct target_dirent *)((char *)tde + treclen);
            count1 += treclen;
        }
        ret = count1;
        unlock_user(target_dirp, arg2, ret);
    }
    g_free(dirp);
# else
    struct linux_dirent *dirp;
    abi_long count = arg3;

    dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!dirp) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(sys_getdents(arg1, dirp, count));
    if (!is_error(ret)) {
        struct linux_dirent *de;
        int len = ret;
        int reclen;

        de = dirp;
        while (len > 0) {
            reclen = de->d_reclen;
            if (reclen > len) {
                break;
            }
            de->d_reclen = tswap16(reclen);
            tswapls(&de->d_ino);
            tswapls(&de->d_off);
            de = (struct linux_dirent *)((char *)de + reclen);
            len -= reclen;
        }
    }
    unlock_user(dirp, arg2, ret);
# endif
#else
    /* Implement getdents in terms of getdents64 */
    struct linux_dirent64 *dirp;
    abi_long count = arg3;

    dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!dirp) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(sys_getdents64(arg1, dirp, count));
    if (!is_error(ret)) {
        /*
         * Convert the dirent64 structs to target dirent.  We do this
         * in-place, since we can guarantee that a target_dirent is no
         * larger than a dirent64; however this means we have to be
         * careful to read everything before writing in the new format.
         */
        struct linux_dirent64 *de;
        struct target_dirent *tde;
        int len = ret;
        int tlen = 0;

        de = dirp;
        tde = (struct target_dirent *)dirp;
        while (len > 0) {
            int namelen, treclen;
            int reclen = de->d_reclen;
            uint64_t ino = de->d_ino;
            int64_t off = de->d_off;
            uint8_t type = de->d_type;

            namelen = strlen(de->d_name);
            treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
            treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

            memmove(tde->d_name, de->d_name, namelen + 1);
            tde->d_ino = tswapal(ino);
            tde->d_off = tswapal(off);
            tde->d_reclen = tswap16(treclen);
            /*
             * The target_dirent type is in what was formerly a padding
             * byte at the end of the structure:
             */
            *(((char *)tde) + treclen - 1) = type;

            de = (struct linux_dirent64 *)((char *)de + reclen);
            tde = (struct target_dirent *)((char *)tde + treclen);
            len -= reclen;
            tlen += treclen;
        }
        ret = tlen;
    }
    unlock_user(dirp, arg2, ret);
#endif
    return ret;
}
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): the record layout matches the host's, so only
 * the d_ino/d_off/d_reclen fields need byte-swapping, done in place.
 * Returns bytes stored or a -TARGET_* errno.
 */
static int do_getdents64(abi_long arg1, abi_long arg2, abi_long arg3)
{
    struct linux_dirent64 *dirp;
    abi_long count = arg3;
    int ret;

    dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!dirp) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(sys_getdents64(arg1, dirp, count));
    if (!is_error(ret)) {
        struct linux_dirent64 *de;
        int len = ret;
        int reclen;

        de = dirp;
        while (len > 0) {
            reclen = de->d_reclen;
            if (reclen > len) {
                break;
            }
            de->d_reclen = tswap16(reclen);
            tswap64s((uint64_t *)&de->d_ino);
            tswap64s((uint64_t *)&de->d_off);
            de = (struct linux_dirent64 *)((char *)de + reclen);
            len -= reclen;
        }
    }
    unlock_user(dirp, arg2, ret);
    return ret;
}
#endif /* TARGET_NR_getdents64 */
8310 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8311 _syscall2(int, pivot_root
, const char *, new_root
, const char *, put_old
)
8314 /* This is an internal helper for do_syscall so that it is easier
8315 * to have a single return point, so that actions, such as logging
8316 * of syscall results, can be performed.
8317 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8319 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8320 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8321 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8324 CPUState
*cpu
= env_cpu(cpu_env
);
8326 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8327 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8328 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8329 || defined(TARGET_NR_statx)
8332 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8333 || defined(TARGET_NR_fstatfs)
8339 case TARGET_NR_exit
:
8340 /* In old applications this may be used to implement _exit(2).
8341 However in threaded applications it is used for thread termination,
8342 and _exit_group is used for application termination.
8343 Do thread termination if we have more then one thread. */
8345 if (block_signals()) {
8346 return -TARGET_ERESTARTSYS
;
8349 pthread_mutex_lock(&clone_lock
);
8351 if (CPU_NEXT(first_cpu
)) {
8352 TaskState
*ts
= cpu
->opaque
;
8354 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8355 object_unref(OBJECT(cpu
));
8357 * At this point the CPU should be unrealized and removed
8358 * from cpu lists. We can clean-up the rest of the thread
8359 * data without the lock held.
8362 pthread_mutex_unlock(&clone_lock
);
8364 if (ts
->child_tidptr
) {
8365 put_user_u32(0, ts
->child_tidptr
);
8366 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8367 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8371 rcu_unregister_thread();
8375 pthread_mutex_unlock(&clone_lock
);
8376 preexit_cleanup(cpu_env
, arg1
);
8378 return 0; /* avoid warning */
8379 case TARGET_NR_read
:
8380 if (arg2
== 0 && arg3
== 0) {
8381 return get_errno(safe_read(arg1
, 0, 0));
8383 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8384 return -TARGET_EFAULT
;
8385 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8387 fd_trans_host_to_target_data(arg1
)) {
8388 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8390 unlock_user(p
, arg2
, ret
);
8393 case TARGET_NR_write
:
8394 if (arg2
== 0 && arg3
== 0) {
8395 return get_errno(safe_write(arg1
, 0, 0));
8397 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8398 return -TARGET_EFAULT
;
8399 if (fd_trans_target_to_host_data(arg1
)) {
8400 void *copy
= g_malloc(arg3
);
8401 memcpy(copy
, p
, arg3
);
8402 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8404 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8408 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8410 unlock_user(p
, arg2
, 0);
8413 #ifdef TARGET_NR_open
8414 case TARGET_NR_open
:
8415 if (!(p
= lock_user_string(arg1
)))
8416 return -TARGET_EFAULT
;
8417 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8418 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8420 fd_trans_unregister(ret
);
8421 unlock_user(p
, arg1
, 0);
8424 case TARGET_NR_openat
:
8425 if (!(p
= lock_user_string(arg2
)))
8426 return -TARGET_EFAULT
;
8427 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8428 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8430 fd_trans_unregister(ret
);
8431 unlock_user(p
, arg2
, 0);
8433 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8434 case TARGET_NR_name_to_handle_at
:
8435 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8438 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8439 case TARGET_NR_open_by_handle_at
:
8440 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8441 fd_trans_unregister(ret
);
8444 case TARGET_NR_close
:
8445 fd_trans_unregister(arg1
);
8446 return get_errno(close(arg1
));
8449 return do_brk(arg1
);
8450 #ifdef TARGET_NR_fork
8451 case TARGET_NR_fork
:
8452 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8454 #ifdef TARGET_NR_waitpid
8455 case TARGET_NR_waitpid
:
8458 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8459 if (!is_error(ret
) && arg2
&& ret
8460 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8461 return -TARGET_EFAULT
;
8465 #ifdef TARGET_NR_waitid
8466 case TARGET_NR_waitid
:
8470 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8471 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8472 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8473 return -TARGET_EFAULT
;
8474 host_to_target_siginfo(p
, &info
);
8475 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8480 #ifdef TARGET_NR_creat /* not on alpha */
8481 case TARGET_NR_creat
:
8482 if (!(p
= lock_user_string(arg1
)))
8483 return -TARGET_EFAULT
;
8484 ret
= get_errno(creat(p
, arg2
));
8485 fd_trans_unregister(ret
);
8486 unlock_user(p
, arg1
, 0);
8489 #ifdef TARGET_NR_link
8490 case TARGET_NR_link
:
8493 p
= lock_user_string(arg1
);
8494 p2
= lock_user_string(arg2
);
8496 ret
= -TARGET_EFAULT
;
8498 ret
= get_errno(link(p
, p2
));
8499 unlock_user(p2
, arg2
, 0);
8500 unlock_user(p
, arg1
, 0);
8504 #if defined(TARGET_NR_linkat)
8505 case TARGET_NR_linkat
:
8509 return -TARGET_EFAULT
;
8510 p
= lock_user_string(arg2
);
8511 p2
= lock_user_string(arg4
);
8513 ret
= -TARGET_EFAULT
;
8515 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8516 unlock_user(p
, arg2
, 0);
8517 unlock_user(p2
, arg4
, 0);
8521 #ifdef TARGET_NR_unlink
8522 case TARGET_NR_unlink
:
8523 if (!(p
= lock_user_string(arg1
)))
8524 return -TARGET_EFAULT
;
8525 ret
= get_errno(unlink(p
));
8526 unlock_user(p
, arg1
, 0);
8529 #if defined(TARGET_NR_unlinkat)
8530 case TARGET_NR_unlinkat
:
8531 if (!(p
= lock_user_string(arg2
)))
8532 return -TARGET_EFAULT
;
8533 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8534 unlock_user(p
, arg2
, 0);
8537 case TARGET_NR_execve
:
8539 char **argp
, **envp
;
8542 abi_ulong guest_argp
;
8543 abi_ulong guest_envp
;
8549 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8550 if (get_user_ual(addr
, gp
))
8551 return -TARGET_EFAULT
;
8558 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8559 if (get_user_ual(addr
, gp
))
8560 return -TARGET_EFAULT
;
8566 argp
= g_new0(char *, argc
+ 1);
8567 envp
= g_new0(char *, envc
+ 1);
8569 for (gp
= guest_argp
, q
= argp
; gp
;
8570 gp
+= sizeof(abi_ulong
), q
++) {
8571 if (get_user_ual(addr
, gp
))
8575 if (!(*q
= lock_user_string(addr
)))
8580 for (gp
= guest_envp
, q
= envp
; gp
;
8581 gp
+= sizeof(abi_ulong
), q
++) {
8582 if (get_user_ual(addr
, gp
))
8586 if (!(*q
= lock_user_string(addr
)))
8591 if (!(p
= lock_user_string(arg1
)))
8593 /* Although execve() is not an interruptible syscall it is
8594 * a special case where we must use the safe_syscall wrapper:
8595 * if we allow a signal to happen before we make the host
8596 * syscall then we will 'lose' it, because at the point of
8597 * execve the process leaves QEMU's control. So we use the
8598 * safe syscall wrapper to ensure that we either take the
8599 * signal as a guest signal, or else it does not happen
8600 * before the execve completes and makes it the other
8601 * program's problem.
8603 ret
= get_errno(safe_execve(p
, argp
, envp
));
8604 unlock_user(p
, arg1
, 0);
8609 ret
= -TARGET_EFAULT
;
8612 for (gp
= guest_argp
, q
= argp
; *q
;
8613 gp
+= sizeof(abi_ulong
), q
++) {
8614 if (get_user_ual(addr
, gp
)
8617 unlock_user(*q
, addr
, 0);
8619 for (gp
= guest_envp
, q
= envp
; *q
;
8620 gp
+= sizeof(abi_ulong
), q
++) {
8621 if (get_user_ual(addr
, gp
)
8624 unlock_user(*q
, addr
, 0);
8631 case TARGET_NR_chdir
:
8632 if (!(p
= lock_user_string(arg1
)))
8633 return -TARGET_EFAULT
;
8634 ret
= get_errno(chdir(p
));
8635 unlock_user(p
, arg1
, 0);
8637 #ifdef TARGET_NR_time
8638 case TARGET_NR_time
:
8641 ret
= get_errno(time(&host_time
));
8644 && put_user_sal(host_time
, arg1
))
8645 return -TARGET_EFAULT
;
8649 #ifdef TARGET_NR_mknod
8650 case TARGET_NR_mknod
:
8651 if (!(p
= lock_user_string(arg1
)))
8652 return -TARGET_EFAULT
;
8653 ret
= get_errno(mknod(p
, arg2
, arg3
));
8654 unlock_user(p
, arg1
, 0);
8657 #if defined(TARGET_NR_mknodat)
8658 case TARGET_NR_mknodat
:
8659 if (!(p
= lock_user_string(arg2
)))
8660 return -TARGET_EFAULT
;
8661 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8662 unlock_user(p
, arg2
, 0);
8665 #ifdef TARGET_NR_chmod
8666 case TARGET_NR_chmod
:
8667 if (!(p
= lock_user_string(arg1
)))
8668 return -TARGET_EFAULT
;
8669 ret
= get_errno(chmod(p
, arg2
));
8670 unlock_user(p
, arg1
, 0);
8673 #ifdef TARGET_NR_lseek
8674 case TARGET_NR_lseek
:
8675 return get_errno(lseek(arg1
, arg2
, arg3
));
8677 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8678 /* Alpha specific */
8679 case TARGET_NR_getxpid
:
8680 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8681 return get_errno(getpid());
8683 #ifdef TARGET_NR_getpid
8684 case TARGET_NR_getpid
:
8685 return get_errno(getpid());
8687 case TARGET_NR_mount
:
8689 /* need to look at the data field */
8693 p
= lock_user_string(arg1
);
8695 return -TARGET_EFAULT
;
8701 p2
= lock_user_string(arg2
);
8704 unlock_user(p
, arg1
, 0);
8706 return -TARGET_EFAULT
;
8710 p3
= lock_user_string(arg3
);
8713 unlock_user(p
, arg1
, 0);
8715 unlock_user(p2
, arg2
, 0);
8716 return -TARGET_EFAULT
;
8722 /* FIXME - arg5 should be locked, but it isn't clear how to
8723 * do that since it's not guaranteed to be a NULL-terminated
8727 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8729 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8731 ret
= get_errno(ret
);
8734 unlock_user(p
, arg1
, 0);
8736 unlock_user(p2
, arg2
, 0);
8738 unlock_user(p3
, arg3
, 0);
8742 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8743 #if defined(TARGET_NR_umount)
8744 case TARGET_NR_umount
:
8746 #if defined(TARGET_NR_oldumount)
8747 case TARGET_NR_oldumount
:
8749 if (!(p
= lock_user_string(arg1
)))
8750 return -TARGET_EFAULT
;
8751 ret
= get_errno(umount(p
));
8752 unlock_user(p
, arg1
, 0);
8755 #ifdef TARGET_NR_stime /* not on alpha */
8756 case TARGET_NR_stime
:
8760 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8761 return -TARGET_EFAULT
;
8763 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8766 #ifdef TARGET_NR_alarm /* not on alpha */
8767 case TARGET_NR_alarm
:
8770 #ifdef TARGET_NR_pause /* not on alpha */
8771 case TARGET_NR_pause
:
8772 if (!block_signals()) {
8773 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8775 return -TARGET_EINTR
;
8777 #ifdef TARGET_NR_utime
8778 case TARGET_NR_utime
:
8780 struct utimbuf tbuf
, *host_tbuf
;
8781 struct target_utimbuf
*target_tbuf
;
8783 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8784 return -TARGET_EFAULT
;
8785 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8786 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8787 unlock_user_struct(target_tbuf
, arg2
, 0);
8792 if (!(p
= lock_user_string(arg1
)))
8793 return -TARGET_EFAULT
;
8794 ret
= get_errno(utime(p
, host_tbuf
));
8795 unlock_user(p
, arg1
, 0);
8799 #ifdef TARGET_NR_utimes
8800 case TARGET_NR_utimes
:
8802 struct timeval
*tvp
, tv
[2];
8804 if (copy_from_user_timeval(&tv
[0], arg2
)
8805 || copy_from_user_timeval(&tv
[1],
8806 arg2
+ sizeof(struct target_timeval
)))
8807 return -TARGET_EFAULT
;
8812 if (!(p
= lock_user_string(arg1
)))
8813 return -TARGET_EFAULT
;
8814 ret
= get_errno(utimes(p
, tvp
));
8815 unlock_user(p
, arg1
, 0);
8819 #if defined(TARGET_NR_futimesat)
8820 case TARGET_NR_futimesat
:
8822 struct timeval
*tvp
, tv
[2];
8824 if (copy_from_user_timeval(&tv
[0], arg3
)
8825 || copy_from_user_timeval(&tv
[1],
8826 arg3
+ sizeof(struct target_timeval
)))
8827 return -TARGET_EFAULT
;
8832 if (!(p
= lock_user_string(arg2
))) {
8833 return -TARGET_EFAULT
;
8835 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8836 unlock_user(p
, arg2
, 0);
8840 #ifdef TARGET_NR_access
8841 case TARGET_NR_access
:
8842 if (!(p
= lock_user_string(arg1
))) {
8843 return -TARGET_EFAULT
;
8845 ret
= get_errno(access(path(p
), arg2
));
8846 unlock_user(p
, arg1
, 0);
8849 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8850 case TARGET_NR_faccessat
:
8851 if (!(p
= lock_user_string(arg2
))) {
8852 return -TARGET_EFAULT
;
8854 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8855 unlock_user(p
, arg2
, 0);
8858 #ifdef TARGET_NR_nice /* not on alpha */
8859 case TARGET_NR_nice
:
8860 return get_errno(nice(arg1
));
8862 case TARGET_NR_sync
:
8865 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8866 case TARGET_NR_syncfs
:
8867 return get_errno(syncfs(arg1
));
8869 case TARGET_NR_kill
:
8870 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8871 #ifdef TARGET_NR_rename
8872 case TARGET_NR_rename
:
8875 p
= lock_user_string(arg1
);
8876 p2
= lock_user_string(arg2
);
8878 ret
= -TARGET_EFAULT
;
8880 ret
= get_errno(rename(p
, p2
));
8881 unlock_user(p2
, arg2
, 0);
8882 unlock_user(p
, arg1
, 0);
8886 #if defined(TARGET_NR_renameat)
8887 case TARGET_NR_renameat
:
8890 p
= lock_user_string(arg2
);
8891 p2
= lock_user_string(arg4
);
8893 ret
= -TARGET_EFAULT
;
8895 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8896 unlock_user(p2
, arg4
, 0);
8897 unlock_user(p
, arg2
, 0);
8901 #if defined(TARGET_NR_renameat2)
8902 case TARGET_NR_renameat2
:
8905 p
= lock_user_string(arg2
);
8906 p2
= lock_user_string(arg4
);
8908 ret
= -TARGET_EFAULT
;
8910 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8912 unlock_user(p2
, arg4
, 0);
8913 unlock_user(p
, arg2
, 0);
8917 #ifdef TARGET_NR_mkdir
8918 case TARGET_NR_mkdir
:
8919 if (!(p
= lock_user_string(arg1
)))
8920 return -TARGET_EFAULT
;
8921 ret
= get_errno(mkdir(p
, arg2
));
8922 unlock_user(p
, arg1
, 0);
8925 #if defined(TARGET_NR_mkdirat)
8926 case TARGET_NR_mkdirat
:
8927 if (!(p
= lock_user_string(arg2
)))
8928 return -TARGET_EFAULT
;
8929 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8930 unlock_user(p
, arg2
, 0);
8933 #ifdef TARGET_NR_rmdir
8934 case TARGET_NR_rmdir
:
8935 if (!(p
= lock_user_string(arg1
)))
8936 return -TARGET_EFAULT
;
8937 ret
= get_errno(rmdir(p
));
8938 unlock_user(p
, arg1
, 0);
8942 ret
= get_errno(dup(arg1
));
8944 fd_trans_dup(arg1
, ret
);
8947 #ifdef TARGET_NR_pipe
8948 case TARGET_NR_pipe
:
8949 return do_pipe(cpu_env
, arg1
, 0, 0);
8951 #ifdef TARGET_NR_pipe2
8952 case TARGET_NR_pipe2
:
8953 return do_pipe(cpu_env
, arg1
,
8954 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8956 case TARGET_NR_times
:
8958 struct target_tms
*tmsp
;
8960 ret
= get_errno(times(&tms
));
8962 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8964 return -TARGET_EFAULT
;
8965 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8966 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8967 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8968 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8971 ret
= host_to_target_clock_t(ret
);
8974 case TARGET_NR_acct
:
8976 ret
= get_errno(acct(NULL
));
8978 if (!(p
= lock_user_string(arg1
))) {
8979 return -TARGET_EFAULT
;
8981 ret
= get_errno(acct(path(p
)));
8982 unlock_user(p
, arg1
, 0);
8985 #ifdef TARGET_NR_umount2
8986 case TARGET_NR_umount2
:
8987 if (!(p
= lock_user_string(arg1
)))
8988 return -TARGET_EFAULT
;
8989 ret
= get_errno(umount2(p
, arg2
));
8990 unlock_user(p
, arg1
, 0);
8993 case TARGET_NR_ioctl
:
8994 return do_ioctl(arg1
, arg2
, arg3
);
8995 #ifdef TARGET_NR_fcntl
8996 case TARGET_NR_fcntl
:
8997 return do_fcntl(arg1
, arg2
, arg3
);
8999 case TARGET_NR_setpgid
:
9000 return get_errno(setpgid(arg1
, arg2
));
9001 case TARGET_NR_umask
:
9002 return get_errno(umask(arg1
));
9003 case TARGET_NR_chroot
:
9004 if (!(p
= lock_user_string(arg1
)))
9005 return -TARGET_EFAULT
;
9006 ret
= get_errno(chroot(p
));
9007 unlock_user(p
, arg1
, 0);
9009 #ifdef TARGET_NR_dup2
9010 case TARGET_NR_dup2
:
9011 ret
= get_errno(dup2(arg1
, arg2
));
9013 fd_trans_dup(arg1
, arg2
);
9017 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9018 case TARGET_NR_dup3
:
9022 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9025 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9026 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9028 fd_trans_dup(arg1
, arg2
);
9033 #ifdef TARGET_NR_getppid /* not on alpha */
9034 case TARGET_NR_getppid
:
9035 return get_errno(getppid());
9037 #ifdef TARGET_NR_getpgrp
9038 case TARGET_NR_getpgrp
:
9039 return get_errno(getpgrp());
9041 case TARGET_NR_setsid
:
9042 return get_errno(setsid());
9043 #ifdef TARGET_NR_sigaction
9044 case TARGET_NR_sigaction
:
9046 #if defined(TARGET_MIPS)
9047 struct target_sigaction act
, oact
, *pact
, *old_act
;
9050 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9051 return -TARGET_EFAULT
;
9052 act
._sa_handler
= old_act
->_sa_handler
;
9053 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9054 act
.sa_flags
= old_act
->sa_flags
;
9055 unlock_user_struct(old_act
, arg2
, 0);
9061 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9063 if (!is_error(ret
) && arg3
) {
9064 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9065 return -TARGET_EFAULT
;
9066 old_act
->_sa_handler
= oact
._sa_handler
;
9067 old_act
->sa_flags
= oact
.sa_flags
;
9068 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9069 old_act
->sa_mask
.sig
[1] = 0;
9070 old_act
->sa_mask
.sig
[2] = 0;
9071 old_act
->sa_mask
.sig
[3] = 0;
9072 unlock_user_struct(old_act
, arg3
, 1);
9075 struct target_old_sigaction
*old_act
;
9076 struct target_sigaction act
, oact
, *pact
;
9078 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9079 return -TARGET_EFAULT
;
9080 act
._sa_handler
= old_act
->_sa_handler
;
9081 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9082 act
.sa_flags
= old_act
->sa_flags
;
9083 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9084 act
.sa_restorer
= old_act
->sa_restorer
;
9086 unlock_user_struct(old_act
, arg2
, 0);
9091 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9092 if (!is_error(ret
) && arg3
) {
9093 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9094 return -TARGET_EFAULT
;
9095 old_act
->_sa_handler
= oact
._sa_handler
;
9096 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9097 old_act
->sa_flags
= oact
.sa_flags
;
9098 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9099 old_act
->sa_restorer
= oact
.sa_restorer
;
9101 unlock_user_struct(old_act
, arg3
, 1);
9107 case TARGET_NR_rt_sigaction
:
9110 * For Alpha and SPARC this is a 5 argument syscall, with
9111 * a 'restorer' parameter which must be copied into the
9112 * sa_restorer field of the sigaction struct.
9113 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9114 * and arg5 is the sigsetsize.
9116 #if defined(TARGET_ALPHA)
9117 target_ulong sigsetsize
= arg4
;
9118 target_ulong restorer
= arg5
;
9119 #elif defined(TARGET_SPARC)
9120 target_ulong restorer
= arg4
;
9121 target_ulong sigsetsize
= arg5
;
9123 target_ulong sigsetsize
= arg4
;
9124 target_ulong restorer
= 0;
9126 struct target_sigaction
*act
= NULL
;
9127 struct target_sigaction
*oact
= NULL
;
9129 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9130 return -TARGET_EINVAL
;
9132 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9133 return -TARGET_EFAULT
;
9135 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9136 ret
= -TARGET_EFAULT
;
9138 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9140 unlock_user_struct(oact
, arg3
, 1);
9144 unlock_user_struct(act
, arg2
, 0);
9148 #ifdef TARGET_NR_sgetmask /* not on alpha */
9149 case TARGET_NR_sgetmask
:
9152 abi_ulong target_set
;
9153 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9155 host_to_target_old_sigset(&target_set
, &cur_set
);
9161 #ifdef TARGET_NR_ssetmask /* not on alpha */
9162 case TARGET_NR_ssetmask
:
9165 abi_ulong target_set
= arg1
;
9166 target_to_host_old_sigset(&set
, &target_set
);
9167 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9169 host_to_target_old_sigset(&target_set
, &oset
);
9175 #ifdef TARGET_NR_sigprocmask
9176 case TARGET_NR_sigprocmask
:
9178 #if defined(TARGET_ALPHA)
9179 sigset_t set
, oldset
;
9184 case TARGET_SIG_BLOCK
:
9187 case TARGET_SIG_UNBLOCK
:
9190 case TARGET_SIG_SETMASK
:
9194 return -TARGET_EINVAL
;
9197 target_to_host_old_sigset(&set
, &mask
);
9199 ret
= do_sigprocmask(how
, &set
, &oldset
);
9200 if (!is_error(ret
)) {
9201 host_to_target_old_sigset(&mask
, &oldset
);
9203 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9206 sigset_t set
, oldset
, *set_ptr
;
9211 case TARGET_SIG_BLOCK
:
9214 case TARGET_SIG_UNBLOCK
:
9217 case TARGET_SIG_SETMASK
:
9221 return -TARGET_EINVAL
;
9223 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9224 return -TARGET_EFAULT
;
9225 target_to_host_old_sigset(&set
, p
);
9226 unlock_user(p
, arg2
, 0);
9232 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9233 if (!is_error(ret
) && arg3
) {
9234 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9235 return -TARGET_EFAULT
;
9236 host_to_target_old_sigset(p
, &oldset
);
9237 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9243 case TARGET_NR_rt_sigprocmask
:
9246 sigset_t set
, oldset
, *set_ptr
;
9248 if (arg4
!= sizeof(target_sigset_t
)) {
9249 return -TARGET_EINVAL
;
9254 case TARGET_SIG_BLOCK
:
9257 case TARGET_SIG_UNBLOCK
:
9260 case TARGET_SIG_SETMASK
:
9264 return -TARGET_EINVAL
;
9266 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9267 return -TARGET_EFAULT
;
9268 target_to_host_sigset(&set
, p
);
9269 unlock_user(p
, arg2
, 0);
9275 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9276 if (!is_error(ret
) && arg3
) {
9277 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9278 return -TARGET_EFAULT
;
9279 host_to_target_sigset(p
, &oldset
);
9280 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9284 #ifdef TARGET_NR_sigpending
9285 case TARGET_NR_sigpending
:
9288 ret
= get_errno(sigpending(&set
));
9289 if (!is_error(ret
)) {
9290 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9291 return -TARGET_EFAULT
;
9292 host_to_target_old_sigset(p
, &set
);
9293 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9298 case TARGET_NR_rt_sigpending
:
9302 /* Yes, this check is >, not != like most. We follow the kernel's
9303 * logic and it does it like this because it implements
9304 * NR_sigpending through the same code path, and in that case
9305 * the old_sigset_t is smaller in size.
9307 if (arg2
> sizeof(target_sigset_t
)) {
9308 return -TARGET_EINVAL
;
9311 ret
= get_errno(sigpending(&set
));
9312 if (!is_error(ret
)) {
9313 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9314 return -TARGET_EFAULT
;
9315 host_to_target_sigset(p
, &set
);
9316 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9320 #ifdef TARGET_NR_sigsuspend
9321 case TARGET_NR_sigsuspend
:
9323 TaskState
*ts
= cpu
->opaque
;
9324 #if defined(TARGET_ALPHA)
9325 abi_ulong mask
= arg1
;
9326 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9328 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9329 return -TARGET_EFAULT
;
9330 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9331 unlock_user(p
, arg1
, 0);
9333 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9335 if (ret
!= -TARGET_ERESTARTSYS
) {
9336 ts
->in_sigsuspend
= 1;
9341 case TARGET_NR_rt_sigsuspend
:
9343 TaskState
*ts
= cpu
->opaque
;
9345 if (arg2
!= sizeof(target_sigset_t
)) {
9346 return -TARGET_EINVAL
;
9348 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9349 return -TARGET_EFAULT
;
9350 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9351 unlock_user(p
, arg1
, 0);
9352 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9354 if (ret
!= -TARGET_ERESTARTSYS
) {
9355 ts
->in_sigsuspend
= 1;
9359 #ifdef TARGET_NR_rt_sigtimedwait
9360 case TARGET_NR_rt_sigtimedwait
:
9363 struct timespec uts
, *puts
;
9366 if (arg4
!= sizeof(target_sigset_t
)) {
9367 return -TARGET_EINVAL
;
9370 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9371 return -TARGET_EFAULT
;
9372 target_to_host_sigset(&set
, p
);
9373 unlock_user(p
, arg1
, 0);
9376 if (target_to_host_timespec(puts
, arg3
)) {
9377 return -TARGET_EFAULT
;
9382 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9384 if (!is_error(ret
)) {
9386 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9389 return -TARGET_EFAULT
;
9391 host_to_target_siginfo(p
, &uinfo
);
9392 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9394 ret
= host_to_target_signal(ret
);
9399 #ifdef TARGET_NR_rt_sigtimedwait_time64
9400 case TARGET_NR_rt_sigtimedwait_time64
:
9403 struct timespec uts
, *puts
;
9406 if (arg4
!= sizeof(target_sigset_t
)) {
9407 return -TARGET_EINVAL
;
9410 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9412 return -TARGET_EFAULT
;
9414 target_to_host_sigset(&set
, p
);
9415 unlock_user(p
, arg1
, 0);
9418 if (target_to_host_timespec64(puts
, arg3
)) {
9419 return -TARGET_EFAULT
;
9424 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9426 if (!is_error(ret
)) {
9428 p
= lock_user(VERIFY_WRITE
, arg2
,
9429 sizeof(target_siginfo_t
), 0);
9431 return -TARGET_EFAULT
;
9433 host_to_target_siginfo(p
, &uinfo
);
9434 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9436 ret
= host_to_target_signal(ret
);
9441 case TARGET_NR_rt_sigqueueinfo
:
9445 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9447 return -TARGET_EFAULT
;
9449 target_to_host_siginfo(&uinfo
, p
);
9450 unlock_user(p
, arg3
, 0);
9451 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9454 case TARGET_NR_rt_tgsigqueueinfo
:
9458 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9460 return -TARGET_EFAULT
;
9462 target_to_host_siginfo(&uinfo
, p
);
9463 unlock_user(p
, arg4
, 0);
9464 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9467 #ifdef TARGET_NR_sigreturn
9468 case TARGET_NR_sigreturn
:
9469 if (block_signals()) {
9470 return -TARGET_ERESTARTSYS
;
9472 return do_sigreturn(cpu_env
);
9474 case TARGET_NR_rt_sigreturn
:
9475 if (block_signals()) {
9476 return -TARGET_ERESTARTSYS
;
9478 return do_rt_sigreturn(cpu_env
);
9479 case TARGET_NR_sethostname
:
9480 if (!(p
= lock_user_string(arg1
)))
9481 return -TARGET_EFAULT
;
9482 ret
= get_errno(sethostname(p
, arg2
));
9483 unlock_user(p
, arg1
, 0);
9485 #ifdef TARGET_NR_setrlimit
9486 case TARGET_NR_setrlimit
:
9488 int resource
= target_to_host_resource(arg1
);
9489 struct target_rlimit
*target_rlim
;
9491 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9492 return -TARGET_EFAULT
;
9493 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9494 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9495 unlock_user_struct(target_rlim
, arg2
, 0);
9497 * If we just passed through resource limit settings for memory then
9498 * they would also apply to QEMU's own allocations, and QEMU will
9499 * crash or hang or die if its allocations fail. Ideally we would
9500 * track the guest allocations in QEMU and apply the limits ourselves.
9501 * For now, just tell the guest the call succeeded but don't actually
9504 if (resource
!= RLIMIT_AS
&&
9505 resource
!= RLIMIT_DATA
&&
9506 resource
!= RLIMIT_STACK
) {
9507 return get_errno(setrlimit(resource
, &rlim
));
9513 #ifdef TARGET_NR_getrlimit
9514 case TARGET_NR_getrlimit
:
9516 int resource
= target_to_host_resource(arg1
);
9517 struct target_rlimit
*target_rlim
;
9520 ret
= get_errno(getrlimit(resource
, &rlim
));
9521 if (!is_error(ret
)) {
9522 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9523 return -TARGET_EFAULT
;
9524 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9525 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9526 unlock_user_struct(target_rlim
, arg2
, 1);
9531 case TARGET_NR_getrusage
:
9533 struct rusage rusage
;
9534 ret
= get_errno(getrusage(arg1
, &rusage
));
9535 if (!is_error(ret
)) {
9536 ret
= host_to_target_rusage(arg2
, &rusage
);
9540 #if defined(TARGET_NR_gettimeofday)
9541 case TARGET_NR_gettimeofday
:
9546 ret
= get_errno(gettimeofday(&tv
, &tz
));
9547 if (!is_error(ret
)) {
9548 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9549 return -TARGET_EFAULT
;
9551 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9552 return -TARGET_EFAULT
;
9558 #if defined(TARGET_NR_settimeofday)
9559 case TARGET_NR_settimeofday
:
9561 struct timeval tv
, *ptv
= NULL
;
9562 struct timezone tz
, *ptz
= NULL
;
9565 if (copy_from_user_timeval(&tv
, arg1
)) {
9566 return -TARGET_EFAULT
;
9572 if (copy_from_user_timezone(&tz
, arg2
)) {
9573 return -TARGET_EFAULT
;
9578 return get_errno(settimeofday(ptv
, ptz
));
9581 #if defined(TARGET_NR_select)
9582 case TARGET_NR_select
:
9583 #if defined(TARGET_WANT_NI_OLD_SELECT)
9584 /* some architectures used to have old_select here
9585 * but now ENOSYS it.
9587 ret
= -TARGET_ENOSYS
;
9588 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9589 ret
= do_old_select(arg1
);
9591 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9595 #ifdef TARGET_NR_pselect6
9596 case TARGET_NR_pselect6
:
9597 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9599 #ifdef TARGET_NR_pselect6_time64
9600 case TARGET_NR_pselect6_time64
:
9601 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9603 #ifdef TARGET_NR_symlink
9604 case TARGET_NR_symlink
:
9607 p
= lock_user_string(arg1
);
9608 p2
= lock_user_string(arg2
);
9610 ret
= -TARGET_EFAULT
;
9612 ret
= get_errno(symlink(p
, p2
));
9613 unlock_user(p2
, arg2
, 0);
9614 unlock_user(p
, arg1
, 0);
9618 #if defined(TARGET_NR_symlinkat)
9619 case TARGET_NR_symlinkat
:
9622 p
= lock_user_string(arg1
);
9623 p2
= lock_user_string(arg3
);
9625 ret
= -TARGET_EFAULT
;
9627 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9628 unlock_user(p2
, arg3
, 0);
9629 unlock_user(p
, arg1
, 0);
9633 #ifdef TARGET_NR_readlink
9634 case TARGET_NR_readlink
:
9637 p
= lock_user_string(arg1
);
9638 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9640 ret
= -TARGET_EFAULT
;
9642 /* Short circuit this for the magic exe check. */
9643 ret
= -TARGET_EINVAL
;
9644 } else if (is_proc_myself((const char *)p
, "exe")) {
9645 char real
[PATH_MAX
], *temp
;
9646 temp
= realpath(exec_path
, real
);
9647 /* Return value is # of bytes that we wrote to the buffer. */
9649 ret
= get_errno(-1);
9651 /* Don't worry about sign mismatch as earlier mapping
9652 * logic would have thrown a bad address error. */
9653 ret
= MIN(strlen(real
), arg3
);
9654 /* We cannot NUL terminate the string. */
9655 memcpy(p2
, real
, ret
);
9658 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9660 unlock_user(p2
, arg2
, ret
);
9661 unlock_user(p
, arg1
, 0);
9665 #if defined(TARGET_NR_readlinkat)
9666 case TARGET_NR_readlinkat
:
9669 p
= lock_user_string(arg2
);
9670 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9672 ret
= -TARGET_EFAULT
;
9673 } else if (is_proc_myself((const char *)p
, "exe")) {
9674 char real
[PATH_MAX
], *temp
;
9675 temp
= realpath(exec_path
, real
);
9676 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9677 snprintf((char *)p2
, arg4
, "%s", real
);
9679 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9681 unlock_user(p2
, arg3
, ret
);
9682 unlock_user(p
, arg2
, 0);
9686 #ifdef TARGET_NR_swapon
9687 case TARGET_NR_swapon
:
9688 if (!(p
= lock_user_string(arg1
)))
9689 return -TARGET_EFAULT
;
9690 ret
= get_errno(swapon(p
, arg2
));
9691 unlock_user(p
, arg1
, 0);
9694 case TARGET_NR_reboot
:
9695 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9696 /* arg4 must be ignored in all other cases */
9697 p
= lock_user_string(arg4
);
9699 return -TARGET_EFAULT
;
9701 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9702 unlock_user(p
, arg4
, 0);
9704 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9707 #ifdef TARGET_NR_mmap
9708 case TARGET_NR_mmap
:
9709 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9710 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9711 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9712 || defined(TARGET_S390X)
9715 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9716 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9717 return -TARGET_EFAULT
;
9724 unlock_user(v
, arg1
, 0);
9725 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9726 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9730 /* mmap pointers are always untagged */
9731 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9732 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9738 #ifdef TARGET_NR_mmap2
9739 case TARGET_NR_mmap2
:
9741 #define MMAP_SHIFT 12
9743 ret
= target_mmap(arg1
, arg2
, arg3
,
9744 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9745 arg5
, arg6
<< MMAP_SHIFT
);
9746 return get_errno(ret
);
9748 case TARGET_NR_munmap
:
9749 arg1
= cpu_untagged_addr(cpu
, arg1
);
9750 return get_errno(target_munmap(arg1
, arg2
));
9751 case TARGET_NR_mprotect
:
9752 arg1
= cpu_untagged_addr(cpu
, arg1
);
9754 TaskState
*ts
= cpu
->opaque
;
9755 /* Special hack to detect libc making the stack executable. */
9756 if ((arg3
& PROT_GROWSDOWN
)
9757 && arg1
>= ts
->info
->stack_limit
9758 && arg1
<= ts
->info
->start_stack
) {
9759 arg3
&= ~PROT_GROWSDOWN
;
9760 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9761 arg1
= ts
->info
->stack_limit
;
9764 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9765 #ifdef TARGET_NR_mremap
9766 case TARGET_NR_mremap
:
9767 arg1
= cpu_untagged_addr(cpu
, arg1
);
9768 /* mremap new_addr (arg5) is always untagged */
9769 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9771 /* ??? msync/mlock/munlock are broken for softmmu. */
9772 #ifdef TARGET_NR_msync
9773 case TARGET_NR_msync
:
9774 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
9776 #ifdef TARGET_NR_mlock
9777 case TARGET_NR_mlock
:
9778 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
9780 #ifdef TARGET_NR_munlock
9781 case TARGET_NR_munlock
:
9782 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
9784 #ifdef TARGET_NR_mlockall
9785 case TARGET_NR_mlockall
:
9786 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9788 #ifdef TARGET_NR_munlockall
9789 case TARGET_NR_munlockall
:
9790 return get_errno(munlockall());
9792 #ifdef TARGET_NR_truncate
9793 case TARGET_NR_truncate
:
9794 if (!(p
= lock_user_string(arg1
)))
9795 return -TARGET_EFAULT
;
9796 ret
= get_errno(truncate(p
, arg2
));
9797 unlock_user(p
, arg1
, 0);
9800 #ifdef TARGET_NR_ftruncate
9801 case TARGET_NR_ftruncate
:
9802 return get_errno(ftruncate(arg1
, arg2
));
9804 case TARGET_NR_fchmod
:
9805 return get_errno(fchmod(arg1
, arg2
));
9806 #if defined(TARGET_NR_fchmodat)
9807 case TARGET_NR_fchmodat
:
9808 if (!(p
= lock_user_string(arg2
)))
9809 return -TARGET_EFAULT
;
9810 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9811 unlock_user(p
, arg2
, 0);
9814 case TARGET_NR_getpriority
:
9815 /* Note that negative values are valid for getpriority, so we must
9816 differentiate based on errno settings. */
9818 ret
= getpriority(arg1
, arg2
);
9819 if (ret
== -1 && errno
!= 0) {
9820 return -host_to_target_errno(errno
);
9823 /* Return value is the unbiased priority. Signal no error. */
9824 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9826 /* Return value is a biased priority to avoid negative numbers. */
9830 case TARGET_NR_setpriority
:
9831 return get_errno(setpriority(arg1
, arg2
, arg3
));
9832 #ifdef TARGET_NR_statfs
9833 case TARGET_NR_statfs
:
9834 if (!(p
= lock_user_string(arg1
))) {
9835 return -TARGET_EFAULT
;
9837 ret
= get_errno(statfs(path(p
), &stfs
));
9838 unlock_user(p
, arg1
, 0);
9840 if (!is_error(ret
)) {
9841 struct target_statfs
*target_stfs
;
9843 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9844 return -TARGET_EFAULT
;
9845 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9846 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9847 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9848 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9849 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9850 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9851 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9852 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9853 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9854 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9855 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9856 #ifdef _STATFS_F_FLAGS
9857 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9859 __put_user(0, &target_stfs
->f_flags
);
9861 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9862 unlock_user_struct(target_stfs
, arg2
, 1);
9866 #ifdef TARGET_NR_fstatfs
9867 case TARGET_NR_fstatfs
:
9868 ret
= get_errno(fstatfs(arg1
, &stfs
));
9869 goto convert_statfs
;
9871 #ifdef TARGET_NR_statfs64
9872 case TARGET_NR_statfs64
:
9873 if (!(p
= lock_user_string(arg1
))) {
9874 return -TARGET_EFAULT
;
9876 ret
= get_errno(statfs(path(p
), &stfs
));
9877 unlock_user(p
, arg1
, 0);
9879 if (!is_error(ret
)) {
9880 struct target_statfs64
*target_stfs
;
9882 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9883 return -TARGET_EFAULT
;
9884 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9885 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9886 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9887 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9888 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9889 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9890 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9891 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9892 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9893 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9894 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9895 #ifdef _STATFS_F_FLAGS
9896 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9898 __put_user(0, &target_stfs
->f_flags
);
9900 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9901 unlock_user_struct(target_stfs
, arg3
, 1);
9904 case TARGET_NR_fstatfs64
:
9905 ret
= get_errno(fstatfs(arg1
, &stfs
));
9906 goto convert_statfs64
;
9908 #ifdef TARGET_NR_socketcall
9909 case TARGET_NR_socketcall
:
9910 return do_socketcall(arg1
, arg2
);
9912 #ifdef TARGET_NR_accept
9913 case TARGET_NR_accept
:
9914 return do_accept4(arg1
, arg2
, arg3
, 0);
9916 #ifdef TARGET_NR_accept4
9917 case TARGET_NR_accept4
:
9918 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9920 #ifdef TARGET_NR_bind
9921 case TARGET_NR_bind
:
9922 return do_bind(arg1
, arg2
, arg3
);
9924 #ifdef TARGET_NR_connect
9925 case TARGET_NR_connect
:
9926 return do_connect(arg1
, arg2
, arg3
);
9928 #ifdef TARGET_NR_getpeername
9929 case TARGET_NR_getpeername
:
9930 return do_getpeername(arg1
, arg2
, arg3
);
9932 #ifdef TARGET_NR_getsockname
9933 case TARGET_NR_getsockname
:
9934 return do_getsockname(arg1
, arg2
, arg3
);
9936 #ifdef TARGET_NR_getsockopt
9937 case TARGET_NR_getsockopt
:
9938 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9940 #ifdef TARGET_NR_listen
9941 case TARGET_NR_listen
:
9942 return get_errno(listen(arg1
, arg2
));
9944 #ifdef TARGET_NR_recv
9945 case TARGET_NR_recv
:
9946 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9948 #ifdef TARGET_NR_recvfrom
9949 case TARGET_NR_recvfrom
:
9950 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9952 #ifdef TARGET_NR_recvmsg
9953 case TARGET_NR_recvmsg
:
9954 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9956 #ifdef TARGET_NR_send
9957 case TARGET_NR_send
:
9958 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9960 #ifdef TARGET_NR_sendmsg
9961 case TARGET_NR_sendmsg
:
9962 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9964 #ifdef TARGET_NR_sendmmsg
9965 case TARGET_NR_sendmmsg
:
9966 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9968 #ifdef TARGET_NR_recvmmsg
9969 case TARGET_NR_recvmmsg
:
9970 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9972 #ifdef TARGET_NR_sendto
9973 case TARGET_NR_sendto
:
9974 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9976 #ifdef TARGET_NR_shutdown
9977 case TARGET_NR_shutdown
:
9978 return get_errno(shutdown(arg1
, arg2
));
9980 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9981 case TARGET_NR_getrandom
:
9982 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9984 return -TARGET_EFAULT
;
9986 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9987 unlock_user(p
, arg1
, ret
);
9990 #ifdef TARGET_NR_socket
9991 case TARGET_NR_socket
:
9992 return do_socket(arg1
, arg2
, arg3
);
9994 #ifdef TARGET_NR_socketpair
9995 case TARGET_NR_socketpair
:
9996 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9998 #ifdef TARGET_NR_setsockopt
9999 case TARGET_NR_setsockopt
:
10000 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10002 #if defined(TARGET_NR_syslog)
10003 case TARGET_NR_syslog
:
10008 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10009 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10010 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10011 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10012 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10013 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10014 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10015 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10016 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10017 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10018 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10019 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10022 return -TARGET_EINVAL
;
10027 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10029 return -TARGET_EFAULT
;
10031 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10032 unlock_user(p
, arg2
, arg3
);
10036 return -TARGET_EINVAL
;
10041 case TARGET_NR_setitimer
:
10043 struct itimerval value
, ovalue
, *pvalue
;
10047 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10048 || copy_from_user_timeval(&pvalue
->it_value
,
10049 arg2
+ sizeof(struct target_timeval
)))
10050 return -TARGET_EFAULT
;
10054 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10055 if (!is_error(ret
) && arg3
) {
10056 if (copy_to_user_timeval(arg3
,
10057 &ovalue
.it_interval
)
10058 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10060 return -TARGET_EFAULT
;
10064 case TARGET_NR_getitimer
:
10066 struct itimerval value
;
10068 ret
= get_errno(getitimer(arg1
, &value
));
10069 if (!is_error(ret
) && arg2
) {
10070 if (copy_to_user_timeval(arg2
,
10071 &value
.it_interval
)
10072 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10074 return -TARGET_EFAULT
;
10078 #ifdef TARGET_NR_stat
10079 case TARGET_NR_stat
:
10080 if (!(p
= lock_user_string(arg1
))) {
10081 return -TARGET_EFAULT
;
10083 ret
= get_errno(stat(path(p
), &st
));
10084 unlock_user(p
, arg1
, 0);
10087 #ifdef TARGET_NR_lstat
10088 case TARGET_NR_lstat
:
10089 if (!(p
= lock_user_string(arg1
))) {
10090 return -TARGET_EFAULT
;
10092 ret
= get_errno(lstat(path(p
), &st
));
10093 unlock_user(p
, arg1
, 0);
10096 #ifdef TARGET_NR_fstat
10097 case TARGET_NR_fstat
:
10099 ret
= get_errno(fstat(arg1
, &st
));
10100 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10103 if (!is_error(ret
)) {
10104 struct target_stat
*target_st
;
10106 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10107 return -TARGET_EFAULT
;
10108 memset(target_st
, 0, sizeof(*target_st
));
10109 __put_user(st
.st_dev
, &target_st
->st_dev
);
10110 __put_user(st
.st_ino
, &target_st
->st_ino
);
10111 __put_user(st
.st_mode
, &target_st
->st_mode
);
10112 __put_user(st
.st_uid
, &target_st
->st_uid
);
10113 __put_user(st
.st_gid
, &target_st
->st_gid
);
10114 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10115 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10116 __put_user(st
.st_size
, &target_st
->st_size
);
10117 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10118 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10119 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10120 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10121 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10122 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10123 __put_user(st
.st_atim
.tv_nsec
,
10124 &target_st
->target_st_atime_nsec
);
10125 __put_user(st
.st_mtim
.tv_nsec
,
10126 &target_st
->target_st_mtime_nsec
);
10127 __put_user(st
.st_ctim
.tv_nsec
,
10128 &target_st
->target_st_ctime_nsec
);
10130 unlock_user_struct(target_st
, arg2
, 1);
10135 case TARGET_NR_vhangup
:
10136 return get_errno(vhangup());
10137 #ifdef TARGET_NR_syscall
10138 case TARGET_NR_syscall
:
10139 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10140 arg6
, arg7
, arg8
, 0);
10142 #if defined(TARGET_NR_wait4)
10143 case TARGET_NR_wait4
:
10146 abi_long status_ptr
= arg2
;
10147 struct rusage rusage
, *rusage_ptr
;
10148 abi_ulong target_rusage
= arg4
;
10149 abi_long rusage_err
;
10151 rusage_ptr
= &rusage
;
10154 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10155 if (!is_error(ret
)) {
10156 if (status_ptr
&& ret
) {
10157 status
= host_to_target_waitstatus(status
);
10158 if (put_user_s32(status
, status_ptr
))
10159 return -TARGET_EFAULT
;
10161 if (target_rusage
) {
10162 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10171 #ifdef TARGET_NR_swapoff
10172 case TARGET_NR_swapoff
:
10173 if (!(p
= lock_user_string(arg1
)))
10174 return -TARGET_EFAULT
;
10175 ret
= get_errno(swapoff(p
));
10176 unlock_user(p
, arg1
, 0);
10179 case TARGET_NR_sysinfo
:
10181 struct target_sysinfo
*target_value
;
10182 struct sysinfo value
;
10183 ret
= get_errno(sysinfo(&value
));
10184 if (!is_error(ret
) && arg1
)
10186 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10187 return -TARGET_EFAULT
;
10188 __put_user(value
.uptime
, &target_value
->uptime
);
10189 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10190 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10191 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10192 __put_user(value
.totalram
, &target_value
->totalram
);
10193 __put_user(value
.freeram
, &target_value
->freeram
);
10194 __put_user(value
.sharedram
, &target_value
->sharedram
);
10195 __put_user(value
.bufferram
, &target_value
->bufferram
);
10196 __put_user(value
.totalswap
, &target_value
->totalswap
);
10197 __put_user(value
.freeswap
, &target_value
->freeswap
);
10198 __put_user(value
.procs
, &target_value
->procs
);
10199 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10200 __put_user(value
.freehigh
, &target_value
->freehigh
);
10201 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10202 unlock_user_struct(target_value
, arg1
, 1);
10206 #ifdef TARGET_NR_ipc
10207 case TARGET_NR_ipc
:
10208 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10210 #ifdef TARGET_NR_semget
10211 case TARGET_NR_semget
:
10212 return get_errno(semget(arg1
, arg2
, arg3
));
10214 #ifdef TARGET_NR_semop
10215 case TARGET_NR_semop
:
10216 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10218 #ifdef TARGET_NR_semtimedop
10219 case TARGET_NR_semtimedop
:
10220 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10222 #ifdef TARGET_NR_semtimedop_time64
10223 case TARGET_NR_semtimedop_time64
:
10224 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10226 #ifdef TARGET_NR_semctl
10227 case TARGET_NR_semctl
:
10228 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10230 #ifdef TARGET_NR_msgctl
10231 case TARGET_NR_msgctl
:
10232 return do_msgctl(arg1
, arg2
, arg3
);
10234 #ifdef TARGET_NR_msgget
10235 case TARGET_NR_msgget
:
10236 return get_errno(msgget(arg1
, arg2
));
10238 #ifdef TARGET_NR_msgrcv
10239 case TARGET_NR_msgrcv
:
10240 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10242 #ifdef TARGET_NR_msgsnd
10243 case TARGET_NR_msgsnd
:
10244 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10246 #ifdef TARGET_NR_shmget
10247 case TARGET_NR_shmget
:
10248 return get_errno(shmget(arg1
, arg2
, arg3
));
10250 #ifdef TARGET_NR_shmctl
10251 case TARGET_NR_shmctl
:
10252 return do_shmctl(arg1
, arg2
, arg3
);
10254 #ifdef TARGET_NR_shmat
10255 case TARGET_NR_shmat
:
10256 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10258 #ifdef TARGET_NR_shmdt
10259 case TARGET_NR_shmdt
:
10260 return do_shmdt(arg1
);
10262 case TARGET_NR_fsync
:
10263 return get_errno(fsync(arg1
));
10264 case TARGET_NR_clone
:
10265 /* Linux manages to have three different orderings for its
10266 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10267 * match the kernel's CONFIG_CLONE_* settings.
10268 * Microblaze is further special in that it uses a sixth
10269 * implicit argument to clone for the TLS pointer.
10271 #if defined(TARGET_MICROBLAZE)
10272 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10273 #elif defined(TARGET_CLONE_BACKWARDS)
10274 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10275 #elif defined(TARGET_CLONE_BACKWARDS2)
10276 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10278 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10281 #ifdef __NR_exit_group
10282 /* new thread calls */
10283 case TARGET_NR_exit_group
:
10284 preexit_cleanup(cpu_env
, arg1
);
10285 return get_errno(exit_group(arg1
));
10287 case TARGET_NR_setdomainname
:
10288 if (!(p
= lock_user_string(arg1
)))
10289 return -TARGET_EFAULT
;
10290 ret
= get_errno(setdomainname(p
, arg2
));
10291 unlock_user(p
, arg1
, 0);
10293 case TARGET_NR_uname
:
10294 /* no need to transcode because we use the linux syscall */
10296 struct new_utsname
* buf
;
10298 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10299 return -TARGET_EFAULT
;
10300 ret
= get_errno(sys_uname(buf
));
10301 if (!is_error(ret
)) {
10302 /* Overwrite the native machine name with whatever is being
10304 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10305 sizeof(buf
->machine
));
10306 /* Allow the user to override the reported release. */
10307 if (qemu_uname_release
&& *qemu_uname_release
) {
10308 g_strlcpy(buf
->release
, qemu_uname_release
,
10309 sizeof(buf
->release
));
10312 unlock_user_struct(buf
, arg1
, 1);
10316 case TARGET_NR_modify_ldt
:
10317 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10318 #if !defined(TARGET_X86_64)
10319 case TARGET_NR_vm86
:
10320 return do_vm86(cpu_env
, arg1
, arg2
);
10323 #if defined(TARGET_NR_adjtimex)
10324 case TARGET_NR_adjtimex
:
10326 struct timex host_buf
;
10328 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10329 return -TARGET_EFAULT
;
10331 ret
= get_errno(adjtimex(&host_buf
));
10332 if (!is_error(ret
)) {
10333 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10334 return -TARGET_EFAULT
;
10340 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10341 case TARGET_NR_clock_adjtime
:
10343 struct timex htx
, *phtx
= &htx
;
10345 if (target_to_host_timex(phtx
, arg2
) != 0) {
10346 return -TARGET_EFAULT
;
10348 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10349 if (!is_error(ret
) && phtx
) {
10350 if (host_to_target_timex(arg2
, phtx
) != 0) {
10351 return -TARGET_EFAULT
;
10357 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10358 case TARGET_NR_clock_adjtime64
:
10362 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10363 return -TARGET_EFAULT
;
10365 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10366 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10367 return -TARGET_EFAULT
;
10372 case TARGET_NR_getpgid
:
10373 return get_errno(getpgid(arg1
));
10374 case TARGET_NR_fchdir
:
10375 return get_errno(fchdir(arg1
));
10376 case TARGET_NR_personality
:
10377 return get_errno(personality(arg1
));
10378 #ifdef TARGET_NR__llseek /* Not on alpha */
10379 case TARGET_NR__llseek
:
10382 #if !defined(__NR_llseek)
10383 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10385 ret
= get_errno(res
);
10390 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10392 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10393 return -TARGET_EFAULT
;
10398 #ifdef TARGET_NR_getdents
10399 case TARGET_NR_getdents
:
10400 return do_getdents(arg1
, arg2
, arg3
);
10401 #endif /* TARGET_NR_getdents */
10402 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10403 case TARGET_NR_getdents64
:
10404 return do_getdents64(arg1
, arg2
, arg3
);
10405 #endif /* TARGET_NR_getdents64 */
10406 #if defined(TARGET_NR__newselect)
10407 case TARGET_NR__newselect
:
10408 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10410 #ifdef TARGET_NR_poll
10411 case TARGET_NR_poll
:
10412 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10414 #ifdef TARGET_NR_ppoll
10415 case TARGET_NR_ppoll
:
10416 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10418 #ifdef TARGET_NR_ppoll_time64
10419 case TARGET_NR_ppoll_time64
:
10420 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10422 case TARGET_NR_flock
:
10423 /* NOTE: the flock constant seems to be the same for every
10425 return get_errno(safe_flock(arg1
, arg2
));
10426 case TARGET_NR_readv
:
10428 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10430 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10431 unlock_iovec(vec
, arg2
, arg3
, 1);
10433 ret
= -host_to_target_errno(errno
);
10437 case TARGET_NR_writev
:
10439 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10441 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10442 unlock_iovec(vec
, arg2
, arg3
, 0);
10444 ret
= -host_to_target_errno(errno
);
10448 #if defined(TARGET_NR_preadv)
10449 case TARGET_NR_preadv
:
10451 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10453 unsigned long low
, high
;
10455 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10456 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10457 unlock_iovec(vec
, arg2
, arg3
, 1);
10459 ret
= -host_to_target_errno(errno
);
10464 #if defined(TARGET_NR_pwritev)
10465 case TARGET_NR_pwritev
:
10467 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10469 unsigned long low
, high
;
10471 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10472 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10473 unlock_iovec(vec
, arg2
, arg3
, 0);
10475 ret
= -host_to_target_errno(errno
);
10480 case TARGET_NR_getsid
:
10481 return get_errno(getsid(arg1
));
10482 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10483 case TARGET_NR_fdatasync
:
10484 return get_errno(fdatasync(arg1
));
10486 case TARGET_NR_sched_getaffinity
:
10488 unsigned int mask_size
;
10489 unsigned long *mask
;
10492 * sched_getaffinity needs multiples of ulong, so need to take
10493 * care of mismatches between target ulong and host ulong sizes.
10495 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10496 return -TARGET_EINVAL
;
10498 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10500 mask
= alloca(mask_size
);
10501 memset(mask
, 0, mask_size
);
10502 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10504 if (!is_error(ret
)) {
10506 /* More data returned than the caller's buffer will fit.
10507 * This only happens if sizeof(abi_long) < sizeof(long)
10508 * and the caller passed us a buffer holding an odd number
10509 * of abi_longs. If the host kernel is actually using the
10510 * extra 4 bytes then fail EINVAL; otherwise we can just
10511 * ignore them and only copy the interesting part.
10513 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10514 if (numcpus
> arg2
* 8) {
10515 return -TARGET_EINVAL
;
10520 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10521 return -TARGET_EFAULT
;
10526 case TARGET_NR_sched_setaffinity
:
10528 unsigned int mask_size
;
10529 unsigned long *mask
;
10532 * sched_setaffinity needs multiples of ulong, so need to take
10533 * care of mismatches between target ulong and host ulong sizes.
10535 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10536 return -TARGET_EINVAL
;
10538 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10539 mask
= alloca(mask_size
);
10541 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10546 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10548 case TARGET_NR_getcpu
:
10550 unsigned cpu
, node
;
10551 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10552 arg2
? &node
: NULL
,
10554 if (is_error(ret
)) {
10557 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10558 return -TARGET_EFAULT
;
10560 if (arg2
&& put_user_u32(node
, arg2
)) {
10561 return -TARGET_EFAULT
;
10565 case TARGET_NR_sched_setparam
:
10567 struct sched_param
*target_schp
;
10568 struct sched_param schp
;
10571 return -TARGET_EINVAL
;
10573 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10574 return -TARGET_EFAULT
;
10575 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10576 unlock_user_struct(target_schp
, arg2
, 0);
10577 return get_errno(sched_setparam(arg1
, &schp
));
10579 case TARGET_NR_sched_getparam
:
10581 struct sched_param
*target_schp
;
10582 struct sched_param schp
;
10585 return -TARGET_EINVAL
;
10587 ret
= get_errno(sched_getparam(arg1
, &schp
));
10588 if (!is_error(ret
)) {
10589 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10590 return -TARGET_EFAULT
;
10591 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10592 unlock_user_struct(target_schp
, arg2
, 1);
10596 case TARGET_NR_sched_setscheduler
:
10598 struct sched_param
*target_schp
;
10599 struct sched_param schp
;
10601 return -TARGET_EINVAL
;
10603 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10604 return -TARGET_EFAULT
;
10605 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10606 unlock_user_struct(target_schp
, arg3
, 0);
10607 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10609 case TARGET_NR_sched_getscheduler
:
10610 return get_errno(sched_getscheduler(arg1
));
10611 case TARGET_NR_sched_yield
:
10612 return get_errno(sched_yield());
10613 case TARGET_NR_sched_get_priority_max
:
10614 return get_errno(sched_get_priority_max(arg1
));
10615 case TARGET_NR_sched_get_priority_min
:
10616 return get_errno(sched_get_priority_min(arg1
));
10617 #ifdef TARGET_NR_sched_rr_get_interval
10618 case TARGET_NR_sched_rr_get_interval
:
10620 struct timespec ts
;
10621 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10622 if (!is_error(ret
)) {
10623 ret
= host_to_target_timespec(arg2
, &ts
);
10628 #ifdef TARGET_NR_sched_rr_get_interval_time64
10629 case TARGET_NR_sched_rr_get_interval_time64
:
10631 struct timespec ts
;
10632 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10633 if (!is_error(ret
)) {
10634 ret
= host_to_target_timespec64(arg2
, &ts
);
10639 #if defined(TARGET_NR_nanosleep)
10640 case TARGET_NR_nanosleep
:
10642 struct timespec req
, rem
;
10643 target_to_host_timespec(&req
, arg1
);
10644 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10645 if (is_error(ret
) && arg2
) {
10646 host_to_target_timespec(arg2
, &rem
);
10651 case TARGET_NR_prctl
:
10653 case PR_GET_PDEATHSIG
:
10656 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10657 if (!is_error(ret
) && arg2
10658 && put_user_s32(deathsig
, arg2
)) {
10659 return -TARGET_EFAULT
;
10666 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10668 return -TARGET_EFAULT
;
10670 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10671 arg3
, arg4
, arg5
));
10672 unlock_user(name
, arg2
, 16);
10677 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10679 return -TARGET_EFAULT
;
10681 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10682 arg3
, arg4
, arg5
));
10683 unlock_user(name
, arg2
, 0);
10688 case TARGET_PR_GET_FP_MODE
:
10690 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10692 if (env
->CP0_Status
& (1 << CP0St_FR
)) {
10693 ret
|= TARGET_PR_FP_MODE_FR
;
10695 if (env
->CP0_Config5
& (1 << CP0C5_FRE
)) {
10696 ret
|= TARGET_PR_FP_MODE_FRE
;
10700 case TARGET_PR_SET_FP_MODE
:
10702 CPUMIPSState
*env
= ((CPUMIPSState
*)cpu_env
);
10703 bool old_fr
= env
->CP0_Status
& (1 << CP0St_FR
);
10704 bool old_fre
= env
->CP0_Config5
& (1 << CP0C5_FRE
);
10705 bool new_fr
= arg2
& TARGET_PR_FP_MODE_FR
;
10706 bool new_fre
= arg2
& TARGET_PR_FP_MODE_FRE
;
10708 const unsigned int known_bits
= TARGET_PR_FP_MODE_FR
|
10709 TARGET_PR_FP_MODE_FRE
;
10711 /* If nothing to change, return right away, successfully. */
10712 if (old_fr
== new_fr
&& old_fre
== new_fre
) {
10715 /* Check the value is valid */
10716 if (arg2
& ~known_bits
) {
10717 return -TARGET_EOPNOTSUPP
;
10719 /* Setting FRE without FR is not supported. */
10720 if (new_fre
&& !new_fr
) {
10721 return -TARGET_EOPNOTSUPP
;
10723 if (new_fr
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_F64
))) {
10724 /* FR1 is not supported */
10725 return -TARGET_EOPNOTSUPP
;
10727 if (!new_fr
&& (env
->active_fpu
.fcr0
& (1 << FCR0_F64
))
10728 && !(env
->CP0_Status_rw_bitmask
& (1 << CP0St_FR
))) {
10729 /* cannot set FR=0 */
10730 return -TARGET_EOPNOTSUPP
;
10732 if (new_fre
&& !(env
->active_fpu
.fcr0
& (1 << FCR0_FREP
))) {
10733 /* Cannot set FRE=1 */
10734 return -TARGET_EOPNOTSUPP
;
10738 fpr_t
*fpr
= env
->active_fpu
.fpr
;
10739 for (i
= 0; i
< 32 ; i
+= 2) {
10740 if (!old_fr
&& new_fr
) {
10741 fpr
[i
].w
[!FP_ENDIAN_IDX
] = fpr
[i
+ 1].w
[FP_ENDIAN_IDX
];
10742 } else if (old_fr
&& !new_fr
) {
10743 fpr
[i
+ 1].w
[FP_ENDIAN_IDX
] = fpr
[i
].w
[!FP_ENDIAN_IDX
];
10748 env
->CP0_Status
|= (1 << CP0St_FR
);
10749 env
->hflags
|= MIPS_HFLAG_F64
;
10751 env
->CP0_Status
&= ~(1 << CP0St_FR
);
10752 env
->hflags
&= ~MIPS_HFLAG_F64
;
10755 env
->CP0_Config5
|= (1 << CP0C5_FRE
);
10756 if (env
->active_fpu
.fcr0
& (1 << FCR0_FREP
)) {
10757 env
->hflags
|= MIPS_HFLAG_FRE
;
10760 env
->CP0_Config5
&= ~(1 << CP0C5_FRE
);
10761 env
->hflags
&= ~MIPS_HFLAG_FRE
;
10767 #ifdef TARGET_AARCH64
10768 case TARGET_PR_SVE_SET_VL
:
10770 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10771 * PR_SVE_VL_INHERIT. Note the kernel definition
10772 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10773 * even though the current architectural maximum is VQ=16.
10775 ret
= -TARGET_EINVAL
;
10776 if (cpu_isar_feature(aa64_sve
, env_archcpu(cpu_env
))
10777 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10778 CPUARMState
*env
= cpu_env
;
10779 ARMCPU
*cpu
= env_archcpu(env
);
10780 uint32_t vq
, old_vq
;
10782 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10783 vq
= MAX(arg2
/ 16, 1);
10784 vq
= MIN(vq
, cpu
->sve_max_vq
);
10787 aarch64_sve_narrow_vq(env
, vq
);
10789 env
->vfp
.zcr_el
[1] = vq
- 1;
10790 arm_rebuild_hflags(env
);
10794 case TARGET_PR_SVE_GET_VL
:
10795 ret
= -TARGET_EINVAL
;
10797 ARMCPU
*cpu
= env_archcpu(cpu_env
);
10798 if (cpu_isar_feature(aa64_sve
, cpu
)) {
10799 ret
= ((cpu
->env
.vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10803 case TARGET_PR_PAC_RESET_KEYS
:
10805 CPUARMState
*env
= cpu_env
;
10806 ARMCPU
*cpu
= env_archcpu(env
);
10808 if (arg3
|| arg4
|| arg5
) {
10809 return -TARGET_EINVAL
;
10811 if (cpu_isar_feature(aa64_pauth
, cpu
)) {
10812 int all
= (TARGET_PR_PAC_APIAKEY
| TARGET_PR_PAC_APIBKEY
|
10813 TARGET_PR_PAC_APDAKEY
| TARGET_PR_PAC_APDBKEY
|
10814 TARGET_PR_PAC_APGAKEY
);
10820 } else if (arg2
& ~all
) {
10821 return -TARGET_EINVAL
;
10823 if (arg2
& TARGET_PR_PAC_APIAKEY
) {
10824 ret
|= qemu_guest_getrandom(&env
->keys
.apia
,
10825 sizeof(ARMPACKey
), &err
);
10827 if (arg2
& TARGET_PR_PAC_APIBKEY
) {
10828 ret
|= qemu_guest_getrandom(&env
->keys
.apib
,
10829 sizeof(ARMPACKey
), &err
);
10831 if (arg2
& TARGET_PR_PAC_APDAKEY
) {
10832 ret
|= qemu_guest_getrandom(&env
->keys
.apda
,
10833 sizeof(ARMPACKey
), &err
);
10835 if (arg2
& TARGET_PR_PAC_APDBKEY
) {
10836 ret
|= qemu_guest_getrandom(&env
->keys
.apdb
,
10837 sizeof(ARMPACKey
), &err
);
10839 if (arg2
& TARGET_PR_PAC_APGAKEY
) {
10840 ret
|= qemu_guest_getrandom(&env
->keys
.apga
,
10841 sizeof(ARMPACKey
), &err
);
10845 * Some unknown failure in the crypto. The best
10846 * we can do is log it and fail the syscall.
10847 * The real syscall cannot fail this way.
10849 qemu_log_mask(LOG_UNIMP
,
10850 "PR_PAC_RESET_KEYS: Crypto failure: %s",
10851 error_get_pretty(err
));
10853 return -TARGET_EIO
;
10858 return -TARGET_EINVAL
;
10859 case TARGET_PR_SET_TAGGED_ADDR_CTRL
:
10861 abi_ulong valid_mask
= TARGET_PR_TAGGED_ADDR_ENABLE
;
10862 CPUARMState
*env
= cpu_env
;
10863 ARMCPU
*cpu
= env_archcpu(env
);
10865 if (cpu_isar_feature(aa64_mte
, cpu
)) {
10866 valid_mask
|= TARGET_PR_MTE_TCF_MASK
;
10867 valid_mask
|= TARGET_PR_MTE_TAG_MASK
;
10870 if ((arg2
& ~valid_mask
) || arg3
|| arg4
|| arg5
) {
10871 return -TARGET_EINVAL
;
10873 env
->tagged_addr_enable
= arg2
& TARGET_PR_TAGGED_ADDR_ENABLE
;
10875 if (cpu_isar_feature(aa64_mte
, cpu
)) {
10876 switch (arg2
& TARGET_PR_MTE_TCF_MASK
) {
10877 case TARGET_PR_MTE_TCF_NONE
:
10878 case TARGET_PR_MTE_TCF_SYNC
:
10879 case TARGET_PR_MTE_TCF_ASYNC
:
10886 * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10887 * Note that the syscall values are consistent with hw.
10889 env
->cp15
.sctlr_el
[1] =
10890 deposit64(env
->cp15
.sctlr_el
[1], 38, 2,
10891 arg2
>> TARGET_PR_MTE_TCF_SHIFT
);
10894 * Write PR_MTE_TAG to GCR_EL1[Exclude].
10895 * Note that the syscall uses an include mask,
10896 * and hardware uses an exclude mask -- invert.
10898 env
->cp15
.gcr_el1
=
10899 deposit64(env
->cp15
.gcr_el1
, 0, 16,
10900 ~arg2
>> TARGET_PR_MTE_TAG_SHIFT
);
10901 arm_rebuild_hflags(env
);
10905 case TARGET_PR_GET_TAGGED_ADDR_CTRL
:
10908 CPUARMState
*env
= cpu_env
;
10909 ARMCPU
*cpu
= env_archcpu(env
);
10911 if (arg2
|| arg3
|| arg4
|| arg5
) {
10912 return -TARGET_EINVAL
;
10914 if (env
->tagged_addr_enable
) {
10915 ret
|= TARGET_PR_TAGGED_ADDR_ENABLE
;
10917 if (cpu_isar_feature(aa64_mte
, cpu
)) {
10919 ret
|= (extract64(env
->cp15
.sctlr_el
[1], 38, 2)
10920 << TARGET_PR_MTE_TCF_SHIFT
);
10921 ret
= deposit64(ret
, TARGET_PR_MTE_TAG_SHIFT
, 16,
10922 ~env
->cp15
.gcr_el1
);
10926 #endif /* AARCH64 */
10927 case PR_GET_SECCOMP
:
10928 case PR_SET_SECCOMP
:
10929 /* Disable seccomp to prevent the target disabling syscalls we
10931 return -TARGET_EINVAL
;
10933 /* Most prctl options have no pointer arguments */
10934 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10937 #ifdef TARGET_NR_arch_prctl
10938 case TARGET_NR_arch_prctl
:
10939 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10941 #ifdef TARGET_NR_pread64
10942 case TARGET_NR_pread64
:
10943 if (regpairs_aligned(cpu_env
, num
)) {
10947 if (arg2
== 0 && arg3
== 0) {
10948 /* Special-case NULL buffer and zero length, which should succeed */
10951 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10953 return -TARGET_EFAULT
;
10956 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10957 unlock_user(p
, arg2
, ret
);
10959 case TARGET_NR_pwrite64
:
10960 if (regpairs_aligned(cpu_env
, num
)) {
10964 if (arg2
== 0 && arg3
== 0) {
10965 /* Special-case NULL buffer and zero length, which should succeed */
10968 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10970 return -TARGET_EFAULT
;
10973 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10974 unlock_user(p
, arg2
, 0);
10977 case TARGET_NR_getcwd
:
10978 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10979 return -TARGET_EFAULT
;
10980 ret
= get_errno(sys_getcwd1(p
, arg2
));
10981 unlock_user(p
, arg1
, ret
);
10983 case TARGET_NR_capget
:
10984 case TARGET_NR_capset
:
10986 struct target_user_cap_header
*target_header
;
10987 struct target_user_cap_data
*target_data
= NULL
;
10988 struct __user_cap_header_struct header
;
10989 struct __user_cap_data_struct data
[2];
10990 struct __user_cap_data_struct
*dataptr
= NULL
;
10991 int i
, target_datalen
;
10992 int data_items
= 1;
10994 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10995 return -TARGET_EFAULT
;
10997 header
.version
= tswap32(target_header
->version
);
10998 header
.pid
= tswap32(target_header
->pid
);
11000 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11001 /* Version 2 and up takes pointer to two user_data structs */
11005 target_datalen
= sizeof(*target_data
) * data_items
;
11008 if (num
== TARGET_NR_capget
) {
11009 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11011 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11013 if (!target_data
) {
11014 unlock_user_struct(target_header
, arg1
, 0);
11015 return -TARGET_EFAULT
;
11018 if (num
== TARGET_NR_capset
) {
11019 for (i
= 0; i
< data_items
; i
++) {
11020 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11021 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11022 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11029 if (num
== TARGET_NR_capget
) {
11030 ret
= get_errno(capget(&header
, dataptr
));
11032 ret
= get_errno(capset(&header
, dataptr
));
11035 /* The kernel always updates version for both capget and capset */
11036 target_header
->version
= tswap32(header
.version
);
11037 unlock_user_struct(target_header
, arg1
, 1);
11040 if (num
== TARGET_NR_capget
) {
11041 for (i
= 0; i
< data_items
; i
++) {
11042 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11043 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11044 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11046 unlock_user(target_data
, arg2
, target_datalen
);
11048 unlock_user(target_data
, arg2
, 0);
11053 case TARGET_NR_sigaltstack
:
11054 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11056 #ifdef CONFIG_SENDFILE
11057 #ifdef TARGET_NR_sendfile
11058 case TARGET_NR_sendfile
:
11060 off_t
*offp
= NULL
;
11063 ret
= get_user_sal(off
, arg3
);
11064 if (is_error(ret
)) {
11069 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11070 if (!is_error(ret
) && arg3
) {
11071 abi_long ret2
= put_user_sal(off
, arg3
);
11072 if (is_error(ret2
)) {
11079 #ifdef TARGET_NR_sendfile64
11080 case TARGET_NR_sendfile64
:
11082 off_t
*offp
= NULL
;
11085 ret
= get_user_s64(off
, arg3
);
11086 if (is_error(ret
)) {
11091 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11092 if (!is_error(ret
) && arg3
) {
11093 abi_long ret2
= put_user_s64(off
, arg3
);
11094 if (is_error(ret2
)) {
11102 #ifdef TARGET_NR_vfork
11103 case TARGET_NR_vfork
:
11104 return get_errno(do_fork(cpu_env
,
11105 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11108 #ifdef TARGET_NR_ugetrlimit
11109 case TARGET_NR_ugetrlimit
:
11111 struct rlimit rlim
;
11112 int resource
= target_to_host_resource(arg1
);
11113 ret
= get_errno(getrlimit(resource
, &rlim
));
11114 if (!is_error(ret
)) {
11115 struct target_rlimit
*target_rlim
;
11116 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11117 return -TARGET_EFAULT
;
11118 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11119 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11120 unlock_user_struct(target_rlim
, arg2
, 1);
11125 #ifdef TARGET_NR_truncate64
11126 case TARGET_NR_truncate64
:
11127 if (!(p
= lock_user_string(arg1
)))
11128 return -TARGET_EFAULT
;
11129 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11130 unlock_user(p
, arg1
, 0);
11133 #ifdef TARGET_NR_ftruncate64
11134 case TARGET_NR_ftruncate64
:
11135 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11137 #ifdef TARGET_NR_stat64
11138 case TARGET_NR_stat64
:
11139 if (!(p
= lock_user_string(arg1
))) {
11140 return -TARGET_EFAULT
;
11142 ret
= get_errno(stat(path(p
), &st
));
11143 unlock_user(p
, arg1
, 0);
11144 if (!is_error(ret
))
11145 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11148 #ifdef TARGET_NR_lstat64
11149 case TARGET_NR_lstat64
:
11150 if (!(p
= lock_user_string(arg1
))) {
11151 return -TARGET_EFAULT
;
11153 ret
= get_errno(lstat(path(p
), &st
));
11154 unlock_user(p
, arg1
, 0);
11155 if (!is_error(ret
))
11156 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11159 #ifdef TARGET_NR_fstat64
11160 case TARGET_NR_fstat64
:
11161 ret
= get_errno(fstat(arg1
, &st
));
11162 if (!is_error(ret
))
11163 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11166 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11167 #ifdef TARGET_NR_fstatat64
11168 case TARGET_NR_fstatat64
:
11170 #ifdef TARGET_NR_newfstatat
11171 case TARGET_NR_newfstatat
:
11173 if (!(p
= lock_user_string(arg2
))) {
11174 return -TARGET_EFAULT
;
11176 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11177 unlock_user(p
, arg2
, 0);
11178 if (!is_error(ret
))
11179 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11182 #if defined(TARGET_NR_statx)
11183 case TARGET_NR_statx
:
11185 struct target_statx
*target_stx
;
11189 p
= lock_user_string(arg2
);
11191 return -TARGET_EFAULT
;
11193 #if defined(__NR_statx)
11196 * It is assumed that struct statx is architecture independent.
11198 struct target_statx host_stx
;
11201 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11202 if (!is_error(ret
)) {
11203 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11204 unlock_user(p
, arg2
, 0);
11205 return -TARGET_EFAULT
;
11209 if (ret
!= -TARGET_ENOSYS
) {
11210 unlock_user(p
, arg2
, 0);
11215 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11216 unlock_user(p
, arg2
, 0);
11218 if (!is_error(ret
)) {
11219 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11220 return -TARGET_EFAULT
;
11222 memset(target_stx
, 0, sizeof(*target_stx
));
11223 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11224 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11225 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11226 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11227 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11228 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11229 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11230 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11231 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11232 __put_user(st
.st_size
, &target_stx
->stx_size
);
11233 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11234 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11235 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11236 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11237 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11238 unlock_user_struct(target_stx
, arg5
, 1);
11243 #ifdef TARGET_NR_lchown
11244 case TARGET_NR_lchown
:
11245 if (!(p
= lock_user_string(arg1
)))
11246 return -TARGET_EFAULT
;
11247 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11248 unlock_user(p
, arg1
, 0);
11251 #ifdef TARGET_NR_getuid
11252 case TARGET_NR_getuid
:
11253 return get_errno(high2lowuid(getuid()));
11255 #ifdef TARGET_NR_getgid
11256 case TARGET_NR_getgid
:
11257 return get_errno(high2lowgid(getgid()));
11259 #ifdef TARGET_NR_geteuid
11260 case TARGET_NR_geteuid
:
11261 return get_errno(high2lowuid(geteuid()));
11263 #ifdef TARGET_NR_getegid
11264 case TARGET_NR_getegid
:
11265 return get_errno(high2lowgid(getegid()));
11267 case TARGET_NR_setreuid
:
11268 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11269 case TARGET_NR_setregid
:
11270 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11271 case TARGET_NR_getgroups
:
11273 int gidsetsize
= arg1
;
11274 target_id
*target_grouplist
;
11278 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11279 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11280 if (gidsetsize
== 0)
11282 if (!is_error(ret
)) {
11283 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11284 if (!target_grouplist
)
11285 return -TARGET_EFAULT
;
11286 for(i
= 0;i
< ret
; i
++)
11287 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11288 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11292 case TARGET_NR_setgroups
:
11294 int gidsetsize
= arg1
;
11295 target_id
*target_grouplist
;
11296 gid_t
*grouplist
= NULL
;
11299 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11300 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11301 if (!target_grouplist
) {
11302 return -TARGET_EFAULT
;
11304 for (i
= 0; i
< gidsetsize
; i
++) {
11305 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11307 unlock_user(target_grouplist
, arg2
, 0);
11309 return get_errno(setgroups(gidsetsize
, grouplist
));
11311 case TARGET_NR_fchown
:
11312 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11313 #if defined(TARGET_NR_fchownat)
11314 case TARGET_NR_fchownat
:
11315 if (!(p
= lock_user_string(arg2
)))
11316 return -TARGET_EFAULT
;
11317 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11318 low2highgid(arg4
), arg5
));
11319 unlock_user(p
, arg2
, 0);
11322 #ifdef TARGET_NR_setresuid
11323 case TARGET_NR_setresuid
:
11324 return get_errno(sys_setresuid(low2highuid(arg1
),
11326 low2highuid(arg3
)));
11328 #ifdef TARGET_NR_getresuid
11329 case TARGET_NR_getresuid
:
11331 uid_t ruid
, euid
, suid
;
11332 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11333 if (!is_error(ret
)) {
11334 if (put_user_id(high2lowuid(ruid
), arg1
)
11335 || put_user_id(high2lowuid(euid
), arg2
)
11336 || put_user_id(high2lowuid(suid
), arg3
))
11337 return -TARGET_EFAULT
;
11342 #ifdef TARGET_NR_getresgid
11343 case TARGET_NR_setresgid
:
11344 return get_errno(sys_setresgid(low2highgid(arg1
),
11346 low2highgid(arg3
)));
11348 #ifdef TARGET_NR_getresgid
11349 case TARGET_NR_getresgid
:
11351 gid_t rgid
, egid
, sgid
;
11352 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11353 if (!is_error(ret
)) {
11354 if (put_user_id(high2lowgid(rgid
), arg1
)
11355 || put_user_id(high2lowgid(egid
), arg2
)
11356 || put_user_id(high2lowgid(sgid
), arg3
))
11357 return -TARGET_EFAULT
;
11362 #ifdef TARGET_NR_chown
11363 case TARGET_NR_chown
:
11364 if (!(p
= lock_user_string(arg1
)))
11365 return -TARGET_EFAULT
;
11366 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11367 unlock_user(p
, arg1
, 0);
11370 case TARGET_NR_setuid
:
11371 return get_errno(sys_setuid(low2highuid(arg1
)));
11372 case TARGET_NR_setgid
:
11373 return get_errno(sys_setgid(low2highgid(arg1
)));
11374 case TARGET_NR_setfsuid
:
11375 return get_errno(setfsuid(arg1
));
11376 case TARGET_NR_setfsgid
:
11377 return get_errno(setfsgid(arg1
));
11379 #ifdef TARGET_NR_lchown32
11380 case TARGET_NR_lchown32
:
11381 if (!(p
= lock_user_string(arg1
)))
11382 return -TARGET_EFAULT
;
11383 ret
= get_errno(lchown(p
, arg2
, arg3
));
11384 unlock_user(p
, arg1
, 0);
11387 #ifdef TARGET_NR_getuid32
11388 case TARGET_NR_getuid32
:
11389 return get_errno(getuid());
11392 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11393 /* Alpha specific */
11394 case TARGET_NR_getxuid
:
11398 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11400 return get_errno(getuid());
11402 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11403 /* Alpha specific */
11404 case TARGET_NR_getxgid
:
11408 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11410 return get_errno(getgid());
11412 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11413 /* Alpha specific */
11414 case TARGET_NR_osf_getsysinfo
:
11415 ret
= -TARGET_EOPNOTSUPP
;
11417 case TARGET_GSI_IEEE_FP_CONTROL
:
11419 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11420 uint64_t swcr
= ((CPUAlphaState
*)cpu_env
)->swcr
;
11422 swcr
&= ~SWCR_STATUS_MASK
;
11423 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11425 if (put_user_u64 (swcr
, arg2
))
11426 return -TARGET_EFAULT
;
11431 /* case GSI_IEEE_STATE_AT_SIGNAL:
11432 -- Not implemented in linux kernel.
11434 -- Retrieves current unaligned access state; not much used.
11435 case GSI_PROC_TYPE:
11436 -- Retrieves implver information; surely not used.
11437 case GSI_GET_HWRPB:
11438 -- Grabs a copy of the HWRPB; surely not used.
11443 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11444 /* Alpha specific */
11445 case TARGET_NR_osf_setsysinfo
:
11446 ret
= -TARGET_EOPNOTSUPP
;
11448 case TARGET_SSI_IEEE_FP_CONTROL
:
11450 uint64_t swcr
, fpcr
;
11452 if (get_user_u64 (swcr
, arg2
)) {
11453 return -TARGET_EFAULT
;
11457 * The kernel calls swcr_update_status to update the
11458 * status bits from the fpcr at every point that it
11459 * could be queried. Therefore, we store the status
11460 * bits only in FPCR.
11462 ((CPUAlphaState
*)cpu_env
)->swcr
11463 = swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11465 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11466 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11467 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11468 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11473 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11475 uint64_t exc
, fpcr
, fex
;
11477 if (get_user_u64(exc
, arg2
)) {
11478 return -TARGET_EFAULT
;
11480 exc
&= SWCR_STATUS_MASK
;
11481 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11483 /* Old exceptions are not signaled. */
11484 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11486 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11487 fex
&= ((CPUArchState
*)cpu_env
)->swcr
;
11489 /* Update the hardware fpcr. */
11490 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11491 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11494 int si_code
= TARGET_FPE_FLTUNK
;
11495 target_siginfo_t info
;
11497 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11498 si_code
= TARGET_FPE_FLTUND
;
11500 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11501 si_code
= TARGET_FPE_FLTRES
;
11503 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11504 si_code
= TARGET_FPE_FLTUND
;
11506 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11507 si_code
= TARGET_FPE_FLTOVF
;
11509 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11510 si_code
= TARGET_FPE_FLTDIV
;
11512 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11513 si_code
= TARGET_FPE_FLTINV
;
11516 info
.si_signo
= SIGFPE
;
11518 info
.si_code
= si_code
;
11519 info
._sifields
._sigfault
._addr
11520 = ((CPUArchState
*)cpu_env
)->pc
;
11521 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11522 QEMU_SI_FAULT
, &info
);
11528 /* case SSI_NVPAIRS:
11529 -- Used with SSIN_UACPROC to enable unaligned accesses.
11530 case SSI_IEEE_STATE_AT_SIGNAL:
11531 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11532 -- Not implemented in linux kernel
11537 #ifdef TARGET_NR_osf_sigprocmask
11538 /* Alpha specific. */
11539 case TARGET_NR_osf_sigprocmask
:
11543 sigset_t set
, oldset
;
11546 case TARGET_SIG_BLOCK
:
11549 case TARGET_SIG_UNBLOCK
:
11552 case TARGET_SIG_SETMASK
:
11556 return -TARGET_EINVAL
;
11559 target_to_host_old_sigset(&set
, &mask
);
11560 ret
= do_sigprocmask(how
, &set
, &oldset
);
11562 host_to_target_old_sigset(&mask
, &oldset
);
11569 #ifdef TARGET_NR_getgid32
11570 case TARGET_NR_getgid32
:
11571 return get_errno(getgid());
11573 #ifdef TARGET_NR_geteuid32
11574 case TARGET_NR_geteuid32
:
11575 return get_errno(geteuid());
11577 #ifdef TARGET_NR_getegid32
11578 case TARGET_NR_getegid32
:
11579 return get_errno(getegid());
11581 #ifdef TARGET_NR_setreuid32
11582 case TARGET_NR_setreuid32
:
11583 return get_errno(setreuid(arg1
, arg2
));
11585 #ifdef TARGET_NR_setregid32
11586 case TARGET_NR_setregid32
:
11587 return get_errno(setregid(arg1
, arg2
));
11589 #ifdef TARGET_NR_getgroups32
11590 case TARGET_NR_getgroups32
:
11592 int gidsetsize
= arg1
;
11593 uint32_t *target_grouplist
;
11597 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11598 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11599 if (gidsetsize
== 0)
11601 if (!is_error(ret
)) {
11602 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11603 if (!target_grouplist
) {
11604 return -TARGET_EFAULT
;
11606 for(i
= 0;i
< ret
; i
++)
11607 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11608 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11613 #ifdef TARGET_NR_setgroups32
11614 case TARGET_NR_setgroups32
:
11616 int gidsetsize
= arg1
;
11617 uint32_t *target_grouplist
;
11621 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11622 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11623 if (!target_grouplist
) {
11624 return -TARGET_EFAULT
;
11626 for(i
= 0;i
< gidsetsize
; i
++)
11627 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11628 unlock_user(target_grouplist
, arg2
, 0);
11629 return get_errno(setgroups(gidsetsize
, grouplist
));
11632 #ifdef TARGET_NR_fchown32
11633 case TARGET_NR_fchown32
:
11634 return get_errno(fchown(arg1
, arg2
, arg3
));
11636 #ifdef TARGET_NR_setresuid32
11637 case TARGET_NR_setresuid32
:
11638 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11640 #ifdef TARGET_NR_getresuid32
11641 case TARGET_NR_getresuid32
:
11643 uid_t ruid
, euid
, suid
;
11644 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11645 if (!is_error(ret
)) {
11646 if (put_user_u32(ruid
, arg1
)
11647 || put_user_u32(euid
, arg2
)
11648 || put_user_u32(suid
, arg3
))
11649 return -TARGET_EFAULT
;
11654 #ifdef TARGET_NR_setresgid32
11655 case TARGET_NR_setresgid32
:
11656 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11658 #ifdef TARGET_NR_getresgid32
11659 case TARGET_NR_getresgid32
:
11661 gid_t rgid
, egid
, sgid
;
11662 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11663 if (!is_error(ret
)) {
11664 if (put_user_u32(rgid
, arg1
)
11665 || put_user_u32(egid
, arg2
)
11666 || put_user_u32(sgid
, arg3
))
11667 return -TARGET_EFAULT
;
11672 #ifdef TARGET_NR_chown32
11673 case TARGET_NR_chown32
:
11674 if (!(p
= lock_user_string(arg1
)))
11675 return -TARGET_EFAULT
;
11676 ret
= get_errno(chown(p
, arg2
, arg3
));
11677 unlock_user(p
, arg1
, 0);
11680 #ifdef TARGET_NR_setuid32
11681 case TARGET_NR_setuid32
:
11682 return get_errno(sys_setuid(arg1
));
11684 #ifdef TARGET_NR_setgid32
11685 case TARGET_NR_setgid32
:
11686 return get_errno(sys_setgid(arg1
));
11688 #ifdef TARGET_NR_setfsuid32
11689 case TARGET_NR_setfsuid32
:
11690 return get_errno(setfsuid(arg1
));
11692 #ifdef TARGET_NR_setfsgid32
11693 case TARGET_NR_setfsgid32
:
11694 return get_errno(setfsgid(arg1
));
11696 #ifdef TARGET_NR_mincore
11697 case TARGET_NR_mincore
:
11699 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11701 return -TARGET_ENOMEM
;
11703 p
= lock_user_string(arg3
);
11705 ret
= -TARGET_EFAULT
;
11707 ret
= get_errno(mincore(a
, arg2
, p
));
11708 unlock_user(p
, arg3
, ret
);
11710 unlock_user(a
, arg1
, 0);
11714 #ifdef TARGET_NR_arm_fadvise64_64
11715 case TARGET_NR_arm_fadvise64_64
:
11716 /* arm_fadvise64_64 looks like fadvise64_64 but
11717 * with different argument order: fd, advice, offset, len
11718 * rather than the usual fd, offset, len, advice.
11719 * Note that offset and len are both 64-bit so appear as
11720 * pairs of 32-bit registers.
11722 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11723 target_offset64(arg5
, arg6
), arg2
);
11724 return -host_to_target_errno(ret
);
11727 #if TARGET_ABI_BITS == 32
11729 #ifdef TARGET_NR_fadvise64_64
11730 case TARGET_NR_fadvise64_64
:
11731 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11732 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11740 /* 6 args: fd, offset (high, low), len (high, low), advice */
11741 if (regpairs_aligned(cpu_env
, num
)) {
11742 /* offset is in (3,4), len in (5,6) and advice in 7 */
11750 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11751 target_offset64(arg4
, arg5
), arg6
);
11752 return -host_to_target_errno(ret
);
11755 #ifdef TARGET_NR_fadvise64
11756 case TARGET_NR_fadvise64
:
11757 /* 5 args: fd, offset (high, low), len, advice */
11758 if (regpairs_aligned(cpu_env
, num
)) {
11759 /* offset is in (3,4), len in 5 and advice in 6 */
11765 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11766 return -host_to_target_errno(ret
);
11769 #else /* not a 32-bit ABI */
11770 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11771 #ifdef TARGET_NR_fadvise64_64
11772 case TARGET_NR_fadvise64_64
:
11774 #ifdef TARGET_NR_fadvise64
11775 case TARGET_NR_fadvise64
:
11777 #ifdef TARGET_S390X
11779 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11780 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11781 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11782 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11786 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11788 #endif /* end of 64-bit ABI fadvise handling */
11790 #ifdef TARGET_NR_madvise
11791 case TARGET_NR_madvise
:
11792 /* A straight passthrough may not be safe because qemu sometimes
11793 turns private file-backed mappings into anonymous mappings.
11794 This will break MADV_DONTNEED.
11795 This is a hint, so ignoring and returning success is ok. */
11798 #ifdef TARGET_NR_fcntl64
11799 case TARGET_NR_fcntl64
:
11803 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11804 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11807 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11808 copyfrom
= copy_from_user_oabi_flock64
;
11809 copyto
= copy_to_user_oabi_flock64
;
11813 cmd
= target_to_host_fcntl_cmd(arg2
);
11814 if (cmd
== -TARGET_EINVAL
) {
11819 case TARGET_F_GETLK64
:
11820 ret
= copyfrom(&fl
, arg3
);
11824 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11826 ret
= copyto(arg3
, &fl
);
11830 case TARGET_F_SETLK64
:
11831 case TARGET_F_SETLKW64
:
11832 ret
= copyfrom(&fl
, arg3
);
11836 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11839 ret
= do_fcntl(arg1
, arg2
, arg3
);
11845 #ifdef TARGET_NR_cacheflush
11846 case TARGET_NR_cacheflush
:
11847 /* self-modifying code is handled automatically, so nothing needed */
11850 #ifdef TARGET_NR_getpagesize
11851 case TARGET_NR_getpagesize
:
11852 return TARGET_PAGE_SIZE
;
11854 case TARGET_NR_gettid
:
11855 return get_errno(sys_gettid());
11856 #ifdef TARGET_NR_readahead
11857 case TARGET_NR_readahead
:
11858 #if TARGET_ABI_BITS == 32
11859 if (regpairs_aligned(cpu_env
, num
)) {
11864 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11866 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11871 #ifdef TARGET_NR_setxattr
11872 case TARGET_NR_listxattr
:
11873 case TARGET_NR_llistxattr
:
11877 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11879 return -TARGET_EFAULT
;
11882 p
= lock_user_string(arg1
);
11884 if (num
== TARGET_NR_listxattr
) {
11885 ret
= get_errno(listxattr(p
, b
, arg3
));
11887 ret
= get_errno(llistxattr(p
, b
, arg3
));
11890 ret
= -TARGET_EFAULT
;
11892 unlock_user(p
, arg1
, 0);
11893 unlock_user(b
, arg2
, arg3
);
11896 case TARGET_NR_flistxattr
:
11900 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11902 return -TARGET_EFAULT
;
11905 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11906 unlock_user(b
, arg2
, arg3
);
11909 case TARGET_NR_setxattr
:
11910 case TARGET_NR_lsetxattr
:
11912 void *p
, *n
, *v
= 0;
11914 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11916 return -TARGET_EFAULT
;
11919 p
= lock_user_string(arg1
);
11920 n
= lock_user_string(arg2
);
11922 if (num
== TARGET_NR_setxattr
) {
11923 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11925 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11928 ret
= -TARGET_EFAULT
;
11930 unlock_user(p
, arg1
, 0);
11931 unlock_user(n
, arg2
, 0);
11932 unlock_user(v
, arg3
, 0);
11935 case TARGET_NR_fsetxattr
:
11939 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11941 return -TARGET_EFAULT
;
11944 n
= lock_user_string(arg2
);
11946 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11948 ret
= -TARGET_EFAULT
;
11950 unlock_user(n
, arg2
, 0);
11951 unlock_user(v
, arg3
, 0);
11954 case TARGET_NR_getxattr
:
11955 case TARGET_NR_lgetxattr
:
11957 void *p
, *n
, *v
= 0;
11959 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11961 return -TARGET_EFAULT
;
11964 p
= lock_user_string(arg1
);
11965 n
= lock_user_string(arg2
);
11967 if (num
== TARGET_NR_getxattr
) {
11968 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11970 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11973 ret
= -TARGET_EFAULT
;
11975 unlock_user(p
, arg1
, 0);
11976 unlock_user(n
, arg2
, 0);
11977 unlock_user(v
, arg3
, arg4
);
11980 case TARGET_NR_fgetxattr
:
11984 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11986 return -TARGET_EFAULT
;
11989 n
= lock_user_string(arg2
);
11991 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11993 ret
= -TARGET_EFAULT
;
11995 unlock_user(n
, arg2
, 0);
11996 unlock_user(v
, arg3
, arg4
);
11999 case TARGET_NR_removexattr
:
12000 case TARGET_NR_lremovexattr
:
12003 p
= lock_user_string(arg1
);
12004 n
= lock_user_string(arg2
);
12006 if (num
== TARGET_NR_removexattr
) {
12007 ret
= get_errno(removexattr(p
, n
));
12009 ret
= get_errno(lremovexattr(p
, n
));
12012 ret
= -TARGET_EFAULT
;
12014 unlock_user(p
, arg1
, 0);
12015 unlock_user(n
, arg2
, 0);
12018 case TARGET_NR_fremovexattr
:
12021 n
= lock_user_string(arg2
);
12023 ret
= get_errno(fremovexattr(arg1
, n
));
12025 ret
= -TARGET_EFAULT
;
12027 unlock_user(n
, arg2
, 0);
12031 #endif /* CONFIG_ATTR */
12032 #ifdef TARGET_NR_set_thread_area
12033 case TARGET_NR_set_thread_area
:
12034 #if defined(TARGET_MIPS)
12035 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12037 #elif defined(TARGET_CRIS)
12039 ret
= -TARGET_EINVAL
;
12041 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12045 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12046 return do_set_thread_area(cpu_env
, arg1
);
12047 #elif defined(TARGET_M68K)
12049 TaskState
*ts
= cpu
->opaque
;
12050 ts
->tp_value
= arg1
;
12054 return -TARGET_ENOSYS
;
12057 #ifdef TARGET_NR_get_thread_area
12058 case TARGET_NR_get_thread_area
:
12059 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12060 return do_get_thread_area(cpu_env
, arg1
);
12061 #elif defined(TARGET_M68K)
12063 TaskState
*ts
= cpu
->opaque
;
12064 return ts
->tp_value
;
12067 return -TARGET_ENOSYS
;
12070 #ifdef TARGET_NR_getdomainname
12071 case TARGET_NR_getdomainname
:
12072 return -TARGET_ENOSYS
;
12075 #ifdef TARGET_NR_clock_settime
12076 case TARGET_NR_clock_settime
:
12078 struct timespec ts
;
12080 ret
= target_to_host_timespec(&ts
, arg2
);
12081 if (!is_error(ret
)) {
12082 ret
= get_errno(clock_settime(arg1
, &ts
));
12087 #ifdef TARGET_NR_clock_settime64
12088 case TARGET_NR_clock_settime64
:
12090 struct timespec ts
;
12092 ret
= target_to_host_timespec64(&ts
, arg2
);
12093 if (!is_error(ret
)) {
12094 ret
= get_errno(clock_settime(arg1
, &ts
));
12099 #ifdef TARGET_NR_clock_gettime
12100 case TARGET_NR_clock_gettime
:
12102 struct timespec ts
;
12103 ret
= get_errno(clock_gettime(arg1
, &ts
));
12104 if (!is_error(ret
)) {
12105 ret
= host_to_target_timespec(arg2
, &ts
);
12110 #ifdef TARGET_NR_clock_gettime64
12111 case TARGET_NR_clock_gettime64
:
12113 struct timespec ts
;
12114 ret
= get_errno(clock_gettime(arg1
, &ts
));
12115 if (!is_error(ret
)) {
12116 ret
= host_to_target_timespec64(arg2
, &ts
);
12121 #ifdef TARGET_NR_clock_getres
12122 case TARGET_NR_clock_getres
:
12124 struct timespec ts
;
12125 ret
= get_errno(clock_getres(arg1
, &ts
));
12126 if (!is_error(ret
)) {
12127 host_to_target_timespec(arg2
, &ts
);
12132 #ifdef TARGET_NR_clock_getres_time64
12133 case TARGET_NR_clock_getres_time64
:
12135 struct timespec ts
;
12136 ret
= get_errno(clock_getres(arg1
, &ts
));
12137 if (!is_error(ret
)) {
12138 host_to_target_timespec64(arg2
, &ts
);
12143 #ifdef TARGET_NR_clock_nanosleep
12144 case TARGET_NR_clock_nanosleep
:
12146 struct timespec ts
;
12147 if (target_to_host_timespec(&ts
, arg3
)) {
12148 return -TARGET_EFAULT
;
12150 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12151 &ts
, arg4
? &ts
: NULL
));
12153 * if the call is interrupted by a signal handler, it fails
12154 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12155 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12157 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12158 host_to_target_timespec(arg4
, &ts
)) {
12159 return -TARGET_EFAULT
;
12165 #ifdef TARGET_NR_clock_nanosleep_time64
12166 case TARGET_NR_clock_nanosleep_time64
:
12168 struct timespec ts
;
12170 if (target_to_host_timespec64(&ts
, arg3
)) {
12171 return -TARGET_EFAULT
;
12174 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12175 &ts
, arg4
? &ts
: NULL
));
12177 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12178 host_to_target_timespec64(arg4
, &ts
)) {
12179 return -TARGET_EFAULT
;
12185 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12186 case TARGET_NR_set_tid_address
:
12187 return get_errno(set_tid_address((int *)g2h(cpu
, arg1
)));
12190 case TARGET_NR_tkill
:
12191 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12193 case TARGET_NR_tgkill
:
12194 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12195 target_to_host_signal(arg3
)));
12197 #ifdef TARGET_NR_set_robust_list
12198 case TARGET_NR_set_robust_list
:
12199 case TARGET_NR_get_robust_list
:
12200 /* The ABI for supporting robust futexes has userspace pass
12201 * the kernel a pointer to a linked list which is updated by
12202 * userspace after the syscall; the list is walked by the kernel
12203 * when the thread exits. Since the linked list in QEMU guest
12204 * memory isn't a valid linked list for the host and we have
12205 * no way to reliably intercept the thread-death event, we can't
12206 * support these. Silently return ENOSYS so that guest userspace
12207 * falls back to a non-robust futex implementation (which should
12208 * be OK except in the corner case of the guest crashing while
12209 * holding a mutex that is shared with another process via
12212 return -TARGET_ENOSYS
;
12215 #if defined(TARGET_NR_utimensat)
12216 case TARGET_NR_utimensat
:
12218 struct timespec
*tsp
, ts
[2];
12222 if (target_to_host_timespec(ts
, arg3
)) {
12223 return -TARGET_EFAULT
;
12225 if (target_to_host_timespec(ts
+ 1, arg3
+
12226 sizeof(struct target_timespec
))) {
12227 return -TARGET_EFAULT
;
12232 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12234 if (!(p
= lock_user_string(arg2
))) {
12235 return -TARGET_EFAULT
;
12237 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12238 unlock_user(p
, arg2
, 0);
12243 #ifdef TARGET_NR_utimensat_time64
12244 case TARGET_NR_utimensat_time64
:
12246 struct timespec
*tsp
, ts
[2];
12250 if (target_to_host_timespec64(ts
, arg3
)) {
12251 return -TARGET_EFAULT
;
12253 if (target_to_host_timespec64(ts
+ 1, arg3
+
12254 sizeof(struct target__kernel_timespec
))) {
12255 return -TARGET_EFAULT
;
12260 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12262 p
= lock_user_string(arg2
);
12264 return -TARGET_EFAULT
;
12266 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12267 unlock_user(p
, arg2
, 0);
12272 #ifdef TARGET_NR_futex
12273 case TARGET_NR_futex
:
12274 return do_futex(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12276 #ifdef TARGET_NR_futex_time64
12277 case TARGET_NR_futex_time64
:
12278 return do_futex_time64(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12280 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12281 case TARGET_NR_inotify_init
:
12282 ret
= get_errno(sys_inotify_init());
12284 fd_trans_register(ret
, &target_inotify_trans
);
12288 #ifdef CONFIG_INOTIFY1
12289 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12290 case TARGET_NR_inotify_init1
:
12291 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12292 fcntl_flags_tbl
)));
12294 fd_trans_register(ret
, &target_inotify_trans
);
12299 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12300 case TARGET_NR_inotify_add_watch
:
12301 p
= lock_user_string(arg2
);
12302 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12303 unlock_user(p
, arg2
, 0);
12306 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12307 case TARGET_NR_inotify_rm_watch
:
12308 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12311 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12312 case TARGET_NR_mq_open
:
12314 struct mq_attr posix_mq_attr
;
12315 struct mq_attr
*pposix_mq_attr
;
12318 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12319 pposix_mq_attr
= NULL
;
12321 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12322 return -TARGET_EFAULT
;
12324 pposix_mq_attr
= &posix_mq_attr
;
12326 p
= lock_user_string(arg1
- 1);
12328 return -TARGET_EFAULT
;
12330 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12331 unlock_user (p
, arg1
, 0);
12335 case TARGET_NR_mq_unlink
:
12336 p
= lock_user_string(arg1
- 1);
12338 return -TARGET_EFAULT
;
12340 ret
= get_errno(mq_unlink(p
));
12341 unlock_user (p
, arg1
, 0);
12344 #ifdef TARGET_NR_mq_timedsend
12345 case TARGET_NR_mq_timedsend
:
12347 struct timespec ts
;
12349 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12351 if (target_to_host_timespec(&ts
, arg5
)) {
12352 return -TARGET_EFAULT
;
12354 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12355 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12356 return -TARGET_EFAULT
;
12359 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12361 unlock_user (p
, arg2
, arg3
);
12365 #ifdef TARGET_NR_mq_timedsend_time64
12366 case TARGET_NR_mq_timedsend_time64
:
12368 struct timespec ts
;
12370 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12372 if (target_to_host_timespec64(&ts
, arg5
)) {
12373 return -TARGET_EFAULT
;
12375 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12376 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12377 return -TARGET_EFAULT
;
12380 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12382 unlock_user(p
, arg2
, arg3
);
12387 #ifdef TARGET_NR_mq_timedreceive
12388 case TARGET_NR_mq_timedreceive
:
12390 struct timespec ts
;
12393 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12395 if (target_to_host_timespec(&ts
, arg5
)) {
12396 return -TARGET_EFAULT
;
12398 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12400 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12401 return -TARGET_EFAULT
;
12404 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12407 unlock_user (p
, arg2
, arg3
);
12409 put_user_u32(prio
, arg4
);
12413 #ifdef TARGET_NR_mq_timedreceive_time64
12414 case TARGET_NR_mq_timedreceive_time64
:
12416 struct timespec ts
;
12419 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12421 if (target_to_host_timespec64(&ts
, arg5
)) {
12422 return -TARGET_EFAULT
;
12424 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12426 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12427 return -TARGET_EFAULT
;
12430 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12433 unlock_user(p
, arg2
, arg3
);
12435 put_user_u32(prio
, arg4
);
12441 /* Not implemented for now... */
12442 /* case TARGET_NR_mq_notify: */
12445 case TARGET_NR_mq_getsetattr
:
12447 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12450 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12451 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12452 &posix_mq_attr_out
));
12453 } else if (arg3
!= 0) {
12454 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12456 if (ret
== 0 && arg3
!= 0) {
12457 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12463 #ifdef CONFIG_SPLICE
12464 #ifdef TARGET_NR_tee
12465 case TARGET_NR_tee
:
12467 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12471 #ifdef TARGET_NR_splice
12472 case TARGET_NR_splice
:
12474 loff_t loff_in
, loff_out
;
12475 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12477 if (get_user_u64(loff_in
, arg2
)) {
12478 return -TARGET_EFAULT
;
12480 ploff_in
= &loff_in
;
12483 if (get_user_u64(loff_out
, arg4
)) {
12484 return -TARGET_EFAULT
;
12486 ploff_out
= &loff_out
;
12488 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12490 if (put_user_u64(loff_in
, arg2
)) {
12491 return -TARGET_EFAULT
;
12495 if (put_user_u64(loff_out
, arg4
)) {
12496 return -TARGET_EFAULT
;
12502 #ifdef TARGET_NR_vmsplice
12503 case TARGET_NR_vmsplice
:
12505 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12507 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12508 unlock_iovec(vec
, arg2
, arg3
, 0);
12510 ret
= -host_to_target_errno(errno
);
12515 #endif /* CONFIG_SPLICE */
12516 #ifdef CONFIG_EVENTFD
12517 #if defined(TARGET_NR_eventfd)
12518 case TARGET_NR_eventfd
:
12519 ret
= get_errno(eventfd(arg1
, 0));
12521 fd_trans_register(ret
, &target_eventfd_trans
);
12525 #if defined(TARGET_NR_eventfd2)
12526 case TARGET_NR_eventfd2
:
12528 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12529 if (arg2
& TARGET_O_NONBLOCK
) {
12530 host_flags
|= O_NONBLOCK
;
12532 if (arg2
& TARGET_O_CLOEXEC
) {
12533 host_flags
|= O_CLOEXEC
;
12535 ret
= get_errno(eventfd(arg1
, host_flags
));
12537 fd_trans_register(ret
, &target_eventfd_trans
);
12542 #endif /* CONFIG_EVENTFD */
12543 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12544 case TARGET_NR_fallocate
:
12545 #if TARGET_ABI_BITS == 32
12546 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12547 target_offset64(arg5
, arg6
)));
12549 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12553 #if defined(CONFIG_SYNC_FILE_RANGE)
12554 #if defined(TARGET_NR_sync_file_range)
12555 case TARGET_NR_sync_file_range
:
12556 #if TARGET_ABI_BITS == 32
12557 #if defined(TARGET_MIPS)
12558 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12559 target_offset64(arg5
, arg6
), arg7
));
12561 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12562 target_offset64(arg4
, arg5
), arg6
));
12563 #endif /* !TARGET_MIPS */
12565 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12569 #if defined(TARGET_NR_sync_file_range2) || \
12570 defined(TARGET_NR_arm_sync_file_range)
12571 #if defined(TARGET_NR_sync_file_range2)
12572 case TARGET_NR_sync_file_range2
:
12574 #if defined(TARGET_NR_arm_sync_file_range)
12575 case TARGET_NR_arm_sync_file_range
:
12577 /* This is like sync_file_range but the arguments are reordered */
12578 #if TARGET_ABI_BITS == 32
12579 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12580 target_offset64(arg5
, arg6
), arg2
));
12582 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12587 #if defined(TARGET_NR_signalfd4)
12588 case TARGET_NR_signalfd4
:
12589 return do_signalfd4(arg1
, arg2
, arg4
);
12591 #if defined(TARGET_NR_signalfd)
12592 case TARGET_NR_signalfd
:
12593 return do_signalfd4(arg1
, arg2
, 0);
12595 #if defined(CONFIG_EPOLL)
12596 #if defined(TARGET_NR_epoll_create)
12597 case TARGET_NR_epoll_create
:
12598 return get_errno(epoll_create(arg1
));
12600 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12601 case TARGET_NR_epoll_create1
:
12602 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12604 #if defined(TARGET_NR_epoll_ctl)
12605 case TARGET_NR_epoll_ctl
:
12607 struct epoll_event ep
;
12608 struct epoll_event
*epp
= 0;
12610 if (arg2
!= EPOLL_CTL_DEL
) {
12611 struct target_epoll_event
*target_ep
;
12612 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12613 return -TARGET_EFAULT
;
12615 ep
.events
= tswap32(target_ep
->events
);
12617 * The epoll_data_t union is just opaque data to the kernel,
12618 * so we transfer all 64 bits across and need not worry what
12619 * actual data type it is.
12621 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12622 unlock_user_struct(target_ep
, arg4
, 0);
12625 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12626 * non-null pointer, even though this argument is ignored.
12631 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12635 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12636 #if defined(TARGET_NR_epoll_wait)
12637 case TARGET_NR_epoll_wait
:
12639 #if defined(TARGET_NR_epoll_pwait)
12640 case TARGET_NR_epoll_pwait
:
12643 struct target_epoll_event
*target_ep
;
12644 struct epoll_event
*ep
;
12646 int maxevents
= arg3
;
12647 int timeout
= arg4
;
12649 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12650 return -TARGET_EINVAL
;
12653 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12654 maxevents
* sizeof(struct target_epoll_event
), 1);
12656 return -TARGET_EFAULT
;
12659 ep
= g_try_new(struct epoll_event
, maxevents
);
12661 unlock_user(target_ep
, arg2
, 0);
12662 return -TARGET_ENOMEM
;
12666 #if defined(TARGET_NR_epoll_pwait)
12667 case TARGET_NR_epoll_pwait
:
12669 target_sigset_t
*target_set
;
12670 sigset_t _set
, *set
= &_set
;
12673 if (arg6
!= sizeof(target_sigset_t
)) {
12674 ret
= -TARGET_EINVAL
;
12678 target_set
= lock_user(VERIFY_READ
, arg5
,
12679 sizeof(target_sigset_t
), 1);
12681 ret
= -TARGET_EFAULT
;
12684 target_to_host_sigset(set
, target_set
);
12685 unlock_user(target_set
, arg5
, 0);
12690 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12691 set
, SIGSET_T_SIZE
));
12695 #if defined(TARGET_NR_epoll_wait)
12696 case TARGET_NR_epoll_wait
:
12697 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12702 ret
= -TARGET_ENOSYS
;
12704 if (!is_error(ret
)) {
12706 for (i
= 0; i
< ret
; i
++) {
12707 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12708 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12710 unlock_user(target_ep
, arg2
,
12711 ret
* sizeof(struct target_epoll_event
));
12713 unlock_user(target_ep
, arg2
, 0);
12720 #ifdef TARGET_NR_prlimit64
12721 case TARGET_NR_prlimit64
:
12723 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12724 struct target_rlimit64
*target_rnew
, *target_rold
;
12725 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12726 int resource
= target_to_host_resource(arg2
);
12728 if (arg3
&& (resource
!= RLIMIT_AS
&&
12729 resource
!= RLIMIT_DATA
&&
12730 resource
!= RLIMIT_STACK
)) {
12731 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12732 return -TARGET_EFAULT
;
12734 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12735 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12736 unlock_user_struct(target_rnew
, arg3
, 0);
12740 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12741 if (!is_error(ret
) && arg4
) {
12742 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12743 return -TARGET_EFAULT
;
12745 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12746 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12747 unlock_user_struct(target_rold
, arg4
, 1);
12752 #ifdef TARGET_NR_gethostname
12753 case TARGET_NR_gethostname
:
12755 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12757 ret
= get_errno(gethostname(name
, arg2
));
12758 unlock_user(name
, arg1
, arg2
);
12760 ret
= -TARGET_EFAULT
;
12765 #ifdef TARGET_NR_atomic_cmpxchg_32
12766 case TARGET_NR_atomic_cmpxchg_32
:
12768 /* should use start_exclusive from main.c */
12769 abi_ulong mem_value
;
12770 if (get_user_u32(mem_value
, arg6
)) {
12771 target_siginfo_t info
;
12772 info
.si_signo
= SIGSEGV
;
12774 info
.si_code
= TARGET_SEGV_MAPERR
;
12775 info
._sifields
._sigfault
._addr
= arg6
;
12776 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12777 QEMU_SI_FAULT
, &info
);
12781 if (mem_value
== arg2
)
12782 put_user_u32(arg1
, arg6
);
12786 #ifdef TARGET_NR_atomic_barrier
12787 case TARGET_NR_atomic_barrier
:
12788 /* Like the kernel implementation and the
12789 qemu arm barrier, no-op this? */
12793 #ifdef TARGET_NR_timer_create
12794 case TARGET_NR_timer_create
:
12796 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12798 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12801 int timer_index
= next_free_host_timer();
12803 if (timer_index
< 0) {
12804 ret
= -TARGET_EAGAIN
;
12806 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12809 phost_sevp
= &host_sevp
;
12810 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12816 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12820 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12821 return -TARGET_EFAULT
;
12829 #ifdef TARGET_NR_timer_settime
12830 case TARGET_NR_timer_settime
:
12832 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12833 * struct itimerspec * old_value */
12834 target_timer_t timerid
= get_timer_id(arg1
);
12838 } else if (arg3
== 0) {
12839 ret
= -TARGET_EINVAL
;
12841 timer_t htimer
= g_posix_timers
[timerid
];
12842 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12844 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12845 return -TARGET_EFAULT
;
12848 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12849 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12850 return -TARGET_EFAULT
;
12857 #ifdef TARGET_NR_timer_settime64
12858 case TARGET_NR_timer_settime64
:
12860 target_timer_t timerid
= get_timer_id(arg1
);
12864 } else if (arg3
== 0) {
12865 ret
= -TARGET_EINVAL
;
12867 timer_t htimer
= g_posix_timers
[timerid
];
12868 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12870 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12871 return -TARGET_EFAULT
;
12874 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12875 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12876 return -TARGET_EFAULT
;
12883 #ifdef TARGET_NR_timer_gettime
12884 case TARGET_NR_timer_gettime
:
12886 /* args: timer_t timerid, struct itimerspec *curr_value */
12887 target_timer_t timerid
= get_timer_id(arg1
);
12891 } else if (!arg2
) {
12892 ret
= -TARGET_EFAULT
;
12894 timer_t htimer
= g_posix_timers
[timerid
];
12895 struct itimerspec hspec
;
12896 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12898 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12899 ret
= -TARGET_EFAULT
;
12906 #ifdef TARGET_NR_timer_gettime64
12907 case TARGET_NR_timer_gettime64
:
12909 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12910 target_timer_t timerid
= get_timer_id(arg1
);
12914 } else if (!arg2
) {
12915 ret
= -TARGET_EFAULT
;
12917 timer_t htimer
= g_posix_timers
[timerid
];
12918 struct itimerspec hspec
;
12919 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12921 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12922 ret
= -TARGET_EFAULT
;
12929 #ifdef TARGET_NR_timer_getoverrun
12930 case TARGET_NR_timer_getoverrun
:
12932 /* args: timer_t timerid */
12933 target_timer_t timerid
= get_timer_id(arg1
);
12938 timer_t htimer
= g_posix_timers
[timerid
];
12939 ret
= get_errno(timer_getoverrun(htimer
));
12945 #ifdef TARGET_NR_timer_delete
12946 case TARGET_NR_timer_delete
:
12948 /* args: timer_t timerid */
12949 target_timer_t timerid
= get_timer_id(arg1
);
12954 timer_t htimer
= g_posix_timers
[timerid
];
12955 ret
= get_errno(timer_delete(htimer
));
12956 g_posix_timers
[timerid
] = 0;
12962 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12963 case TARGET_NR_timerfd_create
:
12964 return get_errno(timerfd_create(arg1
,
12965 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12968 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12969 case TARGET_NR_timerfd_gettime
:
12971 struct itimerspec its_curr
;
12973 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12975 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12976 return -TARGET_EFAULT
;
12982 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12983 case TARGET_NR_timerfd_gettime64
:
12985 struct itimerspec its_curr
;
12987 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12989 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
12990 return -TARGET_EFAULT
;
12996 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12997 case TARGET_NR_timerfd_settime
:
12999 struct itimerspec its_new
, its_old
, *p_new
;
13002 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13003 return -TARGET_EFAULT
;
13010 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13012 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13013 return -TARGET_EFAULT
;
13019 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13020 case TARGET_NR_timerfd_settime64
:
13022 struct itimerspec its_new
, its_old
, *p_new
;
13025 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13026 return -TARGET_EFAULT
;
13033 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13035 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13036 return -TARGET_EFAULT
;
13042 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13043 case TARGET_NR_ioprio_get
:
13044 return get_errno(ioprio_get(arg1
, arg2
));
13047 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13048 case TARGET_NR_ioprio_set
:
13049 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13052 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13053 case TARGET_NR_setns
:
13054 return get_errno(setns(arg1
, arg2
));
13056 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13057 case TARGET_NR_unshare
:
13058 return get_errno(unshare(arg1
));
13060 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13061 case TARGET_NR_kcmp
:
13062 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13064 #ifdef TARGET_NR_swapcontext
13065 case TARGET_NR_swapcontext
:
13066 /* PowerPC specific. */
13067 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13069 #ifdef TARGET_NR_memfd_create
13070 case TARGET_NR_memfd_create
:
13071 p
= lock_user_string(arg1
);
13073 return -TARGET_EFAULT
;
13075 ret
= get_errno(memfd_create(p
, arg2
));
13076 fd_trans_unregister(ret
);
13077 unlock_user(p
, arg1
, 0);
13080 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13081 case TARGET_NR_membarrier
:
13082 return get_errno(membarrier(arg1
, arg2
));
13085 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13086 case TARGET_NR_copy_file_range
:
13088 loff_t inoff
, outoff
;
13089 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13092 if (get_user_u64(inoff
, arg2
)) {
13093 return -TARGET_EFAULT
;
13098 if (get_user_u64(outoff
, arg4
)) {
13099 return -TARGET_EFAULT
;
13103 /* Do not sign-extend the count parameter. */
13104 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13105 (abi_ulong
)arg5
, arg6
));
13106 if (!is_error(ret
) && ret
> 0) {
13108 if (put_user_u64(inoff
, arg2
)) {
13109 return -TARGET_EFAULT
;
13113 if (put_user_u64(outoff
, arg4
)) {
13114 return -TARGET_EFAULT
;
13122 #if defined(TARGET_NR_pivot_root)
13123 case TARGET_NR_pivot_root
:
13126 p
= lock_user_string(arg1
); /* new_root */
13127 p2
= lock_user_string(arg2
); /* put_old */
13129 ret
= -TARGET_EFAULT
;
13131 ret
= get_errno(pivot_root(p
, p2
));
13133 unlock_user(p2
, arg2
, 0);
13134 unlock_user(p
, arg1
, 0);
13140 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13141 return -TARGET_ENOSYS
;
13146 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
13147 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13148 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13151 CPUState
*cpu
= env_cpu(cpu_env
);
13154 #ifdef DEBUG_ERESTARTSYS
13155 /* Debug-only code for exercising the syscall-restart code paths
13156 * in the per-architecture cpu main loops: restart every syscall
13157 * the guest makes once before letting it through.
13163 return -TARGET_ERESTARTSYS
;
13168 record_syscall_start(cpu
, num
, arg1
,
13169 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13171 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13172 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13175 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13176 arg5
, arg6
, arg7
, arg8
);
13178 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13179 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13180 arg3
, arg4
, arg5
, arg6
);
13183 record_syscall_return(cpu
, num
, ret
);