/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef CONFIG_BTRFS
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
130 #include "user-internals.h"
132 #include "signal-common.h"
134 #include "user-mmap.h"
135 #include "safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "qapi/error.h"
140 #include "fd-trans.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
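
/* Illustrative example (not exhaustive): glibc's pthread_create() passes
 * CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|CLONE_SYSVSEM
 * plus CLONE_SETTLS|CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID, which matches
 * CLONE_THREAD_FLAGS plus a subset of CLONE_OPTIONAL_THREAD_FLAGS and so is
 * emulated via a host pthread_create(); a plain fork() passes only SIGCHLD
 * (within CSIGNAL), which contains none of CLONE_THREAD_FLAGS and so takes
 * the fork() path.
 */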
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
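
/* For instance, "_syscall2(int, sys_example, int, a, long, b)" expands to a
 * static wrapper "int sys_example(int a, long b)" whose body invokes
 * syscall(__NR_sys_example, a, b); the __NR_sys_* aliases below map such
 * wrapper names onto the real host syscall numbers. (sys_example is a
 * hypothetical name used only for illustration.)
 */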
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx
#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif
#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
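
/* Concretely: with a 64-bit guest on a 32-bit host, each converted record
 * grows (d_ino/d_off widen to 64 bits), so a host getdents() that filled
 * the buffer completely could overflow the guest buffer after conversion;
 * getdents64 keeps the host record at least as large as the guest one.
 */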
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif
/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
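
/* Each row maps (target_mask, target_bits, host_mask, host_bits): a guest
 * open() flags word is translated by checking whether
 * (flags & target_mask) == target_bits and, if so, OR-ing host_bits into
 * the host flags (and symmetrically for host-to-target). For example, a
 * guest passing TARGET_O_CREAT | TARGET_O_NONBLOCK yields host
 * O_CREAT | O_NONBLOCK even when the numeric values differ.
 */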
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1) {
        return -host_to_target_errno(errno);
    } else {
        return ret;
    }
}
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
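
/* A note on the safe_syscall* wrappers below (declared via safe-syscall.h):
 * unlike the plain _syscall* macros above, they cooperate with QEMU's guest
 * signal handling so that a guest signal arriving just before the host
 * syscall blocks interrupts it and the syscall reports -TARGET_ERESTARTSYS
 * instead of being delayed until the host syscall completes.
 */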
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#ifdef __s390x__
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
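
/* Usage sketch, per the rule above: a lock query is issued as
 *     struct flock64 fl;
 *     safe_fcntl(fd, F_GETLK64, &fl);
 * so the same call site gets 64-bit file offsets whether the host fcntl
 * syscall is __NR_fcntl64 (32-bit hosts) or __NR_fcntl (64-bit hosts).
 */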
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
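
/* For example, a host socket of type SOCK_STREAM | SOCK_NONBLOCK is
 * reported to the guest as TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK,
 * which matters on targets (such as mips) whose numeric constants differ
 * from the host's.
 */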
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
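
/* The fd_set conversion helpers below exist because the guest sees an
 * fd_set as an array of abi_ulong words in target byte order, so we
 * rebuild the host fd_set bit by bit with FD_SET()/FD_ISSET() rather than
 * copying it wholesale.
 */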
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
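
/* For example, with HOST_HZ 100 and a TARGET_HZ of 1024, 50 host clock
 * ticks are reported to the guest as 50 * 1024 / 100 = 512 ticks.
 */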
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
#ifdef TARGET_NR_setrlimit
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
#endif
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
#endif
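
/* E.g. for a 32-bit guest on a 64-bit host, a host limit of 8 GiB cannot
 * be represented in the guest's abi_ulong, so the guest is shown
 * TARGET_RLIM_INFINITY rather than a silently truncated value.
 */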
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);

            sig.set = &set;
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        target_sigset_t *target_set;
        sigset_t _set, *set = &_set;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            if (arg5 != sizeof(target_sigset_t)) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EINVAL;
            }

            target_set = lock_user(VERIFY_READ, arg4,
                                   sizeof(target_sigset_t), 1);
            if (!target_set) {
                unlock_user(target_pfd, arg1, 0);
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(set, target_set);
        } else {
            set = NULL;
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        if (arg4) {
            unlock_user(target_set, arg4, 0);
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
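
/* The two cmsg converters below walk ancillary data record by record.
 * For example, a guest sendmsg() carrying one SCM_RIGHTS file descriptor
 * presents a 4-byte payload whose value target_to_host_cmsg() reads with
 * __get_user() (handling any endianness difference) before the host
 * sendmsg() is issued.
 */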
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1831 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1832 struct msghdr
*msgh
)
1834 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1835 abi_long msg_controllen
;
1836 abi_ulong target_cmsg_addr
;
1837 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1838 socklen_t space
= 0;
1840 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1841 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1843 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1844 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1845 target_cmsg_start
= target_cmsg
;
1847 return -TARGET_EFAULT
;
1849 while (cmsg
&& target_cmsg
) {
1850 void *data
= CMSG_DATA(cmsg
);
1851 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1853 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1854 int tgt_len
, tgt_space
;
1856 /* We never copy a half-header but may copy half-data;
1857 * this is Linux's behaviour in put_cmsg(). Note that
1858 * truncation here is a guest problem (which we report
1859 * to the guest via the CTRUNC bit), unlike truncation
1860 * in target_to_host_cmsg, which is a QEMU bug.
1862 if (msg_controllen
            < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
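
/*
 * Editor's illustrative sketch (not part of the original source): the
 * loop above mirrors the kernel's put_cmsg() accounting. For a control
 * message whose converted payload is tgt_len bytes, the guest-visible
 * header claims TARGET_CMSG_LEN(tgt_len) bytes while the cursor
 * advances by the aligned TARGET_CMSG_SPACE(tgt_len); when the
 * remaining msg_controllen cannot hold even one target_cmsghdr, the
 * payload is truncated and MSG_CTRUNC is reported, which is a
 * guest-visible condition rather than a QEMU bug. The helper below is
 * a hypothetical restatement of that rule, not code the emulation uses.
 */
G_GNUC_UNUSED static inline socklen_t
cmsg_space_consumed_sketch(socklen_t tgt_len, socklen_t msg_controllen)
{
    /* The header advertises TARGET_CMSG_LEN(tgt_len) bytes... */
    socklen_t tgt_space = TARGET_CMSG_SPACE(tgt_len);

    /* ...but we never consume more than the guest buffer still has. */
    if (msg_controllen < tgt_space) {
        tgt_space = msg_controllen;
    }
    return tgt_space;
}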
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch (level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch (optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof(struct target_ip_mreq) ||
                optlen > sizeof(struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq, optlen));
            break;
        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof(struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       ip_mreq_source, optlen));
            unlock_user(ip_mreq_source, optval_addr, 0);
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
            struct timeval tv;

            optname = SO_RCVTIMEO;

set_timeout:
            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                       SO_ATTACH_FILTER, &fprog,
                                       sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user(dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                   &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch (level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument. */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch (optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
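
/*
 * Editor's illustrative sketch (not part of the original source): with
 * a 32-bit guest (TARGET_LONG_BITS == 32) on a 64-bit host
 * (HOST_LONG_BITS == 64), a pair such as tlow = 0x89abcdef and
 * thigh = 0x01234567 combines into off = 0x0123456789abcdef, which
 * then fits entirely in *hlow while *hhigh becomes 0. The shift by
 * half the word width is applied twice because a single shift by the
 * full width would be undefined behaviour in C.
 */
G_GNUC_UNUSED static void target_to_host_low_high_sketch(void)
{
    unsigned long hlow, hhigh;

    /* hypothetical llseek-style offset pair from the guest */
    target_to_host_low_high(0x89abcdef, 0x01234567, &hlow, &hhigh);
}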
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
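
/*
 * Editor's usage note (sketch, not original code): lock_iovec() and
 * unlock_iovec() must always be paired, with 'copy' expressing the
 * data direction -- a VERIFY_READ vector is unlocked with copy=0
 * (nothing to write back to the guest), a VERIFY_WRITE vector with
 * copy=1 so the guest sees the received data. A readv-style emulation
 * would plausibly look like this (safe_readv as used elsewhere in
 * this file):
 */
#if 0
    struct iovec *vec = lock_iovec(VERIFY_WRITE, target_addr, count, 0);
    if (vec != NULL) {
        ret = get_errno(safe_readv(fd, vec, count));
        unlock_iovec(vec, target_addr, count, 1);   /* copy back to guest */
    } else {
        ret = -host_to_target_errno(errno);
    }
#endif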
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
/* Try to emulate socket type flags after socket creation. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case:
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen + 1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen + 1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen + 1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendmsg/recvmsg returns a different errno for this condition
         * than readv/writev, so we must catch it here before lock_iovec()
         * does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                      MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
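
/*
 * Editor's note (sketch, not original code): the partial-success rule
 * above matches the kernel's sendmmsg/recvmmsg contract -- once at
 * least one datagram has been transferred, the count is returned and
 * the error from the failing iteration is dropped. A guest therefore
 * has to inspect msg_len of each returned entry rather than rely on
 * errno, as in this hypothetical guest-side fragment:
 */
#if 0
    /* got may be 1..4 even if a later slot hit EAGAIN */
    int got = recvmmsg(fd, msgvec, 4, MSG_WAITFORONE, NULL);
#endif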
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen + 1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }

    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
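
/*
 * Editor's note (sketch, not original code): on socketcall
 * architectures the guest passes one multiplexer number plus a guest
 * pointer to an array of abi_long words; e.g. a guest connect() is
 * marshalled as TARGET_SYS_CONNECT with vptr pointing at
 * { sockfd, addr, addrlen }, and the loop above fetches
 * nargs[TARGET_SYS_CONNECT] == 3 words before dispatching to
 * do_connect(). The guest addresses below are hypothetical.
 */
#if 0
    /* hypothetical guest-side equivalent of connect() via socketcall */
    abi_long ret = do_socketcall(TARGET_SYS_CONNECT,
                                 args_gaddr /* guest ptr to 3 abi_longs */);
#endif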
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];

#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid,
                                               unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems * sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for (i = 0; i < nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid,
                                               abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems * sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for (i = 0; i < nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
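
/*
 * Editor's worked example (not original code) for the byteswap dance
 * above: take a big-endian 64-bit guest on a little-endian 64-bit
 * host, with the guest setting val = 0x12345678. Fetched as one
 * abi_ulong, the guest's 8-byte semun reads 0x1234567800000000, so
 * target_su.val would pick up the zero half. tswapal() first turns the
 * whole buffer into 0x0000000078563412, moving the value into the
 * right half, and tswap32() then repairs the byte order of the 4-byte
 * field, yielding 0x12345678. When int and abi_ulong have the same
 * size, no fixup is needed.
 */
#if 0 /* sketch of the two-step fixup with the concrete values above */
    target_su.buf = tswapal(target_su.buf); /* 0x1234567800000000 ->
                                               0x0000000078563412 */
    arg.val = tswap32(target_su.val);       /* 0x78563412 -> 0x12345678 */
#endif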
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops * sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which pass the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops,
                                                     (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
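
/*
 * Editor's note (not original code): SEMTIMEDOP_IPC_ARGS only reorders
 * arguments. On s390x the safe_ipc() call above expands to
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, pts, sops)
 * while the default expansion is
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, pts)
 * matching the five- and six-argument kernel sys_ipc conventions.
 */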
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#if defined(__sparc__)
/* SPARC's msgrcv does not use the kludge on the final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
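
/*
 * Illustrative sketch (not part of the build): an architecture whose
 * SHMLBA is larger than a page would define TARGET_FORCE_SHMLBA and
 * supply its own hook instead of the default above. The four-page
 * value here is hypothetical, chosen only for the example:
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 */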
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations. This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr) {
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    } else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else {
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
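
/*
 * Worked example of the SHM_RND rounding above, with a hypothetical
 * shmlba of 0x4000 (16 KiB): shmaddr = 0x12345 has low bits set, so
 * without SHM_RND it is rejected with -TARGET_EINVAL; with SHM_RND,
 * shmaddr &= ~(0x4000 - 1) yields 0x10000, the next lower 16 KiB
 * boundary.
 */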
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
        /*
         * The s390 sys_ipc variant has only five parameters instead of six
         * (as for default variant) and the only difference is the handling of
         * SEMTIMEDOP where on s390 the third parameter is used as a pointer
         * to a struct timespec where the generic variant uses fifth parameter.
         */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096
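
/*
 * Illustration of the two-pass expansion above, for a syscall_types.h
 * entry such as (shown here only as an example of the pattern):
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * The first pass contributes an enum constant STRUCT_winsize; the
 * second pass emits its field descriptor:
 *
 *     static const argtype struct_winsize_def[] = {
 *         TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
 *     };
 */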
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
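
/*
 * Sketch of the guest-side call this path services (illustrative only;
 * the buffer size of 8 extents is an arbitrary choice for the example):
 *
 *     struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                8 * sizeof(struct fiemap_extent));
 *     fm->fm_length = ~0ULL;
 *     fm->fm_extent_count = 8;
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *     // on success, fm->fm_mapped_extents extents are filled in
 */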
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
                lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
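
/*
 * The reap path above relies on a container-of style recovery: the
 * kernel hands back the host URB pointer that was submitted, and
 * subtracting offsetof(struct live_urb, host_urb) turns it back into
 * the wrapping live_urb, e.g.:
 *
 *     uint64_t hurb;        // pointer value returned by the kernel
 *     struct live_urb *lurb =
 *         (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 */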
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
                                     lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl *)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char *)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t *)host_data = tswap64(*(uint64_t *)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char *)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char *)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void *)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void *)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char *)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char *)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void *)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void *)host_dm + host_dm->data_start;
            int count = *(uint32_t *)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t *)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers =
                (void *)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void *)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void *)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}
static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
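
/*
 * Example of how one table entry expands. ioctls.h declares entries of
 * the form (TIOCGWINSZ is used here purely as an illustration):
 *
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 *
 * which the IOCTL() macro above turns into
 *
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 */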
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for (;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOSYS;
    }

    switch (arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch (ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
    { 0, 0, 0, 0 }
};
static void target_to_host_termios(void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios(void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
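
/*
 * Sketch of how a bitmask_transtbl drives these conversions (a hedged
 * paraphrase of the generic helper, not a new API): each entry is
 * { target_mask, target_bits, host_mask, host_bits }, and
 * target_to_host_bitmask() ORs in host_bits whenever the masked target
 * value equals target_bits. For a one-bit flag such as ICANON the
 * entry { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON } behaves like:
 *
 *     if ((tswap32(target->c_lflag) & TARGET_ICANON) == TARGET_ICANON) {
 *         host_lflag |= ICANON;
 *     }
 */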
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
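
/*
 * Worked example of the descriptor packing above, with hypothetical
 * values base_addr = 0x12345678 and limit = 0xfffff:
 *
 *     entry_1 = (0x5678 << 16) | 0xffff   = 0x5678ffff
 *     entry_2 = 0x12000000                  (base bits 31..24, in place)
 *             | 0x00000034                  (base bits 23..16, at bits 7..0)
 *             | 0x000f0000                  (limit bits 19..16)
 *             | type/flag bits (9, 10-11, 15, 20-23) | 0x7000
 *
 * matching the x86 segment-descriptor layout split across two 32-bit
 * words.
 */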
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ... */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch (code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */

#endif /* defined(TARGET_I386) */
#define NEW_STACK_SIZE 0x40000

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals. */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready. */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state. */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic. */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent. */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls(new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals. */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed. */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize. */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process. */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock. */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls(env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
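
/*
 * For reference, the flag combination glibc's pthread_create() passes
 * to clone() -- the shape the CLONE_VM branch above is designed to
 * accept -- is typically:
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD
 *     | CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID
 *     | CLONE_CHILD_CLEARTID
 */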
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, the glibc headers have the F_*LK* constants defined to 12,
     * 13 and 14, which are not supported by the kernel. The glibc fcntl
     * call actually adjusts them to 5, 6 and 7 before making the syscall().
     * Since we make the syscall directly, adjust to what is supported by
     * the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
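/*
 * Worked example for the PPC64 adjustment above (a sketch, assuming the
 * usual glibc values F_GETLK64 == 12, F_SETLK64 == 13, F_SETLKW64 == 14):
 *
 *     12 - (12 - 5) == 5,   13 - (12 - 5) == 6,   14 - (12 - 5) == 7
 *
 * i.e. the glibc header constants are shifted down onto the numbers the
 * kernel actually accepts.
 */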
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
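/*
 * For illustration, the X-macro above expands in the target-to-host
 * direction to (preprocessor output shown as a sketch, not extra code):
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 */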
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}

#ifdef USE_UID16
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
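/*
 * Illustration of why the raw wrappers above are used (a hypothetical
 * guest snippet, not part of QEMU):
 *
 *     sys_setuid(1000);   // kernel semantics: only this thread changes
 *     setuid(1000);       // glibc would broadcast the change to every
 *                         // thread in the process (POSIX semantics)
 *
 * A guest thread calling setuid() expects the per-thread kernel
 * behaviour, so the emulation must bypass the host libc.
 */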
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
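/*
 * Sketch of the size patching done above, assuming the conventional
 * asm-generic _IOC() layout (2 bits direction, 14 bits size, 8 bits
 * type, 8 bits number; some targets use a 13-bit size field):
 *
 *     dir:2 | size:14 | type:8 | nr:8
 *
 * An all-ones size field marks entries whose size must be computed at
 * startup; it is overwritten with thunk_type_size() of the pointed-to
 * structure for the current target ABI.
 */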
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}

static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex)
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
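/*
 * Note on the tswap32(val) in the FUTEX_WAIT path above: the kernel
 * compares *uaddr with val, and *uaddr holds a guest-endian value.  As
 * a concrete example, a big-endian guest on a little-endian host that
 * waits for the value 1 must issue the host futex call with
 * val == 0x01000000, which is exactly what tswap32() produces.
 */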
#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
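/*
 * Example of the conversion above: a child killed by host SIGUSR1
 * (signal 10 on x86 hosts) yields status 0x000a; for a target that
 * numbers SIGUSR1 differently (say 30, as on Alpha) the guest sees
 * 0x001e, while the core-dump bit (0x80) and the remaining status bits
 * pass through unchanged.
 */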
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
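/*
 * For illustration, with entry == "maps" and a QEMU pid of 4242, the
 * function above accepts "/proc/self/maps" and "/proc/4242/maps" but
 * rejects "/proc/1/maps" (some other process) and a plain "maps".
 */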
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
    defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
#endif

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
#if defined(TARGET_SPARC)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif

#if defined(TARGET_HPPA)
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
#endif

#if defined(TARGET_M68K)
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
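/*
 * End-to-end sketch of the interception above: a guest
 * open("/proc/self/maps", O_RDONLY) matches the "maps"/is_proc_myself
 * entry, so a temporary host file is created with mkstemp(), filled by
 * open_self_maps(), rewound and returned to the guest; the guest never
 * sees the host's own /proc contents.
 */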
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
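/*
 * Round-trip example for the encoding above: internal timer index 3 is
 * exposed to the guest as TIMER_MAGIC | 3 == 0x0caf0003; get_timer_id()
 * verifies the 0x0caf mark and recovers index 3, while a forged id such
 * as 0x12340003 fails the mask test and yields -TARGET_EINVAL.
 */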
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
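/*
 * Usage sketch (simplified from the sched_getaffinity handling further
 * down; the buffer size is hypothetical): the host syscall fills a host
 * bitmap which is then re-packed into guest-sized, guest-endian words:
 *
 *     unsigned long mask[32];
 *     ret = get_errno(sys_sched_getaffinity(pid, sizeof(mask), mask));
 *     if (!is_error(ret)) {
 *         ret = host_to_target_cpu_mask(mask, sizeof(mask), arg3, arg2);
 *     }
 */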
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch(num) {
8169 case TARGET_NR_exit
:
8170 /* In old applications this may be used to implement _exit(2).
8171 However in threaded applications it is used for thread termination,
8172 and _exit_group is used for application termination.
8173 Do thread termination if we have more then one thread. */
8175 if (block_signals()) {
8176 return -TARGET_ERESTARTSYS
;
8179 pthread_mutex_lock(&clone_lock
);
8181 if (CPU_NEXT(first_cpu
)) {
8182 TaskState
*ts
= cpu
->opaque
;
8184 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8185 object_unref(OBJECT(cpu
));
8187 * At this point the CPU should be unrealized and removed
8188 * from cpu lists. We can clean-up the rest of the thread
8189 * data without the lock held.
8192 pthread_mutex_unlock(&clone_lock
);
8194 if (ts
->child_tidptr
) {
8195 put_user_u32(0, ts
->child_tidptr
);
8196 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8197 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8201 rcu_unregister_thread();
8205 pthread_mutex_unlock(&clone_lock
);
8206 preexit_cleanup(cpu_env
, arg1
);
8208 return 0; /* avoid warning */
8209 case TARGET_NR_read
:
8210 if (arg2
== 0 && arg3
== 0) {
8211 return get_errno(safe_read(arg1
, 0, 0));
8213 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8214 return -TARGET_EFAULT
;
8215 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8217 fd_trans_host_to_target_data(arg1
)) {
8218 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8220 unlock_user(p
, arg2
, ret
);
8223 case TARGET_NR_write
:
8224 if (arg2
== 0 && arg3
== 0) {
8225 return get_errno(safe_write(arg1
, 0, 0));
8227 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8228 return -TARGET_EFAULT
;
8229 if (fd_trans_target_to_host_data(arg1
)) {
8230 void *copy
= g_malloc(arg3
);
8231 memcpy(copy
, p
, arg3
);
8232 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8234 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8238 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8240 unlock_user(p
, arg2
, 0);
8243 #ifdef TARGET_NR_open
8244 case TARGET_NR_open
:
8245 if (!(p
= lock_user_string(arg1
)))
8246 return -TARGET_EFAULT
;
8247 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8248 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8250 fd_trans_unregister(ret
);
8251 unlock_user(p
, arg1
, 0);
8254 case TARGET_NR_openat
:
8255 if (!(p
= lock_user_string(arg2
)))
8256 return -TARGET_EFAULT
;
8257 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8258 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8260 fd_trans_unregister(ret
);
8261 unlock_user(p
, arg2
, 0);
8263 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8264 case TARGET_NR_name_to_handle_at
:
8265 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8268 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8269 case TARGET_NR_open_by_handle_at
:
8270 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8271 fd_trans_unregister(ret
);
8274 case TARGET_NR_close
:
8275 fd_trans_unregister(arg1
);
8276 return get_errno(close(arg1
));
8279 return do_brk(arg1
);
8280 #ifdef TARGET_NR_fork
8281 case TARGET_NR_fork
:
8282 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8284 #ifdef TARGET_NR_waitpid
8285 case TARGET_NR_waitpid
:
8288 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8289 if (!is_error(ret
) && arg2
&& ret
8290 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8291 return -TARGET_EFAULT
;
8295 #ifdef TARGET_NR_waitid
8296 case TARGET_NR_waitid
:
8300 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8301 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8302 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8303 return -TARGET_EFAULT
;
8304 host_to_target_siginfo(p
, &info
);
8305 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8310 #ifdef TARGET_NR_creat /* not on alpha */
8311 case TARGET_NR_creat
:
8312 if (!(p
= lock_user_string(arg1
)))
8313 return -TARGET_EFAULT
;
8314 ret
= get_errno(creat(p
, arg2
));
8315 fd_trans_unregister(ret
);
8316 unlock_user(p
, arg1
, 0);
8319 #ifdef TARGET_NR_link
8320 case TARGET_NR_link
:
8323 p
= lock_user_string(arg1
);
8324 p2
= lock_user_string(arg2
);
8326 ret
= -TARGET_EFAULT
;
8328 ret
= get_errno(link(p
, p2
));
8329 unlock_user(p2
, arg2
, 0);
8330 unlock_user(p
, arg1
, 0);
8334 #if defined(TARGET_NR_linkat)
8335 case TARGET_NR_linkat
:
8339 return -TARGET_EFAULT
;
8340 p
= lock_user_string(arg2
);
8341 p2
= lock_user_string(arg4
);
8343 ret
= -TARGET_EFAULT
;
8345 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8346 unlock_user(p
, arg2
, 0);
8347 unlock_user(p2
, arg4
, 0);
8351 #ifdef TARGET_NR_unlink
8352 case TARGET_NR_unlink
:
8353 if (!(p
= lock_user_string(arg1
)))
8354 return -TARGET_EFAULT
;
8355 ret
= get_errno(unlink(p
));
8356 unlock_user(p
, arg1
, 0);
8359 #if defined(TARGET_NR_unlinkat)
8360 case TARGET_NR_unlinkat
:
8361 if (!(p
= lock_user_string(arg2
)))
8362 return -TARGET_EFAULT
;
8363 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8364 unlock_user(p
, arg2
, 0);
8367 case TARGET_NR_execve
:
8369 char **argp
, **envp
;
8372 abi_ulong guest_argp
;
8373 abi_ulong guest_envp
;
8379 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8380 if (get_user_ual(addr
, gp
))
8381 return -TARGET_EFAULT
;
8388 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8389 if (get_user_ual(addr
, gp
))
8390 return -TARGET_EFAULT
;
8396 argp
= g_new0(char *, argc
+ 1);
8397 envp
= g_new0(char *, envc
+ 1);
8399 for (gp
= guest_argp
, q
= argp
; gp
;
8400 gp
+= sizeof(abi_ulong
), q
++) {
8401 if (get_user_ual(addr
, gp
))
8405 if (!(*q
= lock_user_string(addr
)))
8410 for (gp
= guest_envp
, q
= envp
; gp
;
8411 gp
+= sizeof(abi_ulong
), q
++) {
8412 if (get_user_ual(addr
, gp
))
8416 if (!(*q
= lock_user_string(addr
)))
8421 if (!(p
= lock_user_string(arg1
)))
8423 /* Although execve() is not an interruptible syscall it is
8424 * a special case where we must use the safe_syscall wrapper:
8425 * if we allow a signal to happen before we make the host
8426 * syscall then we will 'lose' it, because at the point of
8427 * execve the process leaves QEMU's control. So we use the
8428 * safe syscall wrapper to ensure that we either take the
8429 * signal as a guest signal, or else it does not happen
8430 * before the execve completes and makes it the other
8431 * program's problem.
8433 ret
= get_errno(safe_execve(p
, argp
, envp
));
8434 unlock_user(p
, arg1
, 0);
8439 ret
= -TARGET_EFAULT
;
8442 for (gp
= guest_argp
, q
= argp
; *q
;
8443 gp
+= sizeof(abi_ulong
), q
++) {
8444 if (get_user_ual(addr
, gp
)
8447 unlock_user(*q
, addr
, 0);
8449 for (gp
= guest_envp
, q
= envp
; *q
;
8450 gp
+= sizeof(abi_ulong
), q
++) {
8451 if (get_user_ual(addr
, gp
)
8454 unlock_user(*q
, addr
, 0);
8461 case TARGET_NR_chdir
:
8462 if (!(p
= lock_user_string(arg1
)))
8463 return -TARGET_EFAULT
;
8464 ret
= get_errno(chdir(p
));
8465 unlock_user(p
, arg1
, 0);
8467 #ifdef TARGET_NR_time
8468 case TARGET_NR_time
:
8471 ret
= get_errno(time(&host_time
));
8474 && put_user_sal(host_time
, arg1
))
8475 return -TARGET_EFAULT
;
8479 #ifdef TARGET_NR_mknod
8480 case TARGET_NR_mknod
:
8481 if (!(p
= lock_user_string(arg1
)))
8482 return -TARGET_EFAULT
;
8483 ret
= get_errno(mknod(p
, arg2
, arg3
));
8484 unlock_user(p
, arg1
, 0);
8487 #if defined(TARGET_NR_mknodat)
8488 case TARGET_NR_mknodat
:
8489 if (!(p
= lock_user_string(arg2
)))
8490 return -TARGET_EFAULT
;
8491 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8492 unlock_user(p
, arg2
, 0);
8495 #ifdef TARGET_NR_chmod
8496 case TARGET_NR_chmod
:
8497 if (!(p
= lock_user_string(arg1
)))
8498 return -TARGET_EFAULT
;
8499 ret
= get_errno(chmod(p
, arg2
));
8500 unlock_user(p
, arg1
, 0);
8503 #ifdef TARGET_NR_lseek
8504 case TARGET_NR_lseek
:
8505 return get_errno(lseek(arg1
, arg2
, arg3
));
8507 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8508 /* Alpha specific */
8509 case TARGET_NR_getxpid
:
8510 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8511 return get_errno(getpid());
8513 #ifdef TARGET_NR_getpid
8514 case TARGET_NR_getpid
:
8515 return get_errno(getpid());
8517 case TARGET_NR_mount
:
8519 /* need to look at the data field */
8523 p
= lock_user_string(arg1
);
8525 return -TARGET_EFAULT
;
8531 p2
= lock_user_string(arg2
);
8534 unlock_user(p
, arg1
, 0);
8536 return -TARGET_EFAULT
;
8540 p3
= lock_user_string(arg3
);
8543 unlock_user(p
, arg1
, 0);
8545 unlock_user(p2
, arg2
, 0);
8546 return -TARGET_EFAULT
;
8552 /* FIXME - arg5 should be locked, but it isn't clear how to
8553 * do that since it's not guaranteed to be a NULL-terminated
8557 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8559 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8561 ret
= get_errno(ret
);
8564 unlock_user(p
, arg1
, 0);
8566 unlock_user(p2
, arg2
, 0);
8568 unlock_user(p3
, arg3
, 0);
8572 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8573 #if defined(TARGET_NR_umount)
8574 case TARGET_NR_umount
:
8576 #if defined(TARGET_NR_oldumount)
8577 case TARGET_NR_oldumount
:
8579 if (!(p
= lock_user_string(arg1
)))
8580 return -TARGET_EFAULT
;
8581 ret
= get_errno(umount(p
));
8582 unlock_user(p
, arg1
, 0);
8585 #ifdef TARGET_NR_stime /* not on alpha */
8586 case TARGET_NR_stime
:
8590 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8591 return -TARGET_EFAULT
;
8593 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8596 #ifdef TARGET_NR_alarm /* not on alpha */
8597 case TARGET_NR_alarm
:
8600 #ifdef TARGET_NR_pause /* not on alpha */
8601 case TARGET_NR_pause
:
8602 if (!block_signals()) {
8603 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8605 return -TARGET_EINTR
;
8607 #ifdef TARGET_NR_utime
8608 case TARGET_NR_utime
:
8610 struct utimbuf tbuf
, *host_tbuf
;
8611 struct target_utimbuf
*target_tbuf
;
8613 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8614 return -TARGET_EFAULT
;
8615 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8616 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8617 unlock_user_struct(target_tbuf
, arg2
, 0);
8622 if (!(p
= lock_user_string(arg1
)))
8623 return -TARGET_EFAULT
;
8624 ret
= get_errno(utime(p
, host_tbuf
));
8625 unlock_user(p
, arg1
, 0);
8629 #ifdef TARGET_NR_utimes
8630 case TARGET_NR_utimes
:
8632 struct timeval
*tvp
, tv
[2];
8634 if (copy_from_user_timeval(&tv
[0], arg2
)
8635 || copy_from_user_timeval(&tv
[1],
8636 arg2
+ sizeof(struct target_timeval
)))
8637 return -TARGET_EFAULT
;
8642 if (!(p
= lock_user_string(arg1
)))
8643 return -TARGET_EFAULT
;
8644 ret
= get_errno(utimes(p
, tvp
));
8645 unlock_user(p
, arg1
, 0);
8649 #if defined(TARGET_NR_futimesat)
8650 case TARGET_NR_futimesat
:
8652 struct timeval
*tvp
, tv
[2];
8654 if (copy_from_user_timeval(&tv
[0], arg3
)
8655 || copy_from_user_timeval(&tv
[1],
8656 arg3
+ sizeof(struct target_timeval
)))
8657 return -TARGET_EFAULT
;
8662 if (!(p
= lock_user_string(arg2
))) {
8663 return -TARGET_EFAULT
;
8665 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8666 unlock_user(p
, arg2
, 0);
8670 #ifdef TARGET_NR_access
8671 case TARGET_NR_access
:
8672 if (!(p
= lock_user_string(arg1
))) {
8673 return -TARGET_EFAULT
;
8675 ret
= get_errno(access(path(p
), arg2
));
8676 unlock_user(p
, arg1
, 0);
8679 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8680 case TARGET_NR_faccessat
:
8681 if (!(p
= lock_user_string(arg2
))) {
8682 return -TARGET_EFAULT
;
8684 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8685 unlock_user(p
, arg2
, 0);
8688 #ifdef TARGET_NR_nice /* not on alpha */
8689 case TARGET_NR_nice
:
8690 return get_errno(nice(arg1
));
8692 case TARGET_NR_sync
:
8695 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8696 case TARGET_NR_syncfs
:
8697 return get_errno(syncfs(arg1
));
8699 case TARGET_NR_kill
:
8700 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        {
            int host_flags;

            if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
                return -TARGET_EINVAL;
            }
            host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
            ret = get_errno(dup3(arg1, arg2, host_flags));
            if (ret >= 0) {
                fd_trans_dup(arg1, arg2);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
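    /*
     * Alpha is special-cased below: its sigprocmask takes the mask in a
     * register rather than via a guest pointer, and hands the old mask
     * back as the return value, clearing v0 to signal success.
     */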
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch (how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
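    /*
     * For both sigsuspend flavours above, the temporary mask is stashed in
     * the TaskState and in_sigsuspend is set; the signal delivery code is
     * then expected to restore the original mask once a signal is taken.
     */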
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }
            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }
            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
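    /*
     * readlink on /proc/self/exe is intercepted below: the guest must see
     * the path of the binary being emulated, not that of QEMU itself, so
     * the answer is synthesized from exec_path instead of asking the host.
     */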
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                ret = temp == NULL ? get_errno(-1) : strlen(real);
                snprintf((char *)p2, arg4, "%s", real);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
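    /*
     * On several 32-bit targets the old mmap syscall passes a single
     * guest pointer to a block of six arguments instead of using
     * registers; that block has to be unpacked (and byte-swapped) first.
     */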
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5, arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
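    /*
     * For the memory syscalls below, guest addresses are stripped of any
     * architecture tag bits (cpu_untagged_addr) before being handed to
     * the target_m*() helpers, matching the kernel's untagged-pointer ABI.
     */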
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error. */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
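    /*
     * statfs and fstatfs share the host-to-target conversion below via the
     * convert_statfs label (and likewise convert_statfs64 for the 64-bit
     * variants), so only the way the stfs buffer is filled differs.
     */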
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage) {
                rusage_ptr = &rusage;
            } else {
                rusage_ptr = NULL;
            }
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
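    /*
     * getdents is handled three ways below: a 64-bit host serving a 32-bit
     * guest repacks records through a bounce buffer; a matching host can
     * byte-swap the buffer in place; and hosts without the legacy getdents
     * syscall emulate it on top of getdents64, shrinking records in place.
     */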
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
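    /*
     * lock_iovec() maps and validates the entire guest iovec in one go; on
     * failure it reports through errno, which is why the error paths below
     * convert errno directly instead of going through get_errno().
     */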
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
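    /*
     * prctl options whose arguments are pointers or which touch emulated
     * CPU state must be handled explicitly below; everything else falls
     * through to the default case and is passed to the host prctl().
     */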
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
            {
                int deathsig;
                ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
                if (!is_error(ret) && arg2
                    && put_user_s32(deathsig, arg2)) {
                    return -TARGET_EFAULT;
                }
                return ret;
            }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
            {
                void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
                if (!name) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 16);
                return ret;
            }
        case PR_SET_NAME:
            {
                void *name = lock_user(VERIFY_READ, arg2, 16, 1);
                if (!name) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(prctl(arg1, (unsigned long)name,
                                      arg3, arg4, arg5));
                unlock_user(name, arg2, 0);
                return ret;
            }
#endif
#ifdef TARGET_MIPS
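        /*
         * The FP mode prctls operate directly on the emulated MIPS CPU
         * state, toggling the FR/FRE bits and repacking the FPU registers
         * when the register model changes.
         */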
        case TARGET_PR_GET_FP_MODE:
            {
                CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
                ret = 0;
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    ret |= TARGET_PR_FP_MODE_FR;
                }
                if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                    ret |= TARGET_PR_FP_MODE_FRE;
                }
                return ret;
            }
        case TARGET_PR_SET_FP_MODE:
            {
                CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
                bool old_fr = env->CP0_Status & (1 << CP0St_FR);
                bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
                bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
                bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

                const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                                TARGET_PR_FP_MODE_FRE;

                /* If nothing to change, return right away, successfully.  */
                if (old_fr == new_fr && old_fre == new_fre) {
                    return 0;
                }
                /* Check the value is valid */
                if (arg2 & ~known_bits) {
                    return -TARGET_EOPNOTSUPP;
                }
                /* Setting FRE without FR is not supported.  */
                if (new_fre && !new_fr) {
                    return -TARGET_EOPNOTSUPP;
                }
                if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                    /* FR1 is not supported */
                    return -TARGET_EOPNOTSUPP;
                }
                if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                    && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                    /* cannot set FR=0 */
                    return -TARGET_EOPNOTSUPP;
                }
                if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                    /* Cannot set FRE=1 */
                    return -TARGET_EOPNOTSUPP;
                }

                int i;
                fpr_t *fpr = env->active_fpu.fpr;
                for (i = 0; i < 32 ; i += 2) {
                    if (!old_fr && new_fr) {
                        fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                    } else if (old_fr && !new_fr) {
                        fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                    }
                }

                if (new_fr) {
                    env->CP0_Status |= (1 << CP0St_FR);
                    env->hflags |= MIPS_HFLAG_F64;
                } else {
                    env->CP0_Status &= ~(1 << CP0St_FR);
                    env->hflags &= ~MIPS_HFLAG_F64;
                }
                if (new_fre) {
                    env->CP0_Config5 |= (1 << CP0C5_FRE);
                    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                        env->hflags |= MIPS_HFLAG_FRE;
                    }
                } else {
                    env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                    env->hflags &= ~MIPS_HFLAG_FRE;
                }

                return 0;
            }
#endif /* MIPS */
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto.  The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
        case TARGET_PR_SET_TAGGED_ADDR_CTRL:
            {
                abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    valid_mask |= TARGET_PR_MTE_TCF_MASK;
                    valid_mask |= TARGET_PR_MTE_TAG_MASK;
                }

                if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;

                if (cpu_isar_feature(aa64_mte, cpu)) {
                    switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
                    case TARGET_PR_MTE_TCF_NONE:
                    case TARGET_PR_MTE_TCF_SYNC:
                    case TARGET_PR_MTE_TCF_ASYNC:
                        break;
                    default:
                        return -TARGET_EINVAL;
                    }

                    /*
                     * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
                     * Note that the syscall values are consistent with hw.
                     */
                    env->cp15.sctlr_el[1] =
                        deposit64(env->cp15.sctlr_el[1], 38, 2,
                                  arg2 >> TARGET_PR_MTE_TCF_SHIFT);

                    /*
                     * Write PR_MTE_TAG to GCR_EL1[Exclude].
                     * Note that the syscall uses an include mask,
                     * and hardware uses an exclude mask -- invert.
                     */
                    env->cp15.gcr_el1 =
                        deposit64(env->cp15.gcr_el1, 0, 16,
                                  ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
                    arm_rebuild_hflags(env);
                }
                return 0;
            }
        case TARGET_PR_GET_TAGGED_ADDR_CTRL:
            {
                abi_long ret = 0;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg2 || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (env->tagged_addr_enable) {
                    ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
                }
                if (cpu_isar_feature(aa64_mte, cpu)) {
                    /* See above.  */
                    ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
                            << TARGET_PR_MTE_TCF_SHIFT);
                    ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
                                    ~env->cp15.gcr_el1);
                }
                return ret;
            }
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
10958 case TARGET_NR_getcwd
:
10959 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10960 return -TARGET_EFAULT
;
10961 ret
= get_errno(sys_getcwd1(p
, arg2
));
10962 unlock_user(p
, arg1
, ret
);
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
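    /*
     * Illustrative guest-side sketch (not QEMU code): the byte-swapping
     * above mirrors what a guest sees through the raw capget(2) interface:
     *
     *     #include <linux/capability.h>
     *     #include <sys/syscall.h>
     *     struct __user_cap_header_struct hdr = {
     *         .version = _LINUX_CAPABILITY_VERSION_3, // v2+ -> two data items
     *         .pid = 0,                               // 0 means "this process"
     *     };
     *     struct __user_cap_data_struct data[2];
     *     if (syscall(SYS_capget, &hdr, data) != 0) {
     *         perror("capget");
     *     }
     *
     * The kernel rewrites hdr.version on both capget and capset, which is
     * why the code above always copies header.version back to the guest.
     */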
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
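    /*
     * A note on the fallback above, with an illustrative guest-side sketch
     * (an assumption about observable behaviour, not extra QEMU API): when
     * the host lacks __NR_statx, the emulation degrades to fstatat() and
     * fills in only the classic stat fields, so a guest asking for e.g.
     * STATX_BTIME gets a zeroed stx_btime:
     *
     *     struct statx stx;
     *     statx(AT_FDCWD, "file", 0, STATX_BASIC_STATS | STATX_BTIME, &stx);
     *     // stx.stx_btime may read back as zero under this fallback
     *
     * The memset() before the __put_user() calls is what guarantees the
     * unsupported fields read back as zero rather than stack garbage.
     */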
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    return -TARGET_EFAULT;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
        }
        return ret;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64(swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
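    /*
     * Illustrative sketch (an assumption-level restatement, not extra QEMU
     * API) of the 32-bit register-pair convention handled above: a 64-bit
     * offset arrives as two 32-bit halves that target_offset64() rejoins,
     * with which half is "high" depending on target endianness, roughly:
     *
     *     static inline uint64_t example_offset64(uint32_t w0, uint32_t w1)
     *     {
     *         // big-endian targets: w0 is the high word; little-endian: w1
     *         return ((uint64_t)w0 << 32) | w1;   // big-endian flavour
     *     }
     *
     * On ABIs where regpairs_aligned() is true, the pair must start in an
     * even register; that inserts a padding slot and shifts every following
     * argument up by one, which is why the cases above reshuffle argN.
     */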
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
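    /*
     * Illustrative guest-side sketch (not QEMU code) of the EINTR contract
     * implemented above: for a relative sleep, the remaining time is
     * written back only when the call is interrupted, e.g.
     *
     *     struct timespec req = { .tv_sec = 5 }, rem;
     *     int err = clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem);
     *     if (err == EINTR) {
     *         // rem now holds the unslept time; an absolute sleep
     *         // (TIMER_ABSTIME) would leave rem untouched instead.
     *     }
     */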
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
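    /*
     * Illustrative guest-side sketch (not QEMU code, and only an
     * approximation of what glibc does internally): robust-futex
     * registration is roughly
     *
     *     struct robust_list_head head;   // from <linux/futex.h>
     *     head.list.next = &head.list;
     *     head.futex_offset = 0;          // glibc uses the real offsetof()
     *     head.list_op_pending = NULL;
     *     syscall(SYS_set_robust_list, &head, sizeof(head));
     *
     * Under qemu-user that registration fails with ENOSYS (see above), so
     * PTHREAD_MUTEX_ROBUST mutexes silently lose owner-died recovery.
     */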
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                    sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
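    /*
     * Illustrative guest-side sketch (not QEMU code): the two-element
     * timespec array converted above carries atime then mtime, with the
     * special tv_nsec values passed straight through, e.g.
     *
     *     struct timespec times[2] = {
     *         { .tv_nsec = UTIME_OMIT },  // leave atime unchanged
     *         { .tv_nsec = UTIME_NOW  },  // set mtime to "now"
     *     };
     *     utimensat(AT_FDCWD, "file", times, 0);
     *
     * A NULL pathname (arg2 == 0) is the futimens()-style "operate on the
     * fd itself" form, which is why the code above skips lock_user_string.
     */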
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
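    /*
     * Illustrative guest-side sketch (not QEMU code): because epoll_data_t
     * is opaque to the kernel, swapping the whole 64-bit u64 (as above)
     * preserves whatever the guest stored there, e.g.
     *
     *     struct epoll_event ev;
     *     ev.events = EPOLLIN;
     *     ev.data.ptr = my_connection;   // hypothetical; or .fd / .u32 / .u64
     *     epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
     *
     * The value comes back verbatim from epoll_wait(), so no per-member
     * translation is needed -- only the byte order of the containing u64.
     */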
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
12702 case TARGET_NR_prlimit64
:
12704 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12705 struct target_rlimit64
*target_rnew
, *target_rold
;
12706 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12707 int resource
= target_to_host_resource(arg2
);
12709 if (arg3
&& (resource
!= RLIMIT_AS
&&
12710 resource
!= RLIMIT_DATA
&&
12711 resource
!= RLIMIT_STACK
)) {
12712 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12713 return -TARGET_EFAULT
;
12715 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12716 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12717 unlock_user_struct(target_rnew
, arg3
, 0);
12721 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12722 if (!is_error(ret
) && arg4
) {
12723 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12724 return -TARGET_EFAULT
;
12726 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12727 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12728 unlock_user_struct(target_rold
, arg4
, 1);
12733 #ifdef TARGET_NR_gethostname
12734 case TARGET_NR_gethostname
:
12736 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12738 ret
= get_errno(gethostname(name
, arg2
));
12739 unlock_user(name
, arg1
, arg2
);
12741 ret
= -TARGET_EFAULT
;
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
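    /*
     * A note on the handle returned above: the guest never sees the host
     * timer_t. It gets TIMER_MAGIC | timer_index, and get_timer_id() later
     * validates the magic and recovers the index, conceptually (a sketch
     * restating the existing helpers, not new API):
     *
     *     guest_id = TIMER_MAGIC | timer_index;     // what put_user stores
     *     // get_timer_id() rejects ids whose high bits != TIMER_MAGIC,
     *     // then masks off the magic to recover timer_index
     *
     * so a stale or forged id fails the magic check instead of indexing
     * g_posix_timers out of bounds.
     */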
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        return get_errno(timerfd_create(arg1,
                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
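    /*
     * A worked note on the cast above (restating the code, not new API):
     * arg5 is an abi_long, so on a 32-bit guest a length like 0x80000000
     * would sign-extend to 0xffffffff80000000 when widened for the host
     * call. Casting through abi_ulong first keeps it zero-extended:
     *
     *     abi_long n    = (abi_long)0x80000000;    // negative when signed
     *     uint64_t bad  = (uint64_t)n;             // 0xffffffff80000000
     *     uint64_t good = (uint64_t)(abi_ulong)n;  // 0x0000000080000000
     */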
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -TARGET_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);