4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
66 #include <sys/timerfd.h>
69 #include <sys/eventfd.h>
72 #include <sys/epoll.h>
75 #include "qemu/xattr.h"
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
80 #ifdef HAVE_SYS_KCOV_H
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
97 #include <linux/mtio.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
120 #include <linux/btrfs.h>
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
126 #include "linux_loop.h"
130 #include "user-internals.h"
132 #include "signal-common.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
145 #define CLONE_IO 0x80000000 /* Clone io context */
148 /* We can't directly call the host clone syscall, because this will
149 * badly confuse libc (breaking mutexes, for example). So we must
150 * divide clone flags into:
151 * * flag combinations that look like pthread_create()
152 * * flag combinations that look like fork()
153 * * flags we can implement within QEMU itself
154 * * flags we can't support and will return an error for
156 /* For thread creation, all these flags must be present; for
157 * fork, none must be present.
159 #define CLONE_THREAD_FLAGS \
160 (CLONE_VM | CLONE_FS | CLONE_FILES | \
161 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 /* These flags are ignored:
164 * CLONE_DETACHED is now ignored by the kernel;
165 * CLONE_IO is just an optimisation hint to the I/O scheduler
167 #define CLONE_IGNORED_FLAGS \
168 (CLONE_DETACHED | CLONE_IO)
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS \
172 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
173 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS \
177 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
178 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 #define CLONE_INVALID_FORK_FLAGS \
181 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 #define CLONE_INVALID_THREAD_FLAGS \
184 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
185 CLONE_IGNORED_FLAGS))
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188 * have almost all been allocated. We cannot support any of
189 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191 * The checks against the invalid thread masks above will catch these.
192 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196 * once. This exercises the codepaths for restart.
198 //#define DEBUG_ERESTARTSYS
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
214 #define _syscall0(type,name) \
215 static type name (void) \
217 return syscall(__NR_##name); \
220 #define _syscall1(type,name,type1,arg1) \
221 static type name (type1 arg1) \
223 return syscall(__NR_##name, arg1); \
226 #define _syscall2(type,name,type1,arg1,type2,arg2) \
227 static type name (type1 arg1,type2 arg2) \
229 return syscall(__NR_##name, arg1, arg2); \
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
233 static type name (type1 arg1,type2 arg2,type3 arg3) \
235 return syscall(__NR_##name, arg1, arg2, arg3); \
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
241 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
248 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
253 type5,arg5,type6,arg6) \
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
257 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
275 #define __NR_sys_statx __NR_statx
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid
)
294 /* For the 64-bit guest on 32-bit host case we must emulate
295 * getdents using getdents64, because otherwise the host
296 * might hand us back more dirent records than we can fit
297 * into the guest buffer after structure format conversion.
298 * Otherwise we emulate getdents with getdents if the host has it.
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
307 #if (defined(TARGET_NR_getdents) && \
308 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
314 loff_t
*, res
, uint
, wh
);
316 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
317 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
319 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group
,int,error_code
)
323 #if defined(__NR_futex)
324 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
325 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
327 #if defined(__NR_futex_time64)
328 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
329 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
331 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
332 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
333 unsigned long *, user_mask_ptr
);
334 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
335 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
336 unsigned long *, user_mask_ptr
);
337 /* sched_attr is not defined in glibc */
340 uint32_t sched_policy
;
341 uint64_t sched_flags
;
343 uint32_t sched_priority
;
344 uint64_t sched_runtime
;
345 uint64_t sched_deadline
;
346 uint64_t sched_period
;
347 uint32_t sched_util_min
;
348 uint32_t sched_util_max
;
350 #define __NR_sys_sched_getattr __NR_sched_getattr
351 _syscall4(int, sys_sched_getattr
, pid_t
, pid
, struct sched_attr
*, attr
,
352 unsigned int, size
, unsigned int, flags
);
353 #define __NR_sys_sched_setattr __NR_sched_setattr
354 _syscall3(int, sys_sched_setattr
, pid_t
, pid
, struct sched_attr
*, attr
,
355 unsigned int, flags
);
356 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
357 _syscall1(int, sys_sched_getscheduler
, pid_t
, pid
);
358 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
359 _syscall3(int, sys_sched_setscheduler
, pid_t
, pid
, int, policy
,
360 const struct sched_param
*, param
);
361 #define __NR_sys_sched_getparam __NR_sched_getparam
362 _syscall2(int, sys_sched_getparam
, pid_t
, pid
,
363 struct sched_param
*, param
);
364 #define __NR_sys_sched_setparam __NR_sched_setparam
365 _syscall2(int, sys_sched_setparam
, pid_t
, pid
,
366 const struct sched_param
*, param
);
367 #define __NR_sys_getcpu __NR_getcpu
368 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
369 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
371 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
372 struct __user_cap_data_struct
*, data
);
373 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
374 struct __user_cap_data_struct
*, data
);
375 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
376 _syscall2(int, ioprio_get
, int, which
, int, who
)
378 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
379 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
381 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
382 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
385 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
386 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
387 unsigned long, idx1
, unsigned long, idx2
)
391 * It is assumed that struct statx is architecture independent.
393 #if defined(TARGET_NR_statx) && defined(__NR_statx)
394 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
395 unsigned int, mask
, struct target_statx
*, statxbuf
)
397 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
398 _syscall2(int, membarrier
, int, cmd
, int, flags
)
401 static const bitmask_transtbl fcntl_flags_tbl
[] = {
402 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
403 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
404 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
405 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
406 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
407 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
408 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
409 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
410 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
411 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
412 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
413 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
414 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
415 #if defined(O_DIRECT)
416 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
418 #if defined(O_NOATIME)
419 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
421 #if defined(O_CLOEXEC)
422 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
425 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
427 #if defined(O_TMPFILE)
428 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
430 /* Don't terminate the list prematurely on 64-bit host+guest. */
431 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
432 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
437 _syscall2(int, sys_getcwd1
, char *, buf
, size_t, size
)
439 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
440 #if defined(__NR_utimensat)
441 #define __NR_sys_utimensat __NR_utimensat
442 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
443 const struct timespec
*,tsp
,int,flags
)
445 static int sys_utimensat(int dirfd
, const char *pathname
,
446 const struct timespec times
[2], int flags
)
452 #endif /* TARGET_NR_utimensat */
454 #ifdef TARGET_NR_renameat2
455 #if defined(__NR_renameat2)
456 #define __NR_sys_renameat2 __NR_renameat2
457 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
458 const char *, new, unsigned int, flags
)
460 static int sys_renameat2(int oldfd
, const char *old
,
461 int newfd
, const char *new, int flags
)
464 return renameat(oldfd
, old
, newfd
, new);
470 #endif /* TARGET_NR_renameat2 */
472 #ifdef CONFIG_INOTIFY
473 #include <sys/inotify.h>
475 /* Userspace can usually survive runtime without inotify */
476 #undef TARGET_NR_inotify_init
477 #undef TARGET_NR_inotify_init1
478 #undef TARGET_NR_inotify_add_watch
479 #undef TARGET_NR_inotify_rm_watch
480 #endif /* CONFIG_INOTIFY */
482 #if defined(TARGET_NR_prlimit64)
483 #ifndef __NR_prlimit64
484 # define __NR_prlimit64 -1
486 #define __NR_sys_prlimit64 __NR_prlimit64
487 /* The glibc rlimit structure may not be that used by the underlying syscall */
488 struct host_rlimit64
{
492 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
493 const struct host_rlimit64
*, new_limit
,
494 struct host_rlimit64
*, old_limit
)
498 #if defined(TARGET_NR_timer_create)
499 /* Maximum of 32 active POSIX timers allowed at any one time. */
500 static timer_t g_posix_timers
[32] = { 0, } ;
502 static inline int next_free_host_timer(void)
505 /* FIXME: Does finding the next free slot require a lock? */
506 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
507 if (g_posix_timers
[k
] == 0) {
508 g_posix_timers
[k
] = (timer_t
) 1;
516 static inline int host_to_target_errno(int host_errno
)
518 switch (host_errno
) {
519 #define E(X) case X: return TARGET_##X;
520 #include "errnos.c.inc"
527 static inline int target_to_host_errno(int target_errno
)
529 switch (target_errno
) {
530 #define E(X) case TARGET_##X: return X;
531 #include "errnos.c.inc"
538 abi_long
get_errno(abi_long ret
)
541 return -host_to_target_errno(errno
);
546 const char *target_strerror(int err
)
548 if (err
== QEMU_ERESTARTSYS
) {
549 return "To be restarted";
551 if (err
== QEMU_ESIGRETURN
) {
552 return "Successful exit from sigreturn";
555 return strerror(target_to_host_errno(err
));
558 static int check_zeroed_user(abi_long addr
, size_t ksize
, size_t usize
)
562 if (usize
<= ksize
) {
565 for (i
= ksize
; i
< usize
; i
++) {
566 if (get_user_u8(b
, addr
+ i
)) {
567 return -TARGET_EFAULT
;
576 #define safe_syscall0(type, name) \
577 static type safe_##name(void) \
579 return safe_syscall(__NR_##name); \
582 #define safe_syscall1(type, name, type1, arg1) \
583 static type safe_##name(type1 arg1) \
585 return safe_syscall(__NR_##name, arg1); \
588 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
589 static type safe_##name(type1 arg1, type2 arg2) \
591 return safe_syscall(__NR_##name, arg1, arg2); \
594 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
595 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
597 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
600 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
602 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
604 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
607 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
608 type4, arg4, type5, arg5) \
609 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
612 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
615 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
616 type4, arg4, type5, arg5, type6, arg6) \
617 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
618 type5 arg5, type6 arg6) \
620 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
623 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
624 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
625 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
626 int, flags
, mode_t
, mode
)
627 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
628 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
629 struct rusage
*, rusage
)
631 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
632 int, options
, struct rusage
*, rusage
)
633 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
634 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
635 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
636 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
637 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
639 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
640 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
641 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
644 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
645 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
647 #if defined(__NR_futex)
648 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
649 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
651 #if defined(__NR_futex_time64)
652 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
653 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
655 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
656 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
657 safe_syscall2(int, tkill
, int, tid
, int, sig
)
658 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
659 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
660 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
661 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
662 unsigned long, pos_l
, unsigned long, pos_h
)
663 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
664 unsigned long, pos_l
, unsigned long, pos_h
)
665 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
667 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
668 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
669 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
670 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
671 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
672 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
673 safe_syscall2(int, flock
, int, fd
, int, operation
)
674 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
675 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
676 const struct timespec
*, uts
, size_t, sigsetsize
)
678 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
680 #if defined(TARGET_NR_nanosleep)
681 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
682 struct timespec
*, rem
)
684 #if defined(TARGET_NR_clock_nanosleep) || \
685 defined(TARGET_NR_clock_nanosleep_time64)
686 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
687 const struct timespec
*, req
, struct timespec
*, rem
)
691 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
694 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
695 void *, ptr
, long, fifth
)
699 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
703 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
704 long, msgtype
, int, flags
)
706 #ifdef __NR_semtimedop
707 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
708 unsigned, nsops
, const struct timespec
*, timeout
)
710 #if defined(TARGET_NR_mq_timedsend) || \
711 defined(TARGET_NR_mq_timedsend_time64)
712 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
713 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
715 #if defined(TARGET_NR_mq_timedreceive) || \
716 defined(TARGET_NR_mq_timedreceive_time64)
717 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
718 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
720 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
721 safe_syscall6(ssize_t
, copy_file_range
, int, infd
, loff_t
*, pinoff
,
722 int, outfd
, loff_t
*, poutoff
, size_t, length
,
726 /* We do ioctl like this rather than via safe_syscall3 to preserve the
727 * "third argument might be integer or pointer or not present" behaviour of
730 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
731 /* Similarly for fcntl. Note that callers must always:
732 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
733 * use the flock64 struct rather than unsuffixed flock
734 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
737 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
739 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
742 static inline int host_to_target_sock_type(int host_type
)
746 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
748 target_type
= TARGET_SOCK_DGRAM
;
751 target_type
= TARGET_SOCK_STREAM
;
754 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
758 #if defined(SOCK_CLOEXEC)
759 if (host_type
& SOCK_CLOEXEC
) {
760 target_type
|= TARGET_SOCK_CLOEXEC
;
764 #if defined(SOCK_NONBLOCK)
765 if (host_type
& SOCK_NONBLOCK
) {
766 target_type
|= TARGET_SOCK_NONBLOCK
;
773 static abi_ulong target_brk
;
774 static abi_ulong target_original_brk
;
775 static abi_ulong brk_page
;
777 void target_set_brk(abi_ulong new_brk
)
779 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
780 brk_page
= HOST_PAGE_ALIGN(target_brk
);
783 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
784 #define DEBUGF_BRK(message, args...)
786 /* do_brk() must return target values and target errnos. */
787 abi_long
do_brk(abi_ulong new_brk
)
789 abi_long mapped_addr
;
790 abi_ulong new_alloc_size
;
792 /* brk pointers are always untagged */
794 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
797 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
800 if (new_brk
< target_original_brk
) {
801 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
806 /* If the new brk is less than the highest page reserved to the
807 * target heap allocation, set it and we're almost done... */
808 if (new_brk
<= brk_page
) {
809 /* Heap contents are initialized to zero, as for anonymous
811 if (new_brk
> target_brk
) {
812 memset(g2h_untagged(target_brk
), 0, new_brk
- target_brk
);
814 target_brk
= new_brk
;
815 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
819 /* We need to allocate more memory after the brk... Note that
820 * we don't use MAP_FIXED because that will map over the top of
821 * any existing mapping (like the one with the host libc or qemu
822 * itself); instead we treat "mapped but at wrong address" as
823 * a failure and unmap again.
825 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
826 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
827 PROT_READ
|PROT_WRITE
,
828 MAP_ANON
|MAP_PRIVATE
, 0, 0));
830 if (mapped_addr
== brk_page
) {
831 /* Heap contents are initialized to zero, as for anonymous
832 * mapped pages. Technically the new pages are already
833 * initialized to zero since they *are* anonymous mapped
834 * pages, however we have to take care with the contents that
835 * come from the remaining part of the previous page: it may
836 * contains garbage data due to a previous heap usage (grown
838 memset(g2h_untagged(target_brk
), 0, brk_page
- target_brk
);
840 target_brk
= new_brk
;
841 brk_page
= HOST_PAGE_ALIGN(target_brk
);
842 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
845 } else if (mapped_addr
!= -1) {
846 /* Mapped but at wrong address, meaning there wasn't actually
847 * enough space for this brk.
849 target_munmap(mapped_addr
, new_alloc_size
);
851 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
854 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
857 #if defined(TARGET_ALPHA)
858 /* We (partially) emulate OSF/1 on Alpha, which requires we
859 return a proper errno, not an unchanged brk value. */
860 return -TARGET_ENOMEM
;
862 /* For everything else, return the previous break. */
866 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
867 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
868 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
869 abi_ulong target_fds_addr
,
873 abi_ulong b
, *target_fds
;
875 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
876 if (!(target_fds
= lock_user(VERIFY_READ
,
878 sizeof(abi_ulong
) * nw
,
880 return -TARGET_EFAULT
;
884 for (i
= 0; i
< nw
; i
++) {
885 /* grab the abi_ulong */
886 __get_user(b
, &target_fds
[i
]);
887 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
888 /* check the bit inside the abi_ulong */
895 unlock_user(target_fds
, target_fds_addr
, 0);
900 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
901 abi_ulong target_fds_addr
,
904 if (target_fds_addr
) {
905 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
906 return -TARGET_EFAULT
;
914 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
920 abi_ulong
*target_fds
;
922 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
923 if (!(target_fds
= lock_user(VERIFY_WRITE
,
925 sizeof(abi_ulong
) * nw
,
927 return -TARGET_EFAULT
;
930 for (i
= 0; i
< nw
; i
++) {
932 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
933 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
936 __put_user(v
, &target_fds
[i
]);
939 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
945 #if defined(__alpha__)
951 static inline abi_long
host_to_target_clock_t(long ticks
)
953 #if HOST_HZ == TARGET_HZ
956 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
960 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
961 const struct rusage
*rusage
)
963 struct target_rusage
*target_rusage
;
965 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
966 return -TARGET_EFAULT
;
967 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
968 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
969 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
970 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
971 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
972 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
973 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
974 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
975 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
976 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
977 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
978 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
979 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
980 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
981 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
982 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
983 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
984 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
985 unlock_user_struct(target_rusage
, target_addr
, 1);
990 #ifdef TARGET_NR_setrlimit
991 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
993 abi_ulong target_rlim_swap
;
996 target_rlim_swap
= tswapal(target_rlim
);
997 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
998 return RLIM_INFINITY
;
1000 result
= target_rlim_swap
;
1001 if (target_rlim_swap
!= (rlim_t
)result
)
1002 return RLIM_INFINITY
;
1008 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1009 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1011 abi_ulong target_rlim_swap
;
1014 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1015 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1017 target_rlim_swap
= rlim
;
1018 result
= tswapal(target_rlim_swap
);
1024 static inline int target_to_host_resource(int code
)
1027 case TARGET_RLIMIT_AS
:
1029 case TARGET_RLIMIT_CORE
:
1031 case TARGET_RLIMIT_CPU
:
1033 case TARGET_RLIMIT_DATA
:
1035 case TARGET_RLIMIT_FSIZE
:
1036 return RLIMIT_FSIZE
;
1037 case TARGET_RLIMIT_LOCKS
:
1038 return RLIMIT_LOCKS
;
1039 case TARGET_RLIMIT_MEMLOCK
:
1040 return RLIMIT_MEMLOCK
;
1041 case TARGET_RLIMIT_MSGQUEUE
:
1042 return RLIMIT_MSGQUEUE
;
1043 case TARGET_RLIMIT_NICE
:
1045 case TARGET_RLIMIT_NOFILE
:
1046 return RLIMIT_NOFILE
;
1047 case TARGET_RLIMIT_NPROC
:
1048 return RLIMIT_NPROC
;
1049 case TARGET_RLIMIT_RSS
:
1051 case TARGET_RLIMIT_RTPRIO
:
1052 return RLIMIT_RTPRIO
;
1053 #ifdef RLIMIT_RTTIME
1054 case TARGET_RLIMIT_RTTIME
:
1055 return RLIMIT_RTTIME
;
1057 case TARGET_RLIMIT_SIGPENDING
:
1058 return RLIMIT_SIGPENDING
;
1059 case TARGET_RLIMIT_STACK
:
1060 return RLIMIT_STACK
;
/*
 * copy_from_user_timeval: read a struct target_timeval from guest
 * memory at target_tv_addr and store the byte-swapped fields into the
 * host *tv.  Returns -TARGET_EFAULT when the guest page cannot be
 * locked.  (Byte-identical fragment text; dropped lines include braces
 * and the trailing `return 0;`.)
 */
1066 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1067 abi_ulong target_tv_addr
)
1069 struct target_timeval
*target_tv
;
1071 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1072 return -TARGET_EFAULT
;
/* __get_user handles the guest-to-host byte swap per field */
1075 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1076 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
/* final 0: the guest struct was only read, nothing to copy back */
1078 unlock_user_struct(target_tv
, target_tv_addr
, 0);
/*
 * copy_to_user_timeval: write the host *tv into a struct
 * target_timeval in guest memory at target_tv_addr, byte-swapping each
 * field.  Returns -TARGET_EFAULT when the guest page cannot be locked
 * for writing.  (Byte-identical fragment text; braces and trailing
 * `return 0;` were dropped by the extractor.)
 */
1083 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1084 const struct timeval
*tv
)
1086 struct target_timeval
*target_tv
;
1088 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1089 return -TARGET_EFAULT
;
1092 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1093 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
/* final 1: copy the updated struct back to guest memory */
1095 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1100 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * copy_from_user_timeval64: like copy_from_user_timeval, but the guest
 * side uses the 64-bit time layout (struct target__kernel_sock_timeval)
 * as used by the *_time64 syscall variants.  (Byte-identical fragment
 * text; braces and trailing `return 0;` dropped.)
 */
1101 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1102 abi_ulong target_tv_addr
)
1104 struct target__kernel_sock_timeval
*target_tv
;
1106 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1107 return -TARGET_EFAULT
;
1110 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1111 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1113 unlock_user_struct(target_tv
, target_tv_addr
, 0);
/*
 * copy_to_user_timeval64: write the host *tv into the guest's 64-bit
 * time layout (struct target__kernel_sock_timeval) at target_tv_addr.
 * Returns -TARGET_EFAULT if the guest page cannot be locked.
 * (Byte-identical fragment text; braces and trailing `return 0;`
 * dropped by the extractor.)
 */
1119 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1120 const struct timeval
*tv
)
1122 struct target__kernel_sock_timeval
*target_tv
;
1124 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1125 return -TARGET_EFAULT
;
1128 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1129 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1131 unlock_user_struct(target_tv
, target_tv_addr
, 1);
/* Guard: compiled only when at least one timespec-consuming syscall is
 * configured for this target. */
1136 #if defined(TARGET_NR_futex) || \
1137 defined(TARGET_NR_rt_sigtimedwait) || \
1138 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1139 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1140 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1141 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1142 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1143 defined(TARGET_NR_timer_settime) || \
1144 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * target_to_host_timespec: read a struct target_timespec from guest
 * memory into the host *host_ts, byte-swapping both fields.  Returns
 * -TARGET_EFAULT on an unreadable guest address.  (Byte-identical
 * fragment text; braces and trailing `return 0;` dropped.)
 */
1145 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1146 abi_ulong target_addr
)
1148 struct target_timespec
*target_ts
;
1150 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1151 return -TARGET_EFAULT
;
1153 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1154 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1155 unlock_user_struct(target_ts
, target_addr
, 0);
/* Guard: compiled only when a *_time64 syscall variant needing the
 * 64-bit timespec conversion is configured. */
1160 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1161 defined(TARGET_NR_timer_settime64) || \
1162 defined(TARGET_NR_mq_timedsend_time64) || \
1163 defined(TARGET_NR_mq_timedreceive_time64) || \
1164 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1165 defined(TARGET_NR_clock_nanosleep_time64) || \
1166 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1167 defined(TARGET_NR_utimensat) || \
1168 defined(TARGET_NR_utimensat_time64) || \
1169 defined(TARGET_NR_semtimedop_time64) || \
1170 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * target_to_host_timespec64: read a struct target__kernel_timespec
 * (the 64-bit layout) from guest memory into the host *host_ts.
 * Returns -TARGET_EFAULT on an unreadable guest address.
 * (Byte-identical fragment text; braces and trailing `return 0;`
 * dropped.)
 */
1171 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1172 abi_ulong target_addr
)
1174 struct target__kernel_timespec
*target_ts
;
1176 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1177 return -TARGET_EFAULT
;
1179 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1180 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
/* the 64-bit layout carries padding bits above tv_nsec on 32-bit
 * guests; the abi_long round-trip discards them */
1181 /* in 32bit mode, this drops the padding */
1182 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1183 unlock_user_struct(target_ts
, target_addr
, 0);
/*
 * host_to_target_timespec: write the host *host_ts into a struct
 * target_timespec in guest memory at target_addr, byte-swapping both
 * fields.  Returns -TARGET_EFAULT if the guest page cannot be locked
 * for writing.  (Byte-identical fragment text; braces and trailing
 * `return 0;` dropped.)
 */
1188 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1189 struct timespec
*host_ts
)
1191 struct target_timespec
*target_ts
;
1193 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1194 return -TARGET_EFAULT
;
1196 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1197 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1198 unlock_user_struct(target_ts
, target_addr
, 1);
/*
 * host_to_target_timespec64: write the host *host_ts into the guest's
 * 64-bit layout (struct target__kernel_timespec) at target_addr.
 * Returns -TARGET_EFAULT if the guest page cannot be locked.
 * (Byte-identical fragment text; braces and trailing `return 0;`
 * dropped.)
 */
1202 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1203 struct timespec
*host_ts
)
1205 struct target__kernel_timespec
*target_ts
;
1207 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1208 return -TARGET_EFAULT
;
1210 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1211 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1212 unlock_user_struct(target_ts
, target_addr
, 1);
1216 #if defined(TARGET_NR_gettimeofday)
/*
 * copy_to_user_timezone: write the host struct timezone into guest
 * memory at target_tz_addr, byte-swapping both fields (used by the
 * gettimeofday emulation).  Returns -TARGET_EFAULT if the guest page
 * cannot be locked.  (Byte-identical fragment text; braces and
 * trailing `return 0;` dropped.)
 */
1217 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1218 struct timezone
*tz
)
1220 struct target_timezone
*target_tz
;
1222 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1223 return -TARGET_EFAULT
;
1226 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1227 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1229 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1235 #if defined(TARGET_NR_settimeofday)
/*
 * copy_from_user_timezone: read a struct target_timezone from guest
 * memory at target_tz_addr into the host *tz (used by the
 * settimeofday emulation).  Returns -TARGET_EFAULT on an unreadable
 * guest address.  (Byte-identical fragment text; braces and trailing
 * `return 0;` dropped.)
 */
1236 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1237 abi_ulong target_tz_addr
)
1239 struct target_timezone
*target_tz
;
1241 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1242 return -TARGET_EFAULT
;
1245 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1246 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1248 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1254 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/*
 * copy_from_user_mq_attr: read a struct target_mq_attr (POSIX message
 * queue attributes) from guest memory into the host *attr, swapping
 * all four fields.  Returns -TARGET_EFAULT on an unreadable guest
 * address.  (Byte-identical fragment text; braces and trailing
 * `return 0;` dropped.)
 */
1257 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1258 abi_ulong target_mq_attr_addr
)
1260 struct target_mq_attr
*target_mq_attr
;
1262 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1263 target_mq_attr_addr
, 1))
1264 return -TARGET_EFAULT
;
1266 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1267 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1268 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1269 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1271 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
/*
 * copy_to_user_mq_attr: mirror of copy_from_user_mq_attr — write the
 * host *attr into a struct target_mq_attr in guest memory, swapping
 * all four fields.  Returns -TARGET_EFAULT if the guest page cannot be
 * locked for writing.  (Byte-identical fragment text; braces and
 * trailing `return 0;` dropped.)
 */
1276 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1277 const struct mq_attr
*attr
)
1279 struct target_mq_attr
*target_mq_attr
;
1281 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1282 target_mq_attr_addr
, 0))
1283 return -TARGET_EFAULT
;
1285 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1286 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1287 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1288 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1290 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1296 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1297 /* do_select() must return target values and target errnos. */
1298 static abi_long
do_select(int n
,
1299 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1300 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1302 fd_set rfds
, wfds
, efds
;
1303 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1305 struct timespec ts
, *ts_ptr
;
1308 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1312 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1316 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1321 if (target_tv_addr
) {
1322 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1323 return -TARGET_EFAULT
;
1324 ts
.tv_sec
= tv
.tv_sec
;
1325 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1331 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1334 if (!is_error(ret
)) {
1335 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1336 return -TARGET_EFAULT
;
1337 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1338 return -TARGET_EFAULT
;
1339 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1340 return -TARGET_EFAULT
;
1342 if (target_tv_addr
) {
1343 tv
.tv_sec
= ts
.tv_sec
;
1344 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1345 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1346 return -TARGET_EFAULT
;
1354 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * do_old_select: emulate the legacy one-argument select syscall, where
 * arg1 points at a struct bundling all five select parameters.  The
 * struct is read and byte-swapped field by field, then the work is
 * delegated to do_select().  (Byte-identical fragment text; braces and
 * the `abi_long nsel;` declaration line were dropped.)
 */
1355 static abi_long
do_old_select(abi_ulong arg1
)
1357 struct target_sel_arg_struct
*sel
;
1358 abi_ulong inp
, outp
, exp
, tvp
;
1361 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1362 return -TARGET_EFAULT
;
/* unpack and byte-swap the five bundled arguments */
1365 nsel
= tswapal(sel
->n
);
1366 inp
= tswapal(sel
->inp
);
1367 outp
= tswapal(sel
->outp
);
1368 exp
= tswapal(sel
->exp
);
1369 tvp
= tswapal(sel
->tvp
);
1371 unlock_user_struct(sel
, arg1
, 0);
1373 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1378 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1379 static abi_long
do_pselect6(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1380 abi_long arg4
, abi_long arg5
, abi_long arg6
,
1383 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
1384 fd_set rfds
, wfds
, efds
;
1385 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1386 struct timespec ts
, *ts_ptr
;
1390 * The 6th arg is actually two args smashed together,
1391 * so we cannot use the C library.
1398 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
1406 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1410 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1414 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1420 * This takes a timespec, and not a timeval, so we cannot
1421 * use the do_select() helper ...
1425 if (target_to_host_timespec64(&ts
, ts_addr
)) {
1426 return -TARGET_EFAULT
;
1429 if (target_to_host_timespec(&ts
, ts_addr
)) {
1430 return -TARGET_EFAULT
;
1438 /* Extract the two packed args for the sigset */
1441 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
1443 return -TARGET_EFAULT
;
1445 arg_sigset
= tswapal(arg7
[0]);
1446 arg_sigsize
= tswapal(arg7
[1]);
1447 unlock_user(arg7
, arg6
, 0);
1450 ret
= process_sigsuspend_mask(&sig
.set
, arg_sigset
, arg_sigsize
);
1455 sig
.size
= SIGSET_T_SIZE
;
1459 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1463 finish_sigsuspend_mask(ret
);
1466 if (!is_error(ret
)) {
1467 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
)) {
1468 return -TARGET_EFAULT
;
1470 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
)) {
1471 return -TARGET_EFAULT
;
1473 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
)) {
1474 return -TARGET_EFAULT
;
1477 if (ts_addr
&& host_to_target_timespec64(ts_addr
, &ts
)) {
1478 return -TARGET_EFAULT
;
1481 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
)) {
1482 return -TARGET_EFAULT
;
1490 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1491 defined(TARGET_NR_ppoll_time64)
1492 static abi_long
do_ppoll(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1493 abi_long arg4
, abi_long arg5
, bool ppoll
, bool time64
)
1495 struct target_pollfd
*target_pfd
;
1496 unsigned int nfds
= arg2
;
1504 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
1505 return -TARGET_EINVAL
;
1507 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
1508 sizeof(struct target_pollfd
) * nfds
, 1);
1510 return -TARGET_EFAULT
;
1513 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
1514 for (i
= 0; i
< nfds
; i
++) {
1515 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
1516 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
1520 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
1521 sigset_t
*set
= NULL
;
1525 if (target_to_host_timespec64(timeout_ts
, arg3
)) {
1526 unlock_user(target_pfd
, arg1
, 0);
1527 return -TARGET_EFAULT
;
1530 if (target_to_host_timespec(timeout_ts
, arg3
)) {
1531 unlock_user(target_pfd
, arg1
, 0);
1532 return -TARGET_EFAULT
;
1540 ret
= process_sigsuspend_mask(&set
, arg4
, arg5
);
1542 unlock_user(target_pfd
, arg1
, 0);
1547 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
1548 set
, SIGSET_T_SIZE
));
1551 finish_sigsuspend_mask(ret
);
1553 if (!is_error(ret
) && arg3
) {
1555 if (host_to_target_timespec64(arg3
, timeout_ts
)) {
1556 return -TARGET_EFAULT
;
1559 if (host_to_target_timespec(arg3
, timeout_ts
)) {
1560 return -TARGET_EFAULT
;
1565 struct timespec ts
, *pts
;
1568 /* Convert ms to secs, ns */
1569 ts
.tv_sec
= arg3
/ 1000;
1570 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
1573 /* -ve poll() timeout means "infinite" */
1576 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
1579 if (!is_error(ret
)) {
1580 for (i
= 0; i
< nfds
; i
++) {
1581 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
1584 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
/*
 * do_pipe2: thin wrapper over the host pipe2() call (guarded elsewhere
 * by CONFIG_PIPE2 availability).  The two created host fds are
 * returned through host_pipe[].
 */
1589 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1592 return pipe2(host_pipe
, flags
);
/*
 * do_pipe: emulate pipe/pipe2 for the guest.  Creates the host pipe
 * (via do_pipe2 when flags are given, plain pipe() otherwise) and then
 * delivers the two fds to the guest.  Several targets (Alpha, MIPS,
 * SH4, SPARC) return the second fd in a CPU register for the original
 * pipe syscall instead of writing through the user pointer — those
 * per-arch branches are below.  Otherwise both fds are stored to the
 * guest array at `pipedes`.  (Byte-identical fragment text; braces,
 * the error-check after the pipe call and some `#if` plumbing were
 * dropped by the extractor.)
 */
1598 static abi_long
do_pipe(CPUArchState
*cpu_env
, abi_ulong pipedes
,
1599 int flags
, int is_pipe2
)
1603 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1606 return get_errno(ret
);
1608 /* Several targets have special calling conventions for the original
1609 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1611 #if defined(TARGET_ALPHA)
1612 cpu_env
->ir
[IR_A4
] = host_pipe
[1];
1613 return host_pipe
[0];
1614 #elif defined(TARGET_MIPS)
1615 cpu_env
->active_tc
.gpr
[3] = host_pipe
[1];
1616 return host_pipe
[0];
1617 #elif defined(TARGET_SH4)
1618 cpu_env
->gregs
[1] = host_pipe
[1];
1619 return host_pipe
[0];
1620 #elif defined(TARGET_SPARC)
1621 cpu_env
->regwptr
[1] = host_pipe
[1];
1622 return host_pipe
[0];
/* generic path: write both fds into the guest int[2] array */
1626 if (put_user_s32(host_pipe
[0], pipedes
)
1627 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1628 return -TARGET_EFAULT
;
1629 return get_errno(ret
);
/*
 * target_to_host_ip_mreq: convert a guest ip_mreq/ip_mreqn (IP
 * multicast membership request) into the host struct ip_mreqn.  The
 * two in_addr fields are copied as-is (network byte order on both
 * sides); imr_ifindex is only present — and only swapped — when the
 * caller passed the larger target_ip_mreqn layout.  Returns
 * -TARGET_EFAULT on an unreadable guest address.  (Byte-identical
 * fragment text; braces, the `socklen_t len` parameter line and the
 * trailing `return 0;` were dropped by the extractor.)
 */
1632 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1633 abi_ulong target_addr
,
1636 struct target_ip_mreqn
*target_smreqn
;
1638 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1640 return -TARGET_EFAULT
;
/* s_addr stays in network byte order — no swap needed */
1641 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1642 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1643 if (len
== sizeof(struct target_ip_mreqn
))
1644 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1645 unlock_user(target_smreqn
, target_addr
, 0);
1650 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1651 abi_ulong target_addr
,
1654 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1655 sa_family_t sa_family
;
1656 struct target_sockaddr
*target_saddr
;
1658 if (fd_trans_target_to_host_addr(fd
)) {
1659 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1662 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1664 return -TARGET_EFAULT
;
1666 sa_family
= tswap16(target_saddr
->sa_family
);
1668 /* Oops. The caller might send a incomplete sun_path; sun_path
1669 * must be terminated by \0 (see the manual page), but
1670 * unfortunately it is quite common to specify sockaddr_un
1671 * length as "strlen(x->sun_path)" while it should be
1672 * "strlen(...) + 1". We'll fix that here if needed.
1673 * Linux kernel has a similar feature.
1676 if (sa_family
== AF_UNIX
) {
1677 if (len
< unix_maxlen
&& len
> 0) {
1678 char *cp
= (char*)target_saddr
;
1680 if ( cp
[len
-1] && !cp
[len
] )
1683 if (len
> unix_maxlen
)
1687 memcpy(addr
, target_saddr
, len
);
1688 addr
->sa_family
= sa_family
;
1689 if (sa_family
== AF_NETLINK
) {
1690 struct sockaddr_nl
*nladdr
;
1692 nladdr
= (struct sockaddr_nl
*)addr
;
1693 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1694 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1695 } else if (sa_family
== AF_PACKET
) {
1696 struct target_sockaddr_ll
*lladdr
;
1698 lladdr
= (struct target_sockaddr_ll
*)addr
;
1699 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1700 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1702 unlock_user(target_saddr
, target_addr
, 0);
1707 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1708 struct sockaddr
*addr
,
1711 struct target_sockaddr
*target_saddr
;
1718 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1720 return -TARGET_EFAULT
;
1721 memcpy(target_saddr
, addr
, len
);
1722 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1723 sizeof(target_saddr
->sa_family
)) {
1724 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1726 if (addr
->sa_family
== AF_NETLINK
&&
1727 len
>= sizeof(struct target_sockaddr_nl
)) {
1728 struct target_sockaddr_nl
*target_nl
=
1729 (struct target_sockaddr_nl
*)target_saddr
;
1730 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1731 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1732 } else if (addr
->sa_family
== AF_PACKET
) {
1733 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1734 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1735 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1736 } else if (addr
->sa_family
== AF_INET6
&&
1737 len
>= sizeof(struct target_sockaddr_in6
)) {
1738 struct target_sockaddr_in6
*target_in6
=
1739 (struct target_sockaddr_in6
*)target_saddr
;
1740 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1742 unlock_user(target_saddr
, target_addr
, len
);
1747 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1748 struct target_msghdr
*target_msgh
)
1750 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1751 abi_long msg_controllen
;
1752 abi_ulong target_cmsg_addr
;
1753 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1754 socklen_t space
= 0;
1756 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1757 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1759 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1760 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1761 target_cmsg_start
= target_cmsg
;
1763 return -TARGET_EFAULT
;
1765 while (cmsg
&& target_cmsg
) {
1766 void *data
= CMSG_DATA(cmsg
);
1767 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1769 int len
= tswapal(target_cmsg
->cmsg_len
)
1770 - sizeof(struct target_cmsghdr
);
1772 space
+= CMSG_SPACE(len
);
1773 if (space
> msgh
->msg_controllen
) {
1774 space
-= CMSG_SPACE(len
);
1775 /* This is a QEMU bug, since we allocated the payload
1776 * area ourselves (unlike overflow in host-to-target
1777 * conversion, which is just the guest giving us a buffer
1778 * that's too small). It can't happen for the payload types
1779 * we currently support; if it becomes an issue in future
1780 * we would need to improve our allocation strategy to
1781 * something more intelligent than "twice the size of the
1782 * target buffer we're reading from".
1784 qemu_log_mask(LOG_UNIMP
,
1785 ("Unsupported ancillary data %d/%d: "
1786 "unhandled msg size\n"),
1787 tswap32(target_cmsg
->cmsg_level
),
1788 tswap32(target_cmsg
->cmsg_type
));
1792 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1793 cmsg
->cmsg_level
= SOL_SOCKET
;
1795 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1797 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1798 cmsg
->cmsg_len
= CMSG_LEN(len
);
1800 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1801 int *fd
= (int *)data
;
1802 int *target_fd
= (int *)target_data
;
1803 int i
, numfds
= len
/ sizeof(int);
1805 for (i
= 0; i
< numfds
; i
++) {
1806 __get_user(fd
[i
], target_fd
+ i
);
1808 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1809 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1810 struct ucred
*cred
= (struct ucred
*)data
;
1811 struct target_ucred
*target_cred
=
1812 (struct target_ucred
*)target_data
;
1814 __get_user(cred
->pid
, &target_cred
->pid
);
1815 __get_user(cred
->uid
, &target_cred
->uid
);
1816 __get_user(cred
->gid
, &target_cred
->gid
);
1818 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1819 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1820 memcpy(data
, target_data
, len
);
1823 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1824 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1827 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1829 msgh
->msg_controllen
= space
;
1833 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1834 struct msghdr
*msgh
)
1836 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1837 abi_long msg_controllen
;
1838 abi_ulong target_cmsg_addr
;
1839 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1840 socklen_t space
= 0;
1842 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1843 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1845 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1846 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1847 target_cmsg_start
= target_cmsg
;
1849 return -TARGET_EFAULT
;
1851 while (cmsg
&& target_cmsg
) {
1852 void *data
= CMSG_DATA(cmsg
);
1853 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1855 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1856 int tgt_len
, tgt_space
;
1858 /* We never copy a half-header but may copy half-data;
1859 * this is Linux's behaviour in put_cmsg(). Note that
1860 * truncation here is a guest problem (which we report
1861 * to the guest via the CTRUNC bit), unlike truncation
1862 * in target_to_host_cmsg, which is a QEMU bug.
1864 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1865 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1869 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1870 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1872 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1874 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1876 /* Payload types which need a different size of payload on
1877 * the target must adjust tgt_len here.
1880 switch (cmsg
->cmsg_level
) {
1882 switch (cmsg
->cmsg_type
) {
1884 tgt_len
= sizeof(struct target_timeval
);
1894 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1895 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1896 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1899 /* We must now copy-and-convert len bytes of payload
1900 * into tgt_len bytes of destination space. Bear in mind
1901 * that in both source and destination we may be dealing
1902 * with a truncated value!
1904 switch (cmsg
->cmsg_level
) {
1906 switch (cmsg
->cmsg_type
) {
1909 int *fd
= (int *)data
;
1910 int *target_fd
= (int *)target_data
;
1911 int i
, numfds
= tgt_len
/ sizeof(int);
1913 for (i
= 0; i
< numfds
; i
++) {
1914 __put_user(fd
[i
], target_fd
+ i
);
1920 struct timeval
*tv
= (struct timeval
*)data
;
1921 struct target_timeval
*target_tv
=
1922 (struct target_timeval
*)target_data
;
1924 if (len
!= sizeof(struct timeval
) ||
1925 tgt_len
!= sizeof(struct target_timeval
)) {
1929 /* copy struct timeval to target */
1930 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1931 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1934 case SCM_CREDENTIALS
:
1936 struct ucred
*cred
= (struct ucred
*)data
;
1937 struct target_ucred
*target_cred
=
1938 (struct target_ucred
*)target_data
;
1940 __put_user(cred
->pid
, &target_cred
->pid
);
1941 __put_user(cred
->uid
, &target_cred
->uid
);
1942 __put_user(cred
->gid
, &target_cred
->gid
);
1951 switch (cmsg
->cmsg_type
) {
1954 uint32_t *v
= (uint32_t *)data
;
1955 uint32_t *t_int
= (uint32_t *)target_data
;
1957 if (len
!= sizeof(uint32_t) ||
1958 tgt_len
!= sizeof(uint32_t)) {
1961 __put_user(*v
, t_int
);
1967 struct sock_extended_err ee
;
1968 struct sockaddr_in offender
;
1970 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1971 struct errhdr_t
*target_errh
=
1972 (struct errhdr_t
*)target_data
;
1974 if (len
!= sizeof(struct errhdr_t
) ||
1975 tgt_len
!= sizeof(struct errhdr_t
)) {
1978 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1979 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1980 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1981 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1982 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1983 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1984 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1985 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1986 (void *) &errh
->offender
, sizeof(errh
->offender
));
1995 switch (cmsg
->cmsg_type
) {
1998 uint32_t *v
= (uint32_t *)data
;
1999 uint32_t *t_int
= (uint32_t *)target_data
;
2001 if (len
!= sizeof(uint32_t) ||
2002 tgt_len
!= sizeof(uint32_t)) {
2005 __put_user(*v
, t_int
);
2011 struct sock_extended_err ee
;
2012 struct sockaddr_in6 offender
;
2014 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2015 struct errhdr6_t
*target_errh
=
2016 (struct errhdr6_t
*)target_data
;
2018 if (len
!= sizeof(struct errhdr6_t
) ||
2019 tgt_len
!= sizeof(struct errhdr6_t
)) {
2022 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2023 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2024 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2025 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2026 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2027 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2028 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2029 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2030 (void *) &errh
->offender
, sizeof(errh
->offender
));
2040 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2041 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2042 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2043 if (tgt_len
> len
) {
2044 memset(target_data
+ len
, 0, tgt_len
- len
);
2048 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2049 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2050 if (msg_controllen
< tgt_space
) {
2051 tgt_space
= msg_controllen
;
2053 msg_controllen
-= tgt_space
;
2055 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2056 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2059 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2061 target_msgh
->msg_controllen
= tswapal(space
);
2065 /* do_setsockopt() Must return target values and target errnos. */
2066 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2067 abi_ulong optval_addr
, socklen_t optlen
)
2071 struct ip_mreqn
*ip_mreq
;
2072 struct ip_mreq_source
*ip_mreq_source
;
2077 /* TCP and UDP options all take an 'int' value. */
2078 if (optlen
< sizeof(uint32_t))
2079 return -TARGET_EINVAL
;
2081 if (get_user_u32(val
, optval_addr
))
2082 return -TARGET_EFAULT
;
2083 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2090 case IP_ROUTER_ALERT
:
2094 case IP_MTU_DISCOVER
:
2101 case IP_MULTICAST_TTL
:
2102 case IP_MULTICAST_LOOP
:
2104 if (optlen
>= sizeof(uint32_t)) {
2105 if (get_user_u32(val
, optval_addr
))
2106 return -TARGET_EFAULT
;
2107 } else if (optlen
>= 1) {
2108 if (get_user_u8(val
, optval_addr
))
2109 return -TARGET_EFAULT
;
2111 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2113 case IP_ADD_MEMBERSHIP
:
2114 case IP_DROP_MEMBERSHIP
:
2115 if (optlen
< sizeof (struct target_ip_mreq
) ||
2116 optlen
> sizeof (struct target_ip_mreqn
))
2117 return -TARGET_EINVAL
;
2119 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2120 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2121 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2124 case IP_BLOCK_SOURCE
:
2125 case IP_UNBLOCK_SOURCE
:
2126 case IP_ADD_SOURCE_MEMBERSHIP
:
2127 case IP_DROP_SOURCE_MEMBERSHIP
:
2128 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2129 return -TARGET_EINVAL
;
2131 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2132 if (!ip_mreq_source
) {
2133 return -TARGET_EFAULT
;
2135 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2136 unlock_user (ip_mreq_source
, optval_addr
, 0);
2145 case IPV6_MTU_DISCOVER
:
2148 case IPV6_RECVPKTINFO
:
2149 case IPV6_UNICAST_HOPS
:
2150 case IPV6_MULTICAST_HOPS
:
2151 case IPV6_MULTICAST_LOOP
:
2153 case IPV6_RECVHOPLIMIT
:
2154 case IPV6_2292HOPLIMIT
:
2157 case IPV6_2292PKTINFO
:
2158 case IPV6_RECVTCLASS
:
2159 case IPV6_RECVRTHDR
:
2160 case IPV6_2292RTHDR
:
2161 case IPV6_RECVHOPOPTS
:
2162 case IPV6_2292HOPOPTS
:
2163 case IPV6_RECVDSTOPTS
:
2164 case IPV6_2292DSTOPTS
:
2166 case IPV6_ADDR_PREFERENCES
:
2167 #ifdef IPV6_RECVPATHMTU
2168 case IPV6_RECVPATHMTU
:
2170 #ifdef IPV6_TRANSPARENT
2171 case IPV6_TRANSPARENT
:
2173 #ifdef IPV6_FREEBIND
2176 #ifdef IPV6_RECVORIGDSTADDR
2177 case IPV6_RECVORIGDSTADDR
:
2180 if (optlen
< sizeof(uint32_t)) {
2181 return -TARGET_EINVAL
;
2183 if (get_user_u32(val
, optval_addr
)) {
2184 return -TARGET_EFAULT
;
2186 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2187 &val
, sizeof(val
)));
2191 struct in6_pktinfo pki
;
2193 if (optlen
< sizeof(pki
)) {
2194 return -TARGET_EINVAL
;
2197 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2198 return -TARGET_EFAULT
;
2201 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2203 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2204 &pki
, sizeof(pki
)));
2207 case IPV6_ADD_MEMBERSHIP
:
2208 case IPV6_DROP_MEMBERSHIP
:
2210 struct ipv6_mreq ipv6mreq
;
2212 if (optlen
< sizeof(ipv6mreq
)) {
2213 return -TARGET_EINVAL
;
2216 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2217 return -TARGET_EFAULT
;
2220 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2222 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2223 &ipv6mreq
, sizeof(ipv6mreq
)));
2234 struct icmp6_filter icmp6f
;
2236 if (optlen
> sizeof(icmp6f
)) {
2237 optlen
= sizeof(icmp6f
);
2240 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2241 return -TARGET_EFAULT
;
2244 for (val
= 0; val
< 8; val
++) {
2245 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2248 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2260 /* those take an u32 value */
2261 if (optlen
< sizeof(uint32_t)) {
2262 return -TARGET_EINVAL
;
2265 if (get_user_u32(val
, optval_addr
)) {
2266 return -TARGET_EFAULT
;
2268 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2269 &val
, sizeof(val
)));
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2281 char *alg_key
= g_malloc(optlen
);
2284 return -TARGET_ENOMEM
;
2286 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2288 return -TARGET_EFAULT
;
2290 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2295 case ALG_SET_AEAD_AUTHSIZE
:
2297 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2306 case TARGET_SOL_SOCKET
:
2308 case TARGET_SO_RCVTIMEO
:
2312 optname
= SO_RCVTIMEO
;
2315 if (optlen
!= sizeof(struct target_timeval
)) {
2316 return -TARGET_EINVAL
;
2319 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2320 return -TARGET_EFAULT
;
2323 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2327 case TARGET_SO_SNDTIMEO
:
2328 optname
= SO_SNDTIMEO
;
2330 case TARGET_SO_ATTACH_FILTER
:
2332 struct target_sock_fprog
*tfprog
;
2333 struct target_sock_filter
*tfilter
;
2334 struct sock_fprog fprog
;
2335 struct sock_filter
*filter
;
2338 if (optlen
!= sizeof(*tfprog
)) {
2339 return -TARGET_EINVAL
;
2341 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2342 return -TARGET_EFAULT
;
2344 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2345 tswapal(tfprog
->filter
), 0)) {
2346 unlock_user_struct(tfprog
, optval_addr
, 1);
2347 return -TARGET_EFAULT
;
2350 fprog
.len
= tswap16(tfprog
->len
);
2351 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2352 if (filter
== NULL
) {
2353 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2354 unlock_user_struct(tfprog
, optval_addr
, 1);
2355 return -TARGET_ENOMEM
;
2357 for (i
= 0; i
< fprog
.len
; i
++) {
2358 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2359 filter
[i
].jt
= tfilter
[i
].jt
;
2360 filter
[i
].jf
= tfilter
[i
].jf
;
2361 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2363 fprog
.filter
= filter
;
2365 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2366 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2369 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2370 unlock_user_struct(tfprog
, optval_addr
, 1);
2373 case TARGET_SO_BINDTODEVICE
:
2375 char *dev_ifname
, *addr_ifname
;
2377 if (optlen
> IFNAMSIZ
- 1) {
2378 optlen
= IFNAMSIZ
- 1;
2380 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2382 return -TARGET_EFAULT
;
2384 optname
= SO_BINDTODEVICE
;
2385 addr_ifname
= alloca(IFNAMSIZ
);
2386 memcpy(addr_ifname
, dev_ifname
, optlen
);
2387 addr_ifname
[optlen
] = 0;
2388 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2389 addr_ifname
, optlen
));
2390 unlock_user (dev_ifname
, optval_addr
, 0);
2393 case TARGET_SO_LINGER
:
2396 struct target_linger
*tlg
;
2398 if (optlen
!= sizeof(struct target_linger
)) {
2399 return -TARGET_EINVAL
;
2401 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2402 return -TARGET_EFAULT
;
2404 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2405 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2406 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2408 unlock_user_struct(tlg
, optval_addr
, 0);
2411 /* Options with 'int' argument. */
2412 case TARGET_SO_DEBUG
:
2415 case TARGET_SO_REUSEADDR
:
2416 optname
= SO_REUSEADDR
;
2419 case TARGET_SO_REUSEPORT
:
2420 optname
= SO_REUSEPORT
;
2423 case TARGET_SO_TYPE
:
2426 case TARGET_SO_ERROR
:
2429 case TARGET_SO_DONTROUTE
:
2430 optname
= SO_DONTROUTE
;
2432 case TARGET_SO_BROADCAST
:
2433 optname
= SO_BROADCAST
;
2435 case TARGET_SO_SNDBUF
:
2436 optname
= SO_SNDBUF
;
2438 case TARGET_SO_SNDBUFFORCE
:
2439 optname
= SO_SNDBUFFORCE
;
2441 case TARGET_SO_RCVBUF
:
2442 optname
= SO_RCVBUF
;
2444 case TARGET_SO_RCVBUFFORCE
:
2445 optname
= SO_RCVBUFFORCE
;
2447 case TARGET_SO_KEEPALIVE
:
2448 optname
= SO_KEEPALIVE
;
2450 case TARGET_SO_OOBINLINE
:
2451 optname
= SO_OOBINLINE
;
2453 case TARGET_SO_NO_CHECK
:
2454 optname
= SO_NO_CHECK
;
2456 case TARGET_SO_PRIORITY
:
2457 optname
= SO_PRIORITY
;
2460 case TARGET_SO_BSDCOMPAT
:
2461 optname
= SO_BSDCOMPAT
;
2464 case TARGET_SO_PASSCRED
:
2465 optname
= SO_PASSCRED
;
2467 case TARGET_SO_PASSSEC
:
2468 optname
= SO_PASSSEC
;
2470 case TARGET_SO_TIMESTAMP
:
2471 optname
= SO_TIMESTAMP
;
2473 case TARGET_SO_RCVLOWAT
:
2474 optname
= SO_RCVLOWAT
;
2479 if (optlen
< sizeof(uint32_t))
2480 return -TARGET_EINVAL
;
2482 if (get_user_u32(val
, optval_addr
))
2483 return -TARGET_EFAULT
;
2484 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2489 case NETLINK_PKTINFO
:
2490 case NETLINK_ADD_MEMBERSHIP
:
2491 case NETLINK_DROP_MEMBERSHIP
:
2492 case NETLINK_BROADCAST_ERROR
:
2493 case NETLINK_NO_ENOBUFS
:
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2495 case NETLINK_LISTEN_ALL_NSID
:
2496 case NETLINK_CAP_ACK
:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2498 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2499 case NETLINK_EXT_ACK
:
2500 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2501 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2502 case NETLINK_GET_STRICT_CHK
:
2503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2509 if (optlen
< sizeof(uint32_t)) {
2510 return -TARGET_EINVAL
;
2512 if (get_user_u32(val
, optval_addr
)) {
2513 return -TARGET_EFAULT
;
2515 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2518 #endif /* SOL_NETLINK */
2521 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2523 ret
= -TARGET_ENOPROTOOPT
;
2528 /* do_getsockopt() Must return target values and target errnos. */
2529 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2530 abi_ulong optval_addr
, abi_ulong optlen
)
2537 case TARGET_SOL_SOCKET
:
2540 /* These don't just return a single integer */
2541 case TARGET_SO_PEERNAME
:
2543 case TARGET_SO_RCVTIMEO
: {
2547 optname
= SO_RCVTIMEO
;
2550 if (get_user_u32(len
, optlen
)) {
2551 return -TARGET_EFAULT
;
2554 return -TARGET_EINVAL
;
2558 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2563 if (len
> sizeof(struct target_timeval
)) {
2564 len
= sizeof(struct target_timeval
);
2566 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2567 return -TARGET_EFAULT
;
2569 if (put_user_u32(len
, optlen
)) {
2570 return -TARGET_EFAULT
;
2574 case TARGET_SO_SNDTIMEO
:
2575 optname
= SO_SNDTIMEO
;
2577 case TARGET_SO_PEERCRED
: {
2580 struct target_ucred
*tcr
;
2582 if (get_user_u32(len
, optlen
)) {
2583 return -TARGET_EFAULT
;
2586 return -TARGET_EINVAL
;
2590 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2598 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2599 return -TARGET_EFAULT
;
2601 __put_user(cr
.pid
, &tcr
->pid
);
2602 __put_user(cr
.uid
, &tcr
->uid
);
2603 __put_user(cr
.gid
, &tcr
->gid
);
2604 unlock_user_struct(tcr
, optval_addr
, 1);
2605 if (put_user_u32(len
, optlen
)) {
2606 return -TARGET_EFAULT
;
2610 case TARGET_SO_PEERSEC
: {
2613 if (get_user_u32(len
, optlen
)) {
2614 return -TARGET_EFAULT
;
2617 return -TARGET_EINVAL
;
2619 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2621 return -TARGET_EFAULT
;
2624 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2626 if (put_user_u32(lv
, optlen
)) {
2627 ret
= -TARGET_EFAULT
;
2629 unlock_user(name
, optval_addr
, lv
);
2632 case TARGET_SO_LINGER
:
2636 struct target_linger
*tlg
;
2638 if (get_user_u32(len
, optlen
)) {
2639 return -TARGET_EFAULT
;
2642 return -TARGET_EINVAL
;
2646 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2654 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2655 return -TARGET_EFAULT
;
2657 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2658 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2659 unlock_user_struct(tlg
, optval_addr
, 1);
2660 if (put_user_u32(len
, optlen
)) {
2661 return -TARGET_EFAULT
;
2665 /* Options with 'int' argument. */
2666 case TARGET_SO_DEBUG
:
2669 case TARGET_SO_REUSEADDR
:
2670 optname
= SO_REUSEADDR
;
2673 case TARGET_SO_REUSEPORT
:
2674 optname
= SO_REUSEPORT
;
2677 case TARGET_SO_TYPE
:
2680 case TARGET_SO_ERROR
:
2683 case TARGET_SO_DONTROUTE
:
2684 optname
= SO_DONTROUTE
;
2686 case TARGET_SO_BROADCAST
:
2687 optname
= SO_BROADCAST
;
2689 case TARGET_SO_SNDBUF
:
2690 optname
= SO_SNDBUF
;
2692 case TARGET_SO_RCVBUF
:
2693 optname
= SO_RCVBUF
;
2695 case TARGET_SO_KEEPALIVE
:
2696 optname
= SO_KEEPALIVE
;
2698 case TARGET_SO_OOBINLINE
:
2699 optname
= SO_OOBINLINE
;
2701 case TARGET_SO_NO_CHECK
:
2702 optname
= SO_NO_CHECK
;
2704 case TARGET_SO_PRIORITY
:
2705 optname
= SO_PRIORITY
;
2708 case TARGET_SO_BSDCOMPAT
:
2709 optname
= SO_BSDCOMPAT
;
2712 case TARGET_SO_PASSCRED
:
2713 optname
= SO_PASSCRED
;
2715 case TARGET_SO_TIMESTAMP
:
2716 optname
= SO_TIMESTAMP
;
2718 case TARGET_SO_RCVLOWAT
:
2719 optname
= SO_RCVLOWAT
;
2721 case TARGET_SO_ACCEPTCONN
:
2722 optname
= SO_ACCEPTCONN
;
2724 case TARGET_SO_PROTOCOL
:
2725 optname
= SO_PROTOCOL
;
2727 case TARGET_SO_DOMAIN
:
2728 optname
= SO_DOMAIN
;
2736 /* TCP and UDP options all take an 'int' value. */
2738 if (get_user_u32(len
, optlen
))
2739 return -TARGET_EFAULT
;
2741 return -TARGET_EINVAL
;
2743 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2746 if (optname
== SO_TYPE
) {
2747 val
= host_to_target_sock_type(val
);
2752 if (put_user_u32(val
, optval_addr
))
2753 return -TARGET_EFAULT
;
2755 if (put_user_u8(val
, optval_addr
))
2756 return -TARGET_EFAULT
;
2758 if (put_user_u32(len
, optlen
))
2759 return -TARGET_EFAULT
;
2766 case IP_ROUTER_ALERT
:
2770 case IP_MTU_DISCOVER
:
2776 case IP_MULTICAST_TTL
:
2777 case IP_MULTICAST_LOOP
:
2778 if (get_user_u32(len
, optlen
))
2779 return -TARGET_EFAULT
;
2781 return -TARGET_EINVAL
;
2783 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2786 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2788 if (put_user_u32(len
, optlen
)
2789 || put_user_u8(val
, optval_addr
))
2790 return -TARGET_EFAULT
;
2792 if (len
> sizeof(int))
2794 if (put_user_u32(len
, optlen
)
2795 || put_user_u32(val
, optval_addr
))
2796 return -TARGET_EFAULT
;
2800 ret
= -TARGET_ENOPROTOOPT
;
2806 case IPV6_MTU_DISCOVER
:
2809 case IPV6_RECVPKTINFO
:
2810 case IPV6_UNICAST_HOPS
:
2811 case IPV6_MULTICAST_HOPS
:
2812 case IPV6_MULTICAST_LOOP
:
2814 case IPV6_RECVHOPLIMIT
:
2815 case IPV6_2292HOPLIMIT
:
2818 case IPV6_2292PKTINFO
:
2819 case IPV6_RECVTCLASS
:
2820 case IPV6_RECVRTHDR
:
2821 case IPV6_2292RTHDR
:
2822 case IPV6_RECVHOPOPTS
:
2823 case IPV6_2292HOPOPTS
:
2824 case IPV6_RECVDSTOPTS
:
2825 case IPV6_2292DSTOPTS
:
2827 case IPV6_ADDR_PREFERENCES
:
2828 #ifdef IPV6_RECVPATHMTU
2829 case IPV6_RECVPATHMTU
:
2831 #ifdef IPV6_TRANSPARENT
2832 case IPV6_TRANSPARENT
:
2834 #ifdef IPV6_FREEBIND
2837 #ifdef IPV6_RECVORIGDSTADDR
2838 case IPV6_RECVORIGDSTADDR
:
2840 if (get_user_u32(len
, optlen
))
2841 return -TARGET_EFAULT
;
2843 return -TARGET_EINVAL
;
2845 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2848 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2850 if (put_user_u32(len
, optlen
)
2851 || put_user_u8(val
, optval_addr
))
2852 return -TARGET_EFAULT
;
2854 if (len
> sizeof(int))
2856 if (put_user_u32(len
, optlen
)
2857 || put_user_u32(val
, optval_addr
))
2858 return -TARGET_EFAULT
;
2862 ret
= -TARGET_ENOPROTOOPT
;
2869 case NETLINK_PKTINFO
:
2870 case NETLINK_BROADCAST_ERROR
:
2871 case NETLINK_NO_ENOBUFS
:
2872 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2873 case NETLINK_LISTEN_ALL_NSID
:
2874 case NETLINK_CAP_ACK
:
2875 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2876 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2877 case NETLINK_EXT_ACK
:
2878 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2879 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2880 case NETLINK_GET_STRICT_CHK
:
2881 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2882 if (get_user_u32(len
, optlen
)) {
2883 return -TARGET_EFAULT
;
2885 if (len
!= sizeof(val
)) {
2886 return -TARGET_EINVAL
;
2889 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2893 if (put_user_u32(lv
, optlen
)
2894 || put_user_u32(val
, optval_addr
)) {
2895 return -TARGET_EFAULT
;
2898 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2899 case NETLINK_LIST_MEMBERSHIPS
:
2903 if (get_user_u32(len
, optlen
)) {
2904 return -TARGET_EFAULT
;
2907 return -TARGET_EINVAL
;
2909 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2910 if (!results
&& len
> 0) {
2911 return -TARGET_EFAULT
;
2914 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2916 unlock_user(results
, optval_addr
, 0);
2919 /* swap host endianess to target endianess. */
2920 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2921 results
[i
] = tswap32(results
[i
]);
2923 if (put_user_u32(lv
, optlen
)) {
2924 return -TARGET_EFAULT
;
2926 unlock_user(results
, optval_addr
, 0);
2929 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2934 #endif /* SOL_NETLINK */
2937 qemu_log_mask(LOG_UNIMP
,
2938 "getsockopt level=%d optname=%d not yet supported\n",
2940 ret
= -TARGET_EOPNOTSUPP
;
2946 /* Convert target low/high pair representing file offset into the host
2947 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2948 * as the kernel doesn't handle them either.
2950 static void target_to_host_low_high(abi_ulong tlow
,
2952 unsigned long *hlow
,
2953 unsigned long *hhigh
)
2955 uint64_t off
= tlow
|
2956 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2957 TARGET_LONG_BITS
/ 2;
2960 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2963 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2964 abi_ulong count
, int copy
)
2966 struct target_iovec
*target_vec
;
2968 abi_ulong total_len
, max_len
;
2971 bool bad_address
= false;
2977 if (count
> IOV_MAX
) {
2982 vec
= g_try_new0(struct iovec
, count
);
2988 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2989 count
* sizeof(struct target_iovec
), 1);
2990 if (target_vec
== NULL
) {
2995 /* ??? If host page size > target page size, this will result in a
2996 value larger than what we can actually support. */
2997 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3000 for (i
= 0; i
< count
; i
++) {
3001 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3002 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3007 } else if (len
== 0) {
3008 /* Zero length pointer is ignored. */
3009 vec
[i
].iov_base
= 0;
3011 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3012 /* If the first buffer pointer is bad, this is a fault. But
3013 * subsequent bad buffers will result in a partial write; this
3014 * is realized by filling the vector with null pointers and
3016 if (!vec
[i
].iov_base
) {
3027 if (len
> max_len
- total_len
) {
3028 len
= max_len
- total_len
;
3031 vec
[i
].iov_len
= len
;
3035 unlock_user(target_vec
, target_addr
, 0);
3040 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3041 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3044 unlock_user(target_vec
, target_addr
, 0);
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * Counterpart of lock_iovec(): re-reads the guest's target_iovec array to
 * recover each element's guest base address, then unlock_user()s every
 * host iovec entry. When 'copy' is set the element length is passed to
 * unlock_user so written data is copied back to the guest; otherwise 0.
 * Error handling for a failed lock_user of target_vec is elided from this
 * view — TODO confirm against upstream.
 */
3051 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3052 abi_ulong count
, int copy
)
3054 struct target_iovec
*target_vec
;
3057 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3058 count
* sizeof(struct target_iovec
), 1);
3060 for (i
= 0; i
< count
; i
++) {
3061 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3062 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3066 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3068 unlock_user(target_vec
, target_addr
, 0);
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * Translates a target socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flag bits) into the host encoding, in place via
 * *type. Returns -TARGET_EINVAL when a requested flag cannot be expressed
 * on this host (no SOCK_CLOEXEC; no SOCK_NONBLOCK and no O_NONBLOCK to
 * emulate it with — see sock_flags_fixup below). break/#endif lines and
 * the final "*type = host_type" store are elided from this view.
 */
3074 static inline int target_to_host_sock_type(int *type
)
3077 int target_type
= *type
;
3079 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3080 case TARGET_SOCK_DGRAM
:
3081 host_type
= SOCK_DGRAM
;
3083 case TARGET_SOCK_STREAM
:
3084 host_type
= SOCK_STREAM
;
3087 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3090 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3091 #if defined(SOCK_CLOEXEC)
3092 host_type
|= SOCK_CLOEXEC
;
3094 return -TARGET_EINVAL
;
3097 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3098 #if defined(SOCK_NONBLOCK)
3099 host_type
|= SOCK_NONBLOCK
;
3100 #elif !defined(O_NONBLOCK)
3101 return -TARGET_EINVAL
;
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * Post-creation fallback for hosts lacking SOCK_NONBLOCK: if the guest
 * asked for TARGET_SOCK_NONBLOCK, set O_NONBLOCK on the new fd with
 * fcntl(F_GETFL/F_SETFL). On fcntl failure the fd is presumably closed
 * before returning -TARGET_EINVAL (cleanup line elided — TODO confirm).
 * On hosts with SOCK_NONBLOCK this whole body compiles away and the fd
 * is returned unchanged.
 */
3108 /* Try to emulate socket type flags after socket creation. */
3109 static int sock_flags_fixup(int fd
, int target_type
)
3111 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3112 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3113 int flags
= fcntl(fd
, F_GETFL
);
3114 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3116 return -TARGET_EINVAL
;
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * socket(2) emulation. Converts the target type/flags via
 * target_to_host_sock_type(), rejects PF_NETLINK protocols we do not
 * translate (only NETLINK_ROUTE when CONFIG_RTNETLINK, KOBJECT_UEVENT,
 * and AUDIT pass), byte-swaps the protocol for packet sockets (it is an
 * ethertype in network byte order), creates the host socket, applies
 * sock_flags_fixup(), and registers an fd translator for SOCK_PACKET
 * and netlink fds so later send/recv data is converted. Error-return
 * checks between steps are elided from this view.
 */
3123 /* do_socket() Must return target values and target errnos. */
3124 static abi_long
do_socket(int domain
, int type
, int protocol
)
3126 int target_type
= type
;
3129 ret
= target_to_host_sock_type(&type
);
3134 if (domain
== PF_NETLINK
&& !(
3135 #ifdef CONFIG_RTNETLINK
3136 protocol
== NETLINK_ROUTE
||
3138 protocol
== NETLINK_KOBJECT_UEVENT
||
3139 protocol
== NETLINK_AUDIT
)) {
3140 return -TARGET_EPROTONOSUPPORT
;
3143 if (domain
== AF_PACKET
||
3144 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3145 protocol
= tswap16(protocol
);
3148 ret
= get_errno(socket(domain
, type
, protocol
));
3150 ret
= sock_flags_fixup(ret
, target_type
);
3151 if (type
== SOCK_PACKET
) {
3152 /* Manage an obsolete case :
3153 * if socket type is SOCK_PACKET, bind by name
3155 fd_trans_register(ret
, &target_packet_trans
);
3156 } else if (domain
== PF_NETLINK
) {
3158 #ifdef CONFIG_RTNETLINK
3160 fd_trans_register(ret
, &target_netlink_route_trans
);
3163 case NETLINK_KOBJECT_UEVENT
:
3164 /* nothing to do: messages are strings */
3167 fd_trans_register(ret
, &target_netlink_audit_trans
);
3170 g_assert_not_reached();
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * bind(2) emulation: rejects negative addrlen (EINVAL, matching kernel),
 * stack-allocates addrlen+1 bytes (+1 leaves room for the NUL that
 * AF_UNIX path handling may need), converts the guest sockaddr with
 * target_to_host_sockaddr(), then calls host bind(). The error check on
 * the conversion's return value is elided from this view.
 */
3177 /* do_bind() Must return target values and target errnos. */
3178 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3184 if ((int)addrlen
< 0) {
3185 return -TARGET_EINVAL
;
3188 addr
= alloca(addrlen
+1);
3190 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3194 return get_errno(bind(sockfd
, addr
, addrlen
));
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * connect(2) emulation; mirrors do_bind() above but issues the host call
 * through safe_connect() (the restartable-syscall wrapper, so guest
 * signals are handled correctly during the potentially blocking connect).
 */
3197 /* do_connect() Must return target values and target errnos. */
3198 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3204 if ((int)addrlen
< 0) {
3205 return -TARGET_EINVAL
;
3208 addr
= alloca(addrlen
+1);
3210 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3214 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3217 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3218 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3219 int flags
, int send
)
3225 abi_ulong target_vec
;
3227 if (msgp
->msg_name
) {
3228 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3229 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3230 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3231 tswapal(msgp
->msg_name
),
3233 if (ret
== -TARGET_EFAULT
) {
3234 /* For connected sockets msg_name and msg_namelen must
3235 * be ignored, so returning EFAULT immediately is wrong.
3236 * Instead, pass a bad msg_name to the host kernel, and
3237 * let it decide whether to return EFAULT or not.
3239 msg
.msg_name
= (void *)-1;
3244 msg
.msg_name
= NULL
;
3245 msg
.msg_namelen
= 0;
3247 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3248 msg
.msg_control
= alloca(msg
.msg_controllen
);
3249 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3251 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3253 count
= tswapal(msgp
->msg_iovlen
);
3254 target_vec
= tswapal(msgp
->msg_iov
);
3256 if (count
> IOV_MAX
) {
3257 /* sendrcvmsg returns a different errno for this condition than
3258 * readv/writev, so we must catch it here before lock_iovec() does.
3260 ret
= -TARGET_EMSGSIZE
;
3264 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3265 target_vec
, count
, send
);
3267 ret
= -host_to_target_errno(errno
);
3270 msg
.msg_iovlen
= count
;
3274 if (fd_trans_target_to_host_data(fd
)) {
3277 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3278 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3279 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3280 msg
.msg_iov
->iov_len
);
3282 msg
.msg_iov
->iov_base
= host_msg
;
3283 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3287 ret
= target_to_host_cmsg(&msg
, msgp
);
3289 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3293 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3294 if (!is_error(ret
)) {
3296 if (fd_trans_host_to_target_data(fd
)) {
3297 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3298 MIN(msg
.msg_iov
->iov_len
, len
));
3300 ret
= host_to_target_cmsg(msgp
, &msg
);
3302 if (!is_error(ret
)) {
3303 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3304 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3305 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3306 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3307 msg
.msg_name
, msg
.msg_namelen
);
3319 unlock_iovec(vec
, target_vec
, count
, !send
);
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * sendmsg(2)/recvmsg(2) front end: locks the guest target_msghdr
 * (read-only when sending, writable when receiving), delegates to
 * do_sendrecvmsg_locked(), then unlocks — copying the header back to the
 * guest only on the receive path (send ? 0 : 1).
 */
3324 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3325 int flags
, int send
)
3328 struct target_msghdr
*msgp
;
3330 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3334 return -TARGET_EFAULT
;
3336 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3337 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * sendmmsg(2)/recvmmsg(2) emulation: caps vlen at UIO_MAXIOV, locks the
 * guest mmsghdr vector, and loops calling do_sendrecvmsg_locked() per
 * element, storing each per-message byte count (byte-swapped) in
 * msg_len. Stops on the first error; MSG_WAITFORONE switches subsequent
 * iterations to non-blocking by OR-ing in MSG_DONTWAIT, per the syscall
 * contract. Unlocks only the i entries actually written. If any
 * datagrams were processed, their count is returned instead of the
 * trailing error (return statements elided from this view).
 */
3341 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3342 * so it might not have this *mmsg-specific flag either.
3344 #ifndef MSG_WAITFORONE
3345 #define MSG_WAITFORONE 0x10000
3348 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3349 unsigned int vlen
, unsigned int flags
,
3352 struct target_mmsghdr
*mmsgp
;
3356 if (vlen
> UIO_MAXIOV
) {
3360 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3362 return -TARGET_EFAULT
;
3365 for (i
= 0; i
< vlen
; i
++) {
3366 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3367 if (is_error(ret
)) {
3370 mmsgp
[i
].msg_len
= tswap32(ret
);
3371 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3372 if (flags
& MSG_WAITFORONE
) {
3373 flags
|= MSG_DONTWAIT
;
3377 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3379 /* Return number of datagrams sent if we sent any at all;
3380 * otherwise return the error.
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * accept4(2) emulation. Flags are mapped via target_to_host_bitmask with
 * fcntl_flags_tbl. A NULL guest addr short-circuits to safe_accept4 with
 * no address out-params. Otherwise: addrlen is fetched (EFAULT on bad
 * pointer, matching Linux), negative lengths rejected, the destination
 * range probed with access_ok, and the kernel-updated length written
 * back; only MIN(addrlen, ret_addrlen) bytes of sockaddr are converted
 * so a short guest buffer is never overrun.
 */
3388 /* do_accept4() Must return target values and target errnos. */
3389 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3390 abi_ulong target_addrlen_addr
, int flags
)
3392 socklen_t addrlen
, ret_addrlen
;
3397 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3399 if (target_addr
== 0) {
3400 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3403 /* linux returns EFAULT if addrlen pointer is invalid */
3404 if (get_user_u32(addrlen
, target_addrlen_addr
))
3405 return -TARGET_EFAULT
;
3407 if ((int)addrlen
< 0) {
3408 return -TARGET_EINVAL
;
3411 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3412 return -TARGET_EFAULT
;
3415 addr
= alloca(addrlen
);
3417 ret_addrlen
= addrlen
;
3418 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3419 if (!is_error(ret
)) {
3420 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3421 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3422 ret
= -TARGET_EFAULT
;
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * getpeername(2) emulation: fetch guest addrlen, validate it and the
 * writable destination range, call host getpeername(), convert at most
 * MIN(addrlen, ret_addrlen) bytes of sockaddr back to target format and
 * store the updated length. Same shape as do_getsockname() below.
 */
3428 /* do_getpeername() Must return target values and target errnos. */
3429 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3430 abi_ulong target_addrlen_addr
)
3432 socklen_t addrlen
, ret_addrlen
;
3436 if (get_user_u32(addrlen
, target_addrlen_addr
))
3437 return -TARGET_EFAULT
;
3439 if ((int)addrlen
< 0) {
3440 return -TARGET_EINVAL
;
3443 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3444 return -TARGET_EFAULT
;
3447 addr
= alloca(addrlen
);
3449 ret_addrlen
= addrlen
;
3450 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3451 if (!is_error(ret
)) {
3452 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3453 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3454 ret
= -TARGET_EFAULT
;
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * getsockname(2) emulation; identical structure to do_getpeername()
 * above, calling host getsockname() instead.
 */
3460 /* do_getsockname() Must return target values and target errnos. */
3461 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3462 abi_ulong target_addrlen_addr
)
3464 socklen_t addrlen
, ret_addrlen
;
3468 if (get_user_u32(addrlen
, target_addrlen_addr
))
3469 return -TARGET_EFAULT
;
3471 if ((int)addrlen
< 0) {
3472 return -TARGET_EINVAL
;
3475 if (!access_ok(thread_cpu
, VERIFY_WRITE
, target_addr
, addrlen
)) {
3476 return -TARGET_EFAULT
;
3479 addr
= alloca(addrlen
);
3481 ret_addrlen
= addrlen
;
3482 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3483 if (!is_error(ret
)) {
3484 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3485 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3486 ret
= -TARGET_EFAULT
;
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * socketpair(2) emulation: converts the target socket type flags, calls
 * host socketpair() into a local two-int array, and stores both fds to
 * the guest array as signed 32-bit values (EFAULT if either store
 * fails). Note the target_to_host_sock_type() return value is not
 * checked here, unlike in do_socket().
 */
3492 /* do_socketpair() Must return target values and target errnos. */
3493 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3494 abi_ulong target_tab_addr
)
3499 target_to_host_sock_type(&type
);
3501 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3502 if (!is_error(ret
)) {
3503 if (put_user_s32(tab
[0], target_tab_addr
)
3504 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3505 ret
= -TARGET_EFAULT
;
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * sendto(2)/send(2) emulation (send == target_addr = 0). Locks the guest
 * buffer read-only; if the fd has a registered target-to-host data
 * translator (packet/netlink fds), the payload is copied into a g_malloc
 * buffer first so the translation does not modify guest memory, with the
 * original pointer kept in copy_msg for the final unlock. With a
 * destination address, converts the sockaddr and calls safe_sendto;
 * otherwise passes NULL/0. Error checks between steps and the g_free of
 * the translated buffer are elided from this view.
 */
3510 /* do_sendto() Must return target values and target errnos. */
3511 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3512 abi_ulong target_addr
, socklen_t addrlen
)
3516 void *copy_msg
= NULL
;
3519 if ((int)addrlen
< 0) {
3520 return -TARGET_EINVAL
;
3523 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3525 return -TARGET_EFAULT
;
3526 if (fd_trans_target_to_host_data(fd
)) {
3527 copy_msg
= host_msg
;
3528 host_msg
= g_malloc(len
);
3529 memcpy(host_msg
, copy_msg
, len
);
3530 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3536 addr
= alloca(addrlen
+1);
3537 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3541 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3543 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3548 host_msg
= copy_msg
;
3550 unlock_user(host_msg
, msg
, 0);
3554 /* do_recvfrom() Must return target values and target errnos. */
3555 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3556 abi_ulong target_addr
,
3557 abi_ulong target_addrlen
)
3559 socklen_t addrlen
, ret_addrlen
;
3567 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3569 return -TARGET_EFAULT
;
3573 if (get_user_u32(addrlen
, target_addrlen
)) {
3574 ret
= -TARGET_EFAULT
;
3577 if ((int)addrlen
< 0) {
3578 ret
= -TARGET_EINVAL
;
3581 addr
= alloca(addrlen
);
3582 ret_addrlen
= addrlen
;
3583 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3584 addr
, &ret_addrlen
));
3586 addr
= NULL
; /* To keep compiler quiet. */
3587 addrlen
= 0; /* To keep compiler quiet. */
3588 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3590 if (!is_error(ret
)) {
3591 if (fd_trans_host_to_target_data(fd
)) {
3593 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3594 if (is_error(trans
)) {
3600 host_to_target_sockaddr(target_addr
, addr
,
3601 MIN(addrlen
, ret_addrlen
));
3602 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3603 ret
= -TARGET_EFAULT
;
3607 unlock_user(host_msg
, msg
, len
);
3610 unlock_user(host_msg
, msg
, 0);
3615 #ifdef TARGET_NR_socketcall
3616 /* do_socketcall() must return target values and target errnos. */
3617 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3619 static const unsigned nargs
[] = { /* number of arguments per operation */
3620 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3621 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3622 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3623 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3624 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3625 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3626 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3627 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3628 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3629 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3630 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3631 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3632 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3633 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3634 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3635 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3636 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3637 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3638 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3639 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3641 abi_long a
[6]; /* max 6 args */
3644 /* check the range of the first argument num */
3645 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3646 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3647 return -TARGET_EINVAL
;
3649 /* ensure we have space for args */
3650 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3651 return -TARGET_EINVAL
;
3653 /* collect the arguments in a[] according to nargs[] */
3654 for (i
= 0; i
< nargs
[num
]; ++i
) {
3655 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3656 return -TARGET_EFAULT
;
3659 /* now when we have the args, invoke the appropriate underlying function */
3661 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3662 return do_socket(a
[0], a
[1], a
[2]);
3663 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3664 return do_bind(a
[0], a
[1], a
[2]);
3665 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3666 return do_connect(a
[0], a
[1], a
[2]);
3667 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3668 return get_errno(listen(a
[0], a
[1]));
3669 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3670 return do_accept4(a
[0], a
[1], a
[2], 0);
3671 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3672 return do_getsockname(a
[0], a
[1], a
[2]);
3673 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3674 return do_getpeername(a
[0], a
[1], a
[2]);
3675 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3676 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3677 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3678 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3679 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3680 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3681 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3682 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3683 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3684 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3685 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3686 return get_errno(shutdown(a
[0], a
[1]));
3687 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3688 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3689 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3690 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3691 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3692 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3693 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3694 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3695 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3696 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3697 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3698 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3699 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3700 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3702 qemu_log_mask(LOG_UNIMP
, "Unsupported socketcall: %d\n", num
);
3703 return -TARGET_EINVAL
;
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 * Fixed-size table tracking guest shmat() mappings (fields elided from
 * this view) so shmdt()/shmctl emulation can find them; 32 entries.
 */
3708 #define N_SHM_REGIONS 32
3710 static struct shm_region
{
3714 } shm_regions
[N_SHM_REGIONS
];
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * Fallback asm-generic layout of the target's semid64_ds, used when the
 * target's own header does not define TARGET_SEMID64_DS. On 32-bit ABIs
 * each time field is widened with an adjacent __unusedN pad word,
 * matching the kernel's asm-generic/sembuf.h layout.
 */
3716 #ifndef TARGET_SEMID64_DS
3717 /* asm-generic version of this struct */
3718 struct target_semid64_ds
3720 struct target_ipc_perm sem_perm
;
3721 abi_ulong sem_otime
;
3722 #if TARGET_ABI_BITS == 32
3723 abi_ulong __unused1
;
3725 abi_ulong sem_ctime
;
3726 #if TARGET_ABI_BITS == 32
3727 abi_ulong __unused2
;
3729 abi_ulong sem_nsems
;
3730 abi_ulong __unused3
;
3731 abi_ulong __unused4
;
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * Copies a struct ipc_perm from guest memory (embedded as sem_perm in a
 * target_semid64_ds at target_addr) into host_ip, byte-swapping each
 * field. mode and __seq are 32-bit on Alpha/MIPS/PPC targets and 16-bit
 * elsewhere, hence the conditional tswap32/tswap16 pairs (the #else
 * lines between them are elided from this view). Unlocks without
 * copy-back (read-only direction).
 */
3735 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3736 abi_ulong target_addr
)
3738 struct target_ipc_perm
*target_ip
;
3739 struct target_semid64_ds
*target_sd
;
3741 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3742 return -TARGET_EFAULT
;
3743 target_ip
= &(target_sd
->sem_perm
);
3744 host_ip
->__key
= tswap32(target_ip
->__key
);
3745 host_ip
->uid
= tswap32(target_ip
->uid
);
3746 host_ip
->gid
= tswap32(target_ip
->gid
);
3747 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3748 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3749 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3750 host_ip
->mode
= tswap32(target_ip
->mode
);
3752 host_ip
->mode
= tswap16(target_ip
->mode
);
3754 #if defined(TARGET_PPC)
3755 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3757 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3759 unlock_user_struct(target_sd
, target_addr
, 0);
/*
 * NOTE(review): reflowed/elided extraction view — code kept verbatim.
 *
 * Inverse of target_to_host_ipc_perm() above: writes host_ip into the
 * guest's target_semid64_ds.sem_perm with per-field byte-swapping, then
 * unlocks with copy-back (final argument 1). Same target-dependent
 * 16/32-bit handling of mode and __seq.
 */
3763 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3764 struct ipc_perm
*host_ip
)
3766 struct target_ipc_perm
*target_ip
;
3767 struct target_semid64_ds
*target_sd
;
3769 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3770 return -TARGET_EFAULT
;
3771 target_ip
= &(target_sd
->sem_perm
);
3772 target_ip
->__key
= tswap32(host_ip
->__key
);
3773 target_ip
->uid
= tswap32(host_ip
->uid
);
3774 target_ip
->gid
= tswap32(host_ip
->gid
);
3775 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3776 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3777 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3778 target_ip
->mode
= tswap32(host_ip
->mode
);
3780 target_ip
->mode
= tswap16(host_ip
->mode
);
3782 #if defined(TARGET_PPC)
3783 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3785 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3787 unlock_user_struct(target_sd
, target_addr
, 1);
/*
 * Convert a guest semid64_ds at target_addr into the host *host_sd
 * (permissions plus nsems/otime/ctime).  Returns 0 or -TARGET_EFAULT.
 * NOTE(review): on the ipc_perm failure path target_sd is returned without
 * an unlock — matches the visible code; confirm this is intentional.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/*
 * Mirror of target_to_host_semid_ds(): write host *host_sd back to the
 * guest semid64_ds at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3823 struct target_seminfo
{
/*
 * Copy a host struct seminfo (result of semctl IPC_INFO/SEM_INFO) out to
 * the guest structure at target_addr; __put_user handles the byte swap
 * per field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
/*
 * Host-side union passed by value to semctl(2); mirrors the kernel's
 * union semun, which glibc does not declare.
 * NOTE(review): the 'union semun {' / 'int val;' lines are reconstructed
 * from the visible members — TODO confirm against upstream.
 */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest view of semun: all pointer members are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
/*
 * Allocate *host_array and fill it from the guest semaphore-value array at
 * target_addr (for semctl SETALL).  The element count is obtained from the
 * kernel via IPC_STAT rather than trusted from the guest.
 * On success returns 0 and ownership of *host_array passes to the caller;
 * on failure returns a negative target errno and *host_array is freed or
 * never allocated.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* query the kernel for the real number of semaphores in the set */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
/*
 * Write *host_array back to the guest semaphore-value array at target_addr
 * (for semctl GETALL) and free the host copy.  The element count again
 * comes from IPC_STAT.  Returns 0 or a negative target errno.
 * NOTE(review): *host_array is not freed on the EFAULT path — matches the
 * visible code; confirm upstream.
 */
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
3937 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3938 abi_ulong target_arg
)
3940 union target_semun target_su
= { .buf
= target_arg
};
3942 struct semid_ds dsarg
;
3943 unsigned short *array
= NULL
;
3944 struct seminfo seminfo
;
3945 abi_long ret
= -TARGET_EINVAL
;
3952 /* In 64 bit cross-endian situations, we will erroneously pick up
3953 * the wrong half of the union for the "val" element. To rectify
3954 * this, the entire 8-byte structure is byteswapped, followed by
3955 * a swap of the 4 byte val field. In other cases, the data is
3956 * already in proper host byte order. */
3957 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3958 target_su
.buf
= tswapal(target_su
.buf
);
3959 arg
.val
= tswap32(target_su
.val
);
3961 arg
.val
= target_su
.val
;
3963 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3967 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3971 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3972 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3979 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3983 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3984 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3990 arg
.__buf
= &seminfo
;
3991 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3992 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4000 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/*
 * Guest layout of struct sembuf (one semop operation).
 * NOTE(review): sem_op/sem_flg reconstructed from the __get_user calls in
 * target_to_host_sembuf() — TODO confirm member types upstream.
 */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation (+/-/0) */
    short sem_flg;           /* IPC_NOWAIT, SEM_UNDO */
};
/*
 * Copy an array of nsops guest sembuf entries at target_addr into the
 * caller-provided host_sembuf array.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);
    return 0;
}
4036 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4037 defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4040 * This macro is required to handle the s390 variants, which passes the
4041 * arguments in a different order than default.
4044 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4045 (__nsops), (__timeout), (__sops)
4047 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4048 (__nsops), 0, (__sops), (__timeout)
4051 static inline abi_long
do_semtimedop(int semid
,
4054 abi_long timeout
, bool time64
)
4056 struct sembuf
*sops
;
4057 struct timespec ts
, *pts
= NULL
;
4063 if (target_to_host_timespec64(pts
, timeout
)) {
4064 return -TARGET_EFAULT
;
4067 if (target_to_host_timespec(pts
, timeout
)) {
4068 return -TARGET_EFAULT
;
4073 if (nsops
> TARGET_SEMOPM
) {
4074 return -TARGET_E2BIG
;
4077 sops
= g_new(struct sembuf
, nsops
);
4079 if (target_to_host_sembuf(sops
, ptr
, nsops
)) {
4081 return -TARGET_EFAULT
;
4084 ret
= -TARGET_ENOSYS
;
4085 #ifdef __NR_semtimedop
4086 ret
= get_errno(safe_semtimedop(semid
, sops
, nsops
, pts
));
4089 if (ret
== -TARGET_ENOSYS
) {
4090 ret
= get_errno(safe_ipc(IPCOP_semtimedop
, semid
,
4091 SEMTIMEDOP_IPC_ARGS(nsops
, sops
, (long)pts
)));
/*
 * Guest layout of msqid64_ds (asm-generic style).  On 32-bit ABIs each
 * 64-bit timestamp occupies two words, hence the __unusedN padding.
 * NOTE(review): the msg_qnum member is reconstructed (its line was lost in
 * extraction; it is referenced by the converters below) — confirm upstream.
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
/*
 * Convert a guest msqid64_ds at target_addr into the host *host_md
 * (for msgctl IPC_SET).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
/*
 * Mirror of target_to_host_msqid_ds(): write host *host_md back to the
 * guest msqid64_ds at target_addr (for msgctl IPC_STAT).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
4165 struct target_msginfo
{
4173 unsigned short int msgseg
;
/*
 * Copy a host struct msginfo (msgctl IPC_INFO/MSG_INFO result) out to the
 * guest structure at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
/*
 * Emulate msgctl(2) for the guest: marshal the per-command argument in,
 * call the host msgctl, and marshal the result back out.
 * Returns the host result or a negative target errno; commands not listed
 * fall through with -TARGET_EINVAL.
 * NOTE(review): the case labels and the 'cmd &= 0xff' normalisation are
 * reconstructed (those lines were lost in extraction) — confirm upstream.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4226 struct target_msgbuf
{
4231 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4232 ssize_t msgsz
, int msgflg
)
4234 struct target_msgbuf
*target_mb
;
4235 struct msgbuf
*host_mb
;
4239 return -TARGET_EINVAL
;
4242 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4243 return -TARGET_EFAULT
;
4244 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4246 unlock_user_struct(target_mb
, msgp
, 0);
4247 return -TARGET_ENOMEM
;
4249 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4250 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4251 ret
= -TARGET_ENOSYS
;
4253 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4256 if (ret
== -TARGET_ENOSYS
) {
4258 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4261 ret
= get_errno(safe_ipc(IPCOP_msgsnd
, msqid
, msgsz
, msgflg
,
4267 unlock_user_struct(target_mb
, msgp
, 0);
4273 #if defined(__sparc__)
4274 /* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
4275 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4276 #elif defined(__s390x__)
4277 /* The s390 sys_ipc variant has only five parameters. */
4278 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4279 ((long int[]){(long int)__msgp, __msgtyp})
4281 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4282 ((long int[]){(long int)__msgp, __msgtyp}), 0
4286 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4287 ssize_t msgsz
, abi_long msgtyp
,
4290 struct target_msgbuf
*target_mb
;
4292 struct msgbuf
*host_mb
;
4296 return -TARGET_EINVAL
;
4299 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4300 return -TARGET_EFAULT
;
4302 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4304 ret
= -TARGET_ENOMEM
;
4307 ret
= -TARGET_ENOSYS
;
4309 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4312 if (ret
== -TARGET_ENOSYS
) {
4313 ret
= get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv
), msqid
, msgsz
,
4314 msgflg
, MSGRCV_ARGS(host_mb
, msgtyp
)));
4319 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4320 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4321 if (!target_mtext
) {
4322 ret
= -TARGET_EFAULT
;
4325 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4326 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4329 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4333 unlock_user_struct(target_mb
, msgp
, 1);
/*
 * Convert a guest shmid64_ds at target_addr into the host *host_sd
 * (for shmctl IPC_SET).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/*
 * Mirror of target_to_host_shmid_ds(): write host *host_sd back to the
 * guest shmid64_ds at target_addr (for shmctl IPC_STAT).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
4378 struct target_shminfo
{
/*
 * Copy a host struct shminfo (shmctl IPC_INFO result) out to the guest
 * structure at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/*
 * Guest layout of struct shm_info (shmctl SHM_INFO result).
 * NOTE(review): the first four members are reconstructed from the
 * __put_user calls in host_to_target_shm_info() — confirm types upstream.
 */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/*
 * Copy a host struct shm_info (shmctl SHM_INFO result) out to the guest
 * structure at target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/*
 * Emulate shmctl(2) for the guest: marshal the per-command argument in,
 * call the host shmctl, and marshal the result back out.
 * Returns the host result or a negative target errno; unlisted commands
 * fall through with -TARGET_EINVAL.
 * NOTE(review): the case labels and 'cmd &= 0xff' are reconstructed (lost
 * in extraction) — confirm upstream.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4465 #ifndef TARGET_FORCE_SHMLBA
4466 /* For most architectures, SHMLBA is the same as the page size;
4467 * some architectures have larger values, in which case they should
4468 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4469 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4470 * and defining its own value for SHMLBA.
4472 * The kernel also permits SHMLBA to be set by the architecture to a
4473 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4474 * this means that addresses are rounded to the large size if
4475 * SHM_RND is set but addresses not aligned to that size are not rejected
4476 * as long as they are at least page-aligned. Since the only architecture
4477 * which uses this is ia64 this code doesn't provide for that oddity.
/*
 * Default SHMLBA for targets that do not define TARGET_FORCE_SHMLBA:
 * one guest page, matching the kernel's generic behaviour (see the
 * comment block above).
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
4485 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4486 int shmid
, abi_ulong shmaddr
, int shmflg
)
4488 CPUState
*cpu
= env_cpu(cpu_env
);
4491 struct shmid_ds shm_info
;
4495 /* shmat pointers are always untagged */
4497 /* find out the length of the shared memory segment */
4498 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4499 if (is_error(ret
)) {
4500 /* can't get length, bail out */
4504 shmlba
= target_shmlba(cpu_env
);
4506 if (shmaddr
& (shmlba
- 1)) {
4507 if (shmflg
& SHM_RND
) {
4508 shmaddr
&= ~(shmlba
- 1);
4510 return -TARGET_EINVAL
;
4513 if (!guest_range_valid_untagged(shmaddr
, shm_info
.shm_segsz
)) {
4514 return -TARGET_EINVAL
;
4520 * We're mapping shared memory, so ensure we generate code for parallel
4521 * execution and flush old translations. This will work up to the level
4522 * supported by the host -- anything that requires EXCP_ATOMIC will not
4523 * be atomic with respect to an external process.
4525 if (!(cpu
->tcg_cflags
& CF_PARALLEL
)) {
4526 cpu
->tcg_cflags
|= CF_PARALLEL
;
4531 host_raddr
= shmat(shmid
, (void *)g2h_untagged(shmaddr
), shmflg
);
4533 abi_ulong mmap_start
;
4535 /* In order to use the host shmat, we need to honor host SHMLBA. */
4536 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
, MAX(SHMLBA
, shmlba
));
4538 if (mmap_start
== -1) {
4540 host_raddr
= (void *)-1;
4542 host_raddr
= shmat(shmid
, g2h_untagged(mmap_start
),
4543 shmflg
| SHM_REMAP
);
4546 if (host_raddr
== (void *)-1) {
4548 return get_errno((long)host_raddr
);
4550 raddr
=h2g((unsigned long)host_raddr
);
4552 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4553 PAGE_VALID
| PAGE_RESET
| PAGE_READ
|
4554 (shmflg
& SHM_RDONLY
? 0 : PAGE_WRITE
));
4556 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4557 if (!shm_regions
[i
].in_use
) {
4558 shm_regions
[i
].in_use
= true;
4559 shm_regions
[i
].start
= raddr
;
4560 shm_regions
[i
].size
= shm_info
.shm_segsz
;
/*
 * Emulate shmdt(2): release the shm_regions[] bookkeeping slot for this
 * guest address, clear its page flags, then detach on the host side.
 * Returns 0 or a negative target errno.
 * NOTE(review): the mmap_lock()/mmap_unlock() bracketing, loop 'break' and
 * final 'return rv' are reconstructed (lost in extraction) — confirm
 * upstream.
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            /* drop guest page permissions for the detached range */
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
4593 #ifdef TARGET_NR_ipc
4594 /* ??? This only works with linear mappings. */
4595 /* do_ipc() must return target values and target errnos. */
4596 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4597 unsigned int call
, abi_long first
,
4598 abi_long second
, abi_long third
,
4599 abi_long ptr
, abi_long fifth
)
4604 version
= call
>> 16;
4609 ret
= do_semtimedop(first
, ptr
, second
, 0, false);
4611 case IPCOP_semtimedop
:
4613 * The s390 sys_ipc variant has only five parameters instead of six
4614 * (as for default variant) and the only difference is the handling of
4615 * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4616 * to a struct timespec where the generic variant uses fifth parameter.
4618 #if defined(TARGET_S390X)
4619 ret
= do_semtimedop(first
, ptr
, second
, third
, TARGET_ABI_BITS
== 64);
4621 ret
= do_semtimedop(first
, ptr
, second
, fifth
, TARGET_ABI_BITS
== 64);
4626 ret
= get_errno(semget(first
, second
, third
));
4629 case IPCOP_semctl
: {
4630 /* The semun argument to semctl is passed by value, so dereference the
4633 get_user_ual(atptr
, ptr
);
4634 ret
= do_semctl(first
, second
, third
, atptr
);
4639 ret
= get_errno(msgget(first
, second
));
4643 ret
= do_msgsnd(first
, ptr
, second
, third
);
4647 ret
= do_msgctl(first
, second
, ptr
);
4654 struct target_ipc_kludge
{
4659 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4660 ret
= -TARGET_EFAULT
;
4664 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4666 unlock_user_struct(tmp
, ptr
, 0);
4670 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4679 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4680 if (is_error(raddr
))
4681 return get_errno(raddr
);
4682 if (put_user_ual(raddr
, third
))
4683 return -TARGET_EFAULT
;
4687 ret
= -TARGET_EINVAL
;
4692 ret
= do_shmdt(ptr
);
4696 /* IPC_* flag values are the same on all linux platforms */
4697 ret
= get_errno(shmget(first
, second
, third
));
4700 /* IPC_* and SHM_* command values are the same on all linux platforms */
4702 ret
= do_shmctl(first
, second
, ptr
);
4705 qemu_log_mask(LOG_UNIMP
, "Unsupported ipc call: %d (version %d)\n",
4707 ret
= -TARGET_ENOSYS
;
4714 /* kernel structure types definitions */
4716 #define STRUCT(name, ...) STRUCT_ ## name,
4717 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4719 #include "syscall_types.h"
4723 #undef STRUCT_SPECIAL
4725 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4726 #define STRUCT_SPECIAL(name)
4727 #include "syscall_types.h"
4729 #undef STRUCT_SPECIAL
4731 #define MAX_STRUCT_SIZE 4096
4733 #ifdef CONFIG_FIEMAP
4734 /* So fiemap access checks don't overflow on 32 bit systems.
4735 * This is very slightly smaller than the limit imposed by
4736 * the underlying kernel.
4738 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4739 / sizeof(struct fiemap_extent))
4741 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4742 int fd
, int cmd
, abi_long arg
)
4744 /* The parameter for this ioctl is a struct fiemap followed
4745 * by an array of struct fiemap_extent whose size is set
4746 * in fiemap->fm_extent_count. The array is filled in by the
4749 int target_size_in
, target_size_out
;
4751 const argtype
*arg_type
= ie
->arg_type
;
4752 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4755 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4759 assert(arg_type
[0] == TYPE_PTR
);
4760 assert(ie
->access
== IOC_RW
);
4762 target_size_in
= thunk_type_size(arg_type
, 0);
4763 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4765 return -TARGET_EFAULT
;
4767 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4768 unlock_user(argptr
, arg
, 0);
4769 fm
= (struct fiemap
*)buf_temp
;
4770 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4771 return -TARGET_EINVAL
;
4774 outbufsz
= sizeof (*fm
) +
4775 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4777 if (outbufsz
> MAX_STRUCT_SIZE
) {
4778 /* We can't fit all the extents into the fixed size buffer.
4779 * Allocate one that is large enough and use it instead.
4781 fm
= g_try_malloc(outbufsz
);
4783 return -TARGET_ENOMEM
;
4785 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4788 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4789 if (!is_error(ret
)) {
4790 target_size_out
= target_size_in
;
4791 /* An extent_count of 0 means we were only counting the extents
4792 * so there are no structs to copy
4794 if (fm
->fm_extent_count
!= 0) {
4795 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4797 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4799 ret
= -TARGET_EFAULT
;
4801 /* Convert the struct fiemap */
4802 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4803 if (fm
->fm_extent_count
!= 0) {
4804 p
= argptr
+ target_size_in
;
4805 /* ...and then all the struct fiemap_extents */
4806 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4807 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4812 unlock_user(argptr
, arg
, target_size_out
);
4822 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4823 int fd
, int cmd
, abi_long arg
)
4825 const argtype
*arg_type
= ie
->arg_type
;
4829 struct ifconf
*host_ifconf
;
4831 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4832 const argtype ifreq_max_type
[] = { MK_STRUCT(STRUCT_ifmap_ifreq
) };
4833 int target_ifreq_size
;
4838 abi_long target_ifc_buf
;
4842 assert(arg_type
[0] == TYPE_PTR
);
4843 assert(ie
->access
== IOC_RW
);
4846 target_size
= thunk_type_size(arg_type
, 0);
4848 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4850 return -TARGET_EFAULT
;
4851 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4852 unlock_user(argptr
, arg
, 0);
4854 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4855 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4856 target_ifreq_size
= thunk_type_size(ifreq_max_type
, 0);
4858 if (target_ifc_buf
!= 0) {
4859 target_ifc_len
= host_ifconf
->ifc_len
;
4860 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4861 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4863 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4864 if (outbufsz
> MAX_STRUCT_SIZE
) {
4866 * We can't fit all the extents into the fixed size buffer.
4867 * Allocate one that is large enough and use it instead.
4869 host_ifconf
= g_try_malloc(outbufsz
);
4871 return -TARGET_ENOMEM
;
4873 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4876 host_ifc_buf
= (char *)host_ifconf
+ sizeof(*host_ifconf
);
4878 host_ifconf
->ifc_len
= host_ifc_len
;
4880 host_ifc_buf
= NULL
;
4882 host_ifconf
->ifc_buf
= host_ifc_buf
;
4884 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4885 if (!is_error(ret
)) {
4886 /* convert host ifc_len to target ifc_len */
4888 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4889 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4890 host_ifconf
->ifc_len
= target_ifc_len
;
4892 /* restore target ifc_buf */
4894 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4896 /* copy struct ifconf to target user */
4898 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4900 return -TARGET_EFAULT
;
4901 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4902 unlock_user(argptr
, arg
, target_size
);
4904 if (target_ifc_buf
!= 0) {
4905 /* copy ifreq[] to target user */
4906 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4907 for (i
= 0; i
< nb_ifreq
; i
++) {
4908 thunk_convert(argptr
+ i
* target_ifreq_size
,
4909 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4910 ifreq_arg_type
, THUNK_TARGET
);
4912 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4917 g_free(host_ifconf
);
4923 #if defined(CONFIG_USBFS)
4924 #if HOST_LONG_BITS > 64
4925 #error USBDEVFS thunks do not support >64 bit hosts yet.
/*
 * Host-side record for an in-flight usbdevfs URB: the guest addresses of
 * the urb struct and its data buffer, the locked host pointer for that
 * buffer, and the host copy of the urb handed to the kernel.
 * Keyed by target_urb_adr in the urb hashtable below; reapurb recovers the
 * record from &host_urb via offsetof().
 */
struct live_urb {
    uint64_t target_urb_adr;  /* guest address of the usbdevfs_urb */
    uint64_t target_buf_adr;  /* guest address of the data buffer */
    char *target_buf_ptr;     /* locked host view of the guest buffer */
    struct usbdevfs_urb host_urb;
};
/*
 * Lazily-created process-wide table mapping guest URB addresses (64-bit
 * keys) to their struct live_urb records.
 */
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}
/*
 * Register a live URB; the record itself serves as both key and value
 * (lookup hashes the leading 64-bit target_urb_adr member).
 */
static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}
/* Find the live URB registered for a guest URB address, or NULL. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}
/* Deregister a live URB (does not free the record). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
4963 do_ioctl_usbdevfs_reapurb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4964 int fd
, int cmd
, abi_long arg
)
4966 const argtype usbfsurb_arg_type
[] = { MK_STRUCT(STRUCT_usbdevfs_urb
) };
4967 const argtype ptrvoid_arg_type
[] = { TYPE_PTRVOID
, 0, 0 };
4968 struct live_urb
*lurb
;
4972 uintptr_t target_urb_adr
;
4975 target_size
= thunk_type_size(usbfsurb_arg_type
, THUNK_TARGET
);
4977 memset(buf_temp
, 0, sizeof(uint64_t));
4978 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
4979 if (is_error(ret
)) {
4983 memcpy(&hurb
, buf_temp
, sizeof(uint64_t));
4984 lurb
= (void *)((uintptr_t)hurb
- offsetof(struct live_urb
, host_urb
));
4985 if (!lurb
->target_urb_adr
) {
4986 return -TARGET_EFAULT
;
4988 urb_hashtable_remove(lurb
);
4989 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
,
4990 lurb
->host_urb
.buffer_length
);
4991 lurb
->target_buf_ptr
= NULL
;
4993 /* restore the guest buffer pointer */
4994 lurb
->host_urb
.buffer
= (void *)(uintptr_t)lurb
->target_buf_adr
;
4996 /* update the guest urb struct */
4997 argptr
= lock_user(VERIFY_WRITE
, lurb
->target_urb_adr
, target_size
, 0);
5000 return -TARGET_EFAULT
;
5002 thunk_convert(argptr
, &lurb
->host_urb
, usbfsurb_arg_type
, THUNK_TARGET
);
5003 unlock_user(argptr
, lurb
->target_urb_adr
, target_size
);
5005 target_size
= thunk_type_size(ptrvoid_arg_type
, THUNK_TARGET
);
5006 /* write back the urb handle */
5007 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5010 return -TARGET_EFAULT
;
5013 /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5014 target_urb_adr
= lurb
->target_urb_adr
;
5015 thunk_convert(argptr
, &target_urb_adr
, ptrvoid_arg_type
, THUNK_TARGET
);
5016 unlock_user(argptr
, arg
, target_size
);
/*
 * USBDEVFS_DISCARDURB handler: the guest passes its URB address as the
 * ioctl argument; translate it to the host URB recorded at submit time and
 * forward the discard to the kernel.  Returns -TARGET_EFAULT if the URB
 * was never submitted (or already reaped).
 */
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
5038 do_ioctl_usbdevfs_submiturb(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5039 int fd
, int cmd
, abi_long arg
)
5041 const argtype
*arg_type
= ie
->arg_type
;
5046 struct live_urb
*lurb
;
5049 * each submitted URB needs to map to a unique ID for the
5050 * kernel, and that unique ID needs to be a pointer to
5051 * host memory. hence, we need to malloc for each URB.
5052 * isochronous transfers have a variable length struct.
5055 target_size
= thunk_type_size(arg_type
, THUNK_TARGET
);
5057 /* construct host copy of urb and metadata */
5058 lurb
= g_try_new0(struct live_urb
, 1);
5060 return -TARGET_ENOMEM
;
5063 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5066 return -TARGET_EFAULT
;
5068 thunk_convert(&lurb
->host_urb
, argptr
, arg_type
, THUNK_HOST
);
5069 unlock_user(argptr
, arg
, 0);
5071 lurb
->target_urb_adr
= arg
;
5072 lurb
->target_buf_adr
= (uintptr_t)lurb
->host_urb
.buffer
;
5074 /* buffer space used depends on endpoint type so lock the entire buffer */
5075 /* control type urbs should check the buffer contents for true direction */
5076 rw_dir
= lurb
->host_urb
.endpoint
& USB_DIR_IN
? VERIFY_WRITE
: VERIFY_READ
;
5077 lurb
->target_buf_ptr
= lock_user(rw_dir
, lurb
->target_buf_adr
,
5078 lurb
->host_urb
.buffer_length
, 1);
5079 if (lurb
->target_buf_ptr
== NULL
) {
5081 return -TARGET_EFAULT
;
5084 /* update buffer pointer in host copy */
5085 lurb
->host_urb
.buffer
= lurb
->target_buf_ptr
;
5087 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, &lurb
->host_urb
));
5088 if (is_error(ret
)) {
5089 unlock_user(lurb
->target_buf_ptr
, lurb
->target_buf_adr
, 0);
5092 urb_hashtable_insert(lurb
);
5097 #endif /* CONFIG_USBFS */
5099 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5100 int cmd
, abi_long arg
)
5103 struct dm_ioctl
*host_dm
;
5104 abi_long guest_data
;
5105 uint32_t guest_data_size
;
5107 const argtype
*arg_type
= ie
->arg_type
;
5109 void *big_buf
= NULL
;
5113 target_size
= thunk_type_size(arg_type
, 0);
5114 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5116 ret
= -TARGET_EFAULT
;
5119 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5120 unlock_user(argptr
, arg
, 0);
5122 /* buf_temp is too small, so fetch things into a bigger buffer */
5123 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5124 memcpy(big_buf
, buf_temp
, target_size
);
5128 guest_data
= arg
+ host_dm
->data_start
;
5129 if ((guest_data
- arg
) < 0) {
5130 ret
= -TARGET_EINVAL
;
5133 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5134 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5136 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5138 ret
= -TARGET_EFAULT
;
5142 switch (ie
->host_cmd
) {
5144 case DM_LIST_DEVICES
:
5147 case DM_DEV_SUSPEND
:
5150 case DM_TABLE_STATUS
:
5151 case DM_TABLE_CLEAR
:
5153 case DM_LIST_VERSIONS
:
5157 case DM_DEV_SET_GEOMETRY
:
5158 /* data contains only strings */
5159 memcpy(host_data
, argptr
, guest_data_size
);
5162 memcpy(host_data
, argptr
, guest_data_size
);
5163 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5167 void *gspec
= argptr
;
5168 void *cur_data
= host_data
;
5169 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5170 int spec_size
= thunk_type_size(arg_type
, 0);
5173 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5174 struct dm_target_spec
*spec
= cur_data
;
5178 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5179 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5181 spec
->next
= sizeof(*spec
) + slen
;
5182 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5184 cur_data
+= spec
->next
;
5189 ret
= -TARGET_EINVAL
;
5190 unlock_user(argptr
, guest_data
, 0);
5193 unlock_user(argptr
, guest_data
, 0);
5195 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5196 if (!is_error(ret
)) {
5197 guest_data
= arg
+ host_dm
->data_start
;
5198 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5199 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5200 switch (ie
->host_cmd
) {
5205 case DM_DEV_SUSPEND
:
5208 case DM_TABLE_CLEAR
:
5210 case DM_DEV_SET_GEOMETRY
:
5211 /* no return data */
5213 case DM_LIST_DEVICES
:
5215 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5216 uint32_t remaining_data
= guest_data_size
;
5217 void *cur_data
= argptr
;
5218 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5219 int nl_size
= 12; /* can't use thunk_size due to alignment */
5222 uint32_t next
= nl
->next
;
5224 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5226 if (remaining_data
< nl
->next
) {
5227 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5230 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5231 strcpy(cur_data
+ nl_size
, nl
->name
);
5232 cur_data
+= nl
->next
;
5233 remaining_data
-= nl
->next
;
5237 nl
= (void*)nl
+ next
;
5242 case DM_TABLE_STATUS
:
5244 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5245 void *cur_data
= argptr
;
5246 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5247 int spec_size
= thunk_type_size(arg_type
, 0);
5250 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5251 uint32_t next
= spec
->next
;
5252 int slen
= strlen((char*)&spec
[1]) + 1;
5253 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5254 if (guest_data_size
< spec
->next
) {
5255 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5258 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5259 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5260 cur_data
= argptr
+ spec
->next
;
5261 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5267 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5268 int count
= *(uint32_t*)hdata
;
5269 uint64_t *hdev
= hdata
+ 8;
5270 uint64_t *gdev
= argptr
+ 8;
5273 *(uint32_t*)argptr
= tswap32(count
);
5274 for (i
= 0; i
< count
; i
++) {
5275 *gdev
= tswap64(*hdev
);
5281 case DM_LIST_VERSIONS
:
5283 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5284 uint32_t remaining_data
= guest_data_size
;
5285 void *cur_data
= argptr
;
5286 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5287 int vers_size
= thunk_type_size(arg_type
, 0);
5290 uint32_t next
= vers
->next
;
5292 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5294 if (remaining_data
< vers
->next
) {
5295 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5298 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5299 strcpy(cur_data
+ vers_size
, vers
->name
);
5300 cur_data
+= vers
->next
;
5301 remaining_data
-= vers
->next
;
5305 vers
= (void*)vers
+ next
;
5310 unlock_user(argptr
, guest_data
, 0);
5311 ret
= -TARGET_EINVAL
;
5314 unlock_user(argptr
, guest_data
, guest_data_size
);
5316 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5318 ret
= -TARGET_EFAULT
;
5321 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5322 unlock_user(argptr
, arg
, target_size
);
5329 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5330 int cmd
, abi_long arg
)
5334 const argtype
*arg_type
= ie
->arg_type
;
5335 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5338 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5339 struct blkpg_partition host_part
;
5341 /* Read and convert blkpg */
5343 target_size
= thunk_type_size(arg_type
, 0);
5344 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5346 ret
= -TARGET_EFAULT
;
5349 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5350 unlock_user(argptr
, arg
, 0);
5352 switch (host_blkpg
->op
) {
5353 case BLKPG_ADD_PARTITION
:
5354 case BLKPG_DEL_PARTITION
:
5355 /* payload is struct blkpg_partition */
5358 /* Unknown opcode */
5359 ret
= -TARGET_EINVAL
;
5363 /* Read and convert blkpg->data */
5364 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5365 target_size
= thunk_type_size(part_arg_type
, 0);
5366 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5368 ret
= -TARGET_EFAULT
;
5371 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5372 unlock_user(argptr
, arg
, 0);
5374 /* Swizzle the data pointer to our local copy and call! */
5375 host_blkpg
->data
= &host_part
;
5376 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5382 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5383 int fd
, int cmd
, abi_long arg
)
5385 const argtype
*arg_type
= ie
->arg_type
;
5386 const StructEntry
*se
;
5387 const argtype
*field_types
;
5388 const int *dst_offsets
, *src_offsets
;
5391 abi_ulong
*target_rt_dev_ptr
= NULL
;
5392 unsigned long *host_rt_dev_ptr
= NULL
;
5396 assert(ie
->access
== IOC_W
);
5397 assert(*arg_type
== TYPE_PTR
);
5399 assert(*arg_type
== TYPE_STRUCT
);
5400 target_size
= thunk_type_size(arg_type
, 0);
5401 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5403 return -TARGET_EFAULT
;
5406 assert(*arg_type
== (int)STRUCT_rtentry
);
5407 se
= struct_entries
+ *arg_type
++;
5408 assert(se
->convert
[0] == NULL
);
5409 /* convert struct here to be able to catch rt_dev string */
5410 field_types
= se
->field_types
;
5411 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5412 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5413 for (i
= 0; i
< se
->nb_fields
; i
++) {
5414 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5415 assert(*field_types
== TYPE_PTRVOID
);
5416 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5417 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5418 if (*target_rt_dev_ptr
!= 0) {
5419 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5420 tswapal(*target_rt_dev_ptr
));
5421 if (!*host_rt_dev_ptr
) {
5422 unlock_user(argptr
, arg
, 0);
5423 return -TARGET_EFAULT
;
5426 *host_rt_dev_ptr
= 0;
5431 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5432 argptr
+ src_offsets
[i
],
5433 field_types
, THUNK_HOST
);
5435 unlock_user(argptr
, arg
, 0);
5437 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5439 assert(host_rt_dev_ptr
!= NULL
);
5440 assert(target_rt_dev_ptr
!= NULL
);
5441 if (*host_rt_dev_ptr
!= 0) {
5442 unlock_user((void *)*host_rt_dev_ptr
,
5443 *target_rt_dev_ptr
, 0);
5448 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5449 int fd
, int cmd
, abi_long arg
)
5451 int sig
= target_to_host_signal(arg
);
5452 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5455 static abi_long
do_ioctl_SIOCGSTAMP(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5456 int fd
, int cmd
, abi_long arg
)
5461 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMP
, &tv
));
5462 if (is_error(ret
)) {
5466 if (cmd
== (int)TARGET_SIOCGSTAMP_OLD
) {
5467 if (copy_to_user_timeval(arg
, &tv
)) {
5468 return -TARGET_EFAULT
;
5471 if (copy_to_user_timeval64(arg
, &tv
)) {
5472 return -TARGET_EFAULT
;
5479 static abi_long
do_ioctl_SIOCGSTAMPNS(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5480 int fd
, int cmd
, abi_long arg
)
5485 ret
= get_errno(safe_ioctl(fd
, SIOCGSTAMPNS
, &ts
));
5486 if (is_error(ret
)) {
5490 if (cmd
== (int)TARGET_SIOCGSTAMPNS_OLD
) {
5491 if (host_to_target_timespec(arg
, &ts
)) {
5492 return -TARGET_EFAULT
;
5495 if (host_to_target_timespec64(arg
, &ts
)) {
5496 return -TARGET_EFAULT
;
5504 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5505 int fd
, int cmd
, abi_long arg
)
5507 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5508 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5514 static void unlock_drm_version(struct drm_version
*host_ver
,
5515 struct target_drm_version
*target_ver
,
5518 unlock_user(host_ver
->name
, target_ver
->name
,
5519 copy
? host_ver
->name_len
: 0);
5520 unlock_user(host_ver
->date
, target_ver
->date
,
5521 copy
? host_ver
->date_len
: 0);
5522 unlock_user(host_ver
->desc
, target_ver
->desc
,
5523 copy
? host_ver
->desc_len
: 0);
5526 static inline abi_long
target_to_host_drmversion(struct drm_version
*host_ver
,
5527 struct target_drm_version
*target_ver
)
5529 memset(host_ver
, 0, sizeof(*host_ver
));
5531 __get_user(host_ver
->name_len
, &target_ver
->name_len
);
5532 if (host_ver
->name_len
) {
5533 host_ver
->name
= lock_user(VERIFY_WRITE
, target_ver
->name
,
5534 target_ver
->name_len
, 0);
5535 if (!host_ver
->name
) {
5540 __get_user(host_ver
->date_len
, &target_ver
->date_len
);
5541 if (host_ver
->date_len
) {
5542 host_ver
->date
= lock_user(VERIFY_WRITE
, target_ver
->date
,
5543 target_ver
->date_len
, 0);
5544 if (!host_ver
->date
) {
5549 __get_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5550 if (host_ver
->desc_len
) {
5551 host_ver
->desc
= lock_user(VERIFY_WRITE
, target_ver
->desc
,
5552 target_ver
->desc_len
, 0);
5553 if (!host_ver
->desc
) {
5560 unlock_drm_version(host_ver
, target_ver
, false);
5564 static inline void host_to_target_drmversion(
5565 struct target_drm_version
*target_ver
,
5566 struct drm_version
*host_ver
)
5568 __put_user(host_ver
->version_major
, &target_ver
->version_major
);
5569 __put_user(host_ver
->version_minor
, &target_ver
->version_minor
);
5570 __put_user(host_ver
->version_patchlevel
, &target_ver
->version_patchlevel
);
5571 __put_user(host_ver
->name_len
, &target_ver
->name_len
);
5572 __put_user(host_ver
->date_len
, &target_ver
->date_len
);
5573 __put_user(host_ver
->desc_len
, &target_ver
->desc_len
);
5574 unlock_drm_version(host_ver
, target_ver
, true);
5577 static abi_long
do_ioctl_drm(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5578 int fd
, int cmd
, abi_long arg
)
5580 struct drm_version
*ver
;
5581 struct target_drm_version
*target_ver
;
5584 switch (ie
->host_cmd
) {
5585 case DRM_IOCTL_VERSION
:
5586 if (!lock_user_struct(VERIFY_WRITE
, target_ver
, arg
, 0)) {
5587 return -TARGET_EFAULT
;
5589 ver
= (struct drm_version
*)buf_temp
;
5590 ret
= target_to_host_drmversion(ver
, target_ver
);
5591 if (!is_error(ret
)) {
5592 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, ver
));
5593 if (is_error(ret
)) {
5594 unlock_drm_version(ver
, target_ver
, false);
5596 host_to_target_drmversion(target_ver
, ver
);
5599 unlock_user_struct(target_ver
, arg
, 0);
5602 return -TARGET_ENOSYS
;
5605 static abi_long
do_ioctl_drm_i915_getparam(const IOCTLEntry
*ie
,
5606 struct drm_i915_getparam
*gparam
,
5607 int fd
, abi_long arg
)
5611 struct target_drm_i915_getparam
*target_gparam
;
5613 if (!lock_user_struct(VERIFY_READ
, target_gparam
, arg
, 0)) {
5614 return -TARGET_EFAULT
;
5617 __get_user(gparam
->param
, &target_gparam
->param
);
5618 gparam
->value
= &value
;
5619 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, gparam
));
5620 put_user_s32(value
, target_gparam
->value
);
5622 unlock_user_struct(target_gparam
, arg
, 0);
5626 static abi_long
do_ioctl_drm_i915(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5627 int fd
, int cmd
, abi_long arg
)
5629 switch (ie
->host_cmd
) {
5630 case DRM_IOCTL_I915_GETPARAM
:
5631 return do_ioctl_drm_i915_getparam(ie
,
5632 (struct drm_i915_getparam
*)buf_temp
,
5635 return -TARGET_ENOSYS
;
5641 static abi_long
do_ioctl_TUNSETTXFILTER(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5642 int fd
, int cmd
, abi_long arg
)
5644 struct tun_filter
*filter
= (struct tun_filter
*)buf_temp
;
5645 struct tun_filter
*target_filter
;
5648 assert(ie
->access
== IOC_W
);
5650 target_filter
= lock_user(VERIFY_READ
, arg
, sizeof(*target_filter
), 1);
5651 if (!target_filter
) {
5652 return -TARGET_EFAULT
;
5654 filter
->flags
= tswap16(target_filter
->flags
);
5655 filter
->count
= tswap16(target_filter
->count
);
5656 unlock_user(target_filter
, arg
, 0);
5658 if (filter
->count
) {
5659 if (offsetof(struct tun_filter
, addr
) + filter
->count
* ETH_ALEN
>
5661 return -TARGET_EFAULT
;
5664 target_addr
= lock_user(VERIFY_READ
,
5665 arg
+ offsetof(struct tun_filter
, addr
),
5666 filter
->count
* ETH_ALEN
, 1);
5668 return -TARGET_EFAULT
;
5670 memcpy(filter
->addr
, target_addr
, filter
->count
* ETH_ALEN
);
5671 unlock_user(target_addr
, arg
+ offsetof(struct tun_filter
, addr
), 0);
5674 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, filter
));
5677 IOCTLEntry ioctl_entries
[] = {
5678 #define IOCTL(cmd, access, ...) \
5679 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5680 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5681 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5682 #define IOCTL_IGNORE(cmd) \
5683 { TARGET_ ## cmd, 0, #cmd },
5688 /* ??? Implement proper locking for ioctls. */
5689 /* do_ioctl() Must return target values and target errnos. */
5690 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5692 const IOCTLEntry
*ie
;
5693 const argtype
*arg_type
;
5695 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5701 if (ie
->target_cmd
== 0) {
5703 LOG_UNIMP
, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5704 return -TARGET_ENOSYS
;
5706 if (ie
->target_cmd
== cmd
)
5710 arg_type
= ie
->arg_type
;
5712 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5713 } else if (!ie
->host_cmd
) {
5714 /* Some architectures define BSD ioctls in their headers
5715 that are not implemented in Linux. */
5716 return -TARGET_ENOSYS
;
5719 switch(arg_type
[0]) {
5722 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5728 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5732 target_size
= thunk_type_size(arg_type
, 0);
5733 switch(ie
->access
) {
5735 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5736 if (!is_error(ret
)) {
5737 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5739 return -TARGET_EFAULT
;
5740 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5741 unlock_user(argptr
, arg
, target_size
);
5745 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5747 return -TARGET_EFAULT
;
5748 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5749 unlock_user(argptr
, arg
, 0);
5750 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5754 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5756 return -TARGET_EFAULT
;
5757 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5758 unlock_user(argptr
, arg
, 0);
5759 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5760 if (!is_error(ret
)) {
5761 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5763 return -TARGET_EFAULT
;
5764 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5765 unlock_user(argptr
, arg
, target_size
);
5771 qemu_log_mask(LOG_UNIMP
,
5772 "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5773 (long)cmd
, arg_type
[0]);
5774 ret
= -TARGET_ENOSYS
;
5780 static const bitmask_transtbl iflag_tbl
[] = {
5781 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5782 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5783 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5784 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5785 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5786 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5787 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5788 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5789 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5790 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5791 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5792 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5793 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5794 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5795 { TARGET_IUTF8
, TARGET_IUTF8
, IUTF8
, IUTF8
},
5799 static const bitmask_transtbl oflag_tbl
[] = {
5800 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5801 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5802 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5803 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5804 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5805 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5806 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5807 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5808 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5809 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5810 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5811 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5812 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5813 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5814 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5815 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5816 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5817 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5818 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5819 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5820 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5821 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5822 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5823 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5827 static const bitmask_transtbl cflag_tbl
[] = {
5828 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5829 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5830 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5831 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5832 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5833 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5834 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5835 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5836 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5837 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5838 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5839 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5840 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5841 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5842 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5843 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5844 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5845 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5846 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5847 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5848 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5849 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5850 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5851 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5852 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5853 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5854 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5855 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5856 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5857 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5858 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5862 static const bitmask_transtbl lflag_tbl
[] = {
5863 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5864 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5865 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5866 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5867 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5868 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5869 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5870 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5871 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5872 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5873 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5874 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5875 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5876 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5877 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5878 { TARGET_EXTPROC
, TARGET_EXTPROC
, EXTPROC
, EXTPROC
},
5882 static void target_to_host_termios (void *dst
, const void *src
)
5884 struct host_termios
*host
= dst
;
5885 const struct target_termios
*target
= src
;
5888 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5890 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5892 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5894 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5895 host
->c_line
= target
->c_line
;
5897 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5898 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5899 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5900 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5901 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5902 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5903 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5904 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5905 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5906 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5907 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5908 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5909 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5910 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5911 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5912 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5913 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5914 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5917 static void host_to_target_termios (void *dst
, const void *src
)
5919 struct target_termios
*target
= dst
;
5920 const struct host_termios
*host
= src
;
5923 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5925 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5927 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5929 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5930 target
->c_line
= host
->c_line
;
5932 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5933 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5934 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5935 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5936 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5937 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5938 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5939 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5940 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5941 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5942 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5943 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5944 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5945 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5946 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5947 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5948 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5949 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5952 static const StructEntry struct_termios_def
= {
5953 .convert
= { host_to_target_termios
, target_to_host_termios
},
5954 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5955 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5956 .print
= print_termios
,
5959 static const bitmask_transtbl mmap_flags_tbl
[] = {
5960 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5961 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5962 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5963 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5964 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5965 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5966 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5967 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5968 MAP_DENYWRITE
, MAP_DENYWRITE
},
5969 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5970 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5971 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5972 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5973 MAP_NORESERVE
, MAP_NORESERVE
},
5974 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5975 /* MAP_STACK had been ignored by the kernel for quite some time.
5976 Recognize it for the target insofar as we do not want to pass
5977 it through to the host. */
5978 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5983 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5984 * TARGET_I386 is defined if TARGET_X86_64 is defined
5986 #if defined(TARGET_I386)
5988 /* NOTE: there is really one LDT for all the threads */
5989 static uint8_t *ldt_table
;
5991 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5998 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5999 if (size
> bytecount
)
6001 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6003 return -TARGET_EFAULT
;
6004 /* ??? Should this by byteswapped? */
6005 memcpy(p
, ldt_table
, size
);
6006 unlock_user(p
, ptr
, size
);
6010 /* XXX: add locking support */
6011 static abi_long
write_ldt(CPUX86State
*env
,
6012 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6014 struct target_modify_ldt_ldt_s ldt_info
;
6015 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6016 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6017 int seg_not_present
, useable
, lm
;
6018 uint32_t *lp
, entry_1
, entry_2
;
6020 if (bytecount
!= sizeof(ldt_info
))
6021 return -TARGET_EINVAL
;
6022 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6023 return -TARGET_EFAULT
;
6024 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6025 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6026 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6027 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6028 unlock_user_struct(target_ldt_info
, ptr
, 0);
6030 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6031 return -TARGET_EINVAL
;
6032 seg_32bit
= ldt_info
.flags
& 1;
6033 contents
= (ldt_info
.flags
>> 1) & 3;
6034 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6035 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6036 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6037 useable
= (ldt_info
.flags
>> 6) & 1;
6041 lm
= (ldt_info
.flags
>> 7) & 1;
6043 if (contents
== 3) {
6045 return -TARGET_EINVAL
;
6046 if (seg_not_present
== 0)
6047 return -TARGET_EINVAL
;
6049 /* allocate the LDT */
6051 env
->ldt
.base
= target_mmap(0,
6052 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6053 PROT_READ
|PROT_WRITE
,
6054 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6055 if (env
->ldt
.base
== -1)
6056 return -TARGET_ENOMEM
;
6057 memset(g2h_untagged(env
->ldt
.base
), 0,
6058 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6059 env
->ldt
.limit
= 0xffff;
6060 ldt_table
= g2h_untagged(env
->ldt
.base
);
6063 /* NOTE: same code as Linux kernel */
6064 /* Allow LDTs to be cleared by the user. */
6065 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6068 read_exec_only
== 1 &&
6070 limit_in_pages
== 0 &&
6071 seg_not_present
== 1 &&
6079 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6080 (ldt_info
.limit
& 0x0ffff);
6081 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6082 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6083 (ldt_info
.limit
& 0xf0000) |
6084 ((read_exec_only
^ 1) << 9) |
6086 ((seg_not_present
^ 1) << 15) |
6088 (limit_in_pages
<< 23) |
6092 entry_2
|= (useable
<< 20);
6094 /* Install the new entry ... */
6096 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6097 lp
[0] = tswap32(entry_1
);
6098 lp
[1] = tswap32(entry_2
);
6102 /* specific and weird i386 syscalls */
6103 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6104 unsigned long bytecount
)
6110 ret
= read_ldt(ptr
, bytecount
);
6113 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6116 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6119 ret
= -TARGET_ENOSYS
;
6125 #if defined(TARGET_ABI32)
/*
 * do_set_thread_area: emulate i386 set_thread_area(2).
 * Reads a target_modify_ldt_ldt_s descriptor from guest memory at ptr,
 * validates it, and installs the encoded 8-byte segment descriptor into
 * the guest GDT's TLS slots.  An entry_number of -1 asks the kernel to
 * pick a free TLS slot and write the chosen index back to the guest.
 * Returns 0 on success or a -TARGET_* errno.
 * NOTE(review): some brace/return lines are elided from this extraction.
 */
6126 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6128 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6129 struct target_modify_ldt_ldt_s ldt_info;
6130 struct target_modify_ldt_ldt_s *target_ldt_info;
6131 int seg_32bit, contents, read_exec_only, limit_in_pages;
6132 int seg_not_present, useable, lm;
6133 uint32_t *lp, entry_1, entry_2;
/* map the guest descriptor for read+write (we may write entry_number back) */
6136 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6137 if (!target_ldt_info)
6138 return -TARGET_EFAULT;
/* byte-swap all fields from guest to host order */
6139 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6140 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6141 ldt_info.limit = tswap32(target_ldt_info->limit);
6142 ldt_info.flags = tswap32(target_ldt_info->flags);
/* entry_number == -1: find a free TLS slot and report it to the guest */
6143 if (ldt_info.entry_number == -1) {
6144 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6145 if (gdt_table[i] == 0) {
6146 ldt_info.entry_number = i;
6147 target_ldt_info->entry_number = tswap32(i);
6152 unlock_user_struct(target_ldt_info, ptr, 1);
/* only the TLS range of the GDT may be modified via this syscall */
6154 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6155 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6156 return -TARGET_EINVAL;
/* unpack the individual descriptor attribute bits from flags */
6157 seg_32bit = ldt_info.flags & 1;
6158 contents = (ldt_info.flags >> 1) & 3;
6159 read_exec_only = (ldt_info.flags >> 3) & 1;
6160 limit_in_pages = (ldt_info.flags >> 4) & 1;
6161 seg_not_present = (ldt_info.flags >> 5) & 1;
6162 useable = (ldt_info.flags >> 6) & 1;
6166 lm = (ldt_info.flags >> 7) & 1;
/* contents == 3 is invalid unless the segment is marked not-present */
6169 if (contents == 3) {
6170 if (seg_not_present == 0)
6171 return -TARGET_EINVAL;
6174 /* NOTE: same code as Linux kernel */
6175 /* Allow LDTs to be cleared by the user. */
6176 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6177 if ((contents == 0 &&
6178 read_exec_only == 1 &&
6180 limit_in_pages == 0 &&
6181 seg_not_present == 1 &&
/* pack base/limit/attributes into the two 32-bit halves of the
 * hardware segment descriptor (same layout the Linux kernel uses) */
6189 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6190 (ldt_info.limit & 0x0ffff);
6191 entry_2 = (ldt_info.base_addr & 0xff000000) |
6192 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6193 (ldt_info.limit & 0xf0000) |
6194 ((read_exec_only ^ 1) << 9) |
6196 ((seg_not_present ^ 1) << 15) |
6198 (limit_in_pages << 23) |
6203 /* Install the new entry ... */
6205 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6206 lp[0] = tswap32(entry_1);
6207 lp[1] = tswap32(entry_2);
/*
 * do_get_thread_area: emulate i386 get_thread_area(2).
 * Reads the GDT TLS descriptor selected by the guest-supplied
 * entry_number, decodes the packed 8-byte descriptor back into a
 * target_modify_ldt_ldt_s (the inverse of do_set_thread_area), and
 * writes it to guest memory at ptr.
 * Returns 0 on success or a -TARGET_* errno.
 */
6211 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6213 struct target_modify_ldt_ldt_s *target_ldt_info;
6214 uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6215 uint32_t base_addr, limit, flags;
6216 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6217 int seg_not_present, useable, lm;
6218 uint32_t *lp, entry_1, entry_2;
6220 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6221 if (!target_ldt_info)
6222 return -TARGET_EFAULT;
6223 idx = tswap32(target_ldt_info->entry_number);
/* only the TLS slots of the GDT are readable through this syscall */
6224 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6225 idx > TARGET_GDT_ENTRY_TLS_MAX) {
6226 unlock_user_struct(target_ldt_info, ptr, 1);
6227 return -TARGET_EINVAL;
/* fetch the two descriptor words, converting from guest byte order */
6229 lp = (uint32_t *)(gdt_table + idx);
6230 entry_1 = tswap32(lp[0]);
6231 entry_2 = tswap32(lp[1]);
/* decode attribute bits (inverse of the packing in do_set_thread_area;
 * note read_exec_only and seg_not_present are stored inverted) */
6233 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6234 contents = (entry_2 >> 10) & 3;
6235 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6236 seg_32bit = (entry_2 >> 22) & 1;
6237 limit_in_pages = (entry_2 >> 23) & 1;
6238 useable = (entry_2 >> 20) & 1;
6242 lm = (entry_2 >> 21) & 1;
/* reassemble the flags word in the modify_ldt user-struct layout */
6244 flags = (seg_32bit << 0) | (contents << 1) |
6245 (read_exec_only << 3) | (limit_in_pages << 4) |
6246 (seg_not_present << 5) | (useable << 6) | (lm << 7);
/* recover the 20-bit limit and 32-bit base from the descriptor halves */
6247 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
6248 base_addr = (entry_1 >> 16) |
6249 (entry_2 & 0xff000000) |
6250 ((entry_2 & 0xff) << 16);
/* write the decoded descriptor back to the guest, guest byte order */
6251 target_ldt_info->base_addr = tswapal(base_addr);
6252 target_ldt_info->limit = tswap32(limit);
6253 target_ldt_info->flags = tswap32(flags);
6254 unlock_user_struct(target_ldt_info, ptr, 1);
/*
 * do_arch_prctl (stub variant): arch_prctl is not implemented for this
 * configuration; always report ENOSYS to the guest.
 */
6258 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6260 return -TARGET_ENOSYS;
/*
 * do_arch_prctl: emulate x86 arch_prctl(2).
 * ARCH_SET_FS/ARCH_SET_GS set the FS or GS segment base to addr;
 * ARCH_GET_FS/ARCH_GET_GS write the current base to guest memory at addr.
 * Returns 0 on success or a -TARGET_* errno.
 * NOTE(review): the switch statement and idx-selection lines are elided
 * from this extraction; idx is the segment register index chosen from code.
 */
6263 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6270 case TARGET_ARCH_SET_GS:
6271 case TARGET_ARCH_SET_FS:
6272 if (code == TARGET_ARCH_SET_GS)
/* load a null selector, then set the segment base directly */
6276 cpu_x86_load_seg(env, idx, 0);
6277 env->segs[idx].base = addr;
6279 case TARGET_ARCH_GET_GS:
6280 case TARGET_ARCH_GET_FS:
6281 if (code == TARGET_ARCH_GET_GS)
/* copy the current segment base out to the guest pointer */
6285 val = env->segs[idx].base;
6286 if (put_user(val, addr, abi_ulong))
6287 ret = -TARGET_EFAULT;
/* unknown code value */
6290 ret = -TARGET_EINVAL;
6295 #endif /* defined(TARGET_ABI32 */
6296 #endif /* defined(TARGET_I386) */
6299 * These constants are generic. Supply any that are missing from the host.
/* basic thread-name options (prctl(2)) */
6302 # define PR_SET_NAME 15
6303 # define PR_GET_NAME 16
/* MIPS floating-point mode control (see prctl(2) PR_SET_FP_MODE) */
6305 #ifndef PR_SET_FP_MODE
6306 # define PR_SET_FP_MODE 45
6307 # define PR_GET_FP_MODE 46
6308 # define PR_FP_MODE_FR (1 << 0)
6309 # define PR_FP_MODE_FRE (1 << 1)
/* AArch64 SVE vector-length control */
6311 #ifndef PR_SVE_SET_VL
6312 # define PR_SVE_SET_VL 50
6313 # define PR_SVE_GET_VL 51
6314 # define PR_SVE_VL_LEN_MASK 0xffff
6315 # define PR_SVE_VL_INHERIT (1 << 17)
/* AArch64 pointer-authentication key reset */
6317 #ifndef PR_PAC_RESET_KEYS
6318 # define PR_PAC_RESET_KEYS 54
6319 # define PR_PAC_APIAKEY (1 << 0)
6320 # define PR_PAC_APIBKEY (1 << 1)
6321 # define PR_PAC_APDAKEY (1 << 2)
6322 # define PR_PAC_APDBKEY (1 << 3)
6323 # define PR_PAC_APGAKEY (1 << 4)
/* tagged-address ABI control (AArch64 top-byte-ignore) */
6325 #ifndef PR_SET_TAGGED_ADDR_CTRL
6326 # define PR_SET_TAGGED_ADDR_CTRL 55
6327 # define PR_GET_TAGGED_ADDR_CTRL 56
6328 # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
/* AArch64 MTE tag-check-fault mode and tag-generation mask bits */
6330 #ifndef PR_MTE_TCF_SHIFT
6331 # define PR_MTE_TCF_SHIFT 1
6332 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
6333 # define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
6334 # define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
6335 # define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
6336 # define PR_MTE_TAG_SHIFT 3
6337 # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
/* newer generic options that may be absent from older host headers */
6339 #ifndef PR_SET_IO_FLUSHER
6340 # define PR_SET_IO_FLUSHER 57
6341 # define PR_GET_IO_FLUSHER 58
6343 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6344 # define PR_SET_SYSCALL_USER_DISPATCH 59
/* per-target prctl hooks (may override the do_prctl_* defaults below) */
6347 #include "target_prctl.h"
/*
 * do_prctl_inval0: default handler for zero-argument per-target prctl
 * hooks that the current target does not implement; always EINVAL.
 */
6349 static abi_long do_prctl_inval0(CPUArchState *env)
6351 return -TARGET_EINVAL;
/*
 * do_prctl_inval1: default handler for one-argument per-target prctl
 * hooks that the current target does not implement; always EINVAL.
 */
6354 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6356 return -TARGET_EINVAL;
/*
 * Map any per-target prctl hook that target_prctl.h did not provide onto
 * the matching do_prctl_inval{0,1} stub, so do_prctl can call them
 * unconditionally.
 */
6359 #ifndef do_prctl_get_fp_mode
6360 #define do_prctl_get_fp_mode do_prctl_inval0
6362 #ifndef do_prctl_set_fp_mode
6363 #define do_prctl_set_fp_mode do_prctl_inval1
6365 #ifndef do_prctl_get_vl
6366 #define do_prctl_get_vl do_prctl_inval0
6368 #ifndef do_prctl_set_vl
6369 #define do_prctl_set_vl do_prctl_inval1
6371 #ifndef do_prctl_reset_keys
6372 #define do_prctl_reset_keys do_prctl_inval1
6374 #ifndef do_prctl_set_tagged_addr_ctrl
6375 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6377 #ifndef do_prctl_get_tagged_addr_ctrl
6378 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6380 #ifndef do_prctl_get_unalign
6381 #define do_prctl_get_unalign do_prctl_inval1
6383 #ifndef do_prctl_set_unalign
6384 #define do_prctl_set_unalign do_prctl_inval1
/*
 * do_prctl: emulate prctl(2).  Options fall into four groups:
 *   - signal/name options needing value translation (PDEATHSIG, NAME);
 *   - per-target hooks dispatched to the do_prctl_* functions;
 *   - harmless integer-only options passed straight to the host prctl;
 *   - options we deliberately refuse (pointer args we don't translate,
 *     or features that would interfere with QEMU itself) -> EINVAL.
 * Returns the result of the host call or a -TARGET_* errno.
 * NOTE(review): switch scaffolding and some argument lines are elided
 * from this extraction.
 */
6387 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6388 abi_long arg3, abi_long arg4, abi_long arg5)
6393 case PR_GET_PDEATHSIG:
/* fetch the host death signal, then translate to a target signal
 * number before storing it at the guest pointer arg2 */
6396 ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6398 if (!is_error(ret) &&
6399 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6400 return -TARGET_EFAULT;
6404 case PR_SET_PDEATHSIG:
/* translate target signal number to the host's before passing on */
6405 return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
/* PR_GET_NAME: the kernel writes up to 16 bytes into the buffer */
6409 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6411 return -TARGET_EFAULT;
6413 ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6415 unlock_user(name, arg2, 16);
/* PR_SET_NAME: 16-byte name read from guest memory */
6420 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6422 return -TARGET_EFAULT;
6424 ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6426 unlock_user(name, arg2, 0);
/* per-target hooks (stubbed to EINVAL when the target lacks them) */
6429 case PR_GET_FP_MODE:
6430 return do_prctl_get_fp_mode(env);
6431 case PR_SET_FP_MODE:
6432 return do_prctl_set_fp_mode(env, arg2);
6434 return do_prctl_get_vl(env);
6436 return do_prctl_set_vl(env, arg2);
6437 case PR_PAC_RESET_KEYS:
/* the kernel requires the unused trailing args to be zero */
6438 if (arg3 || arg4 || arg5) {
6439 return -TARGET_EINVAL;
6441 return do_prctl_reset_keys(env, arg2);
6442 case PR_SET_TAGGED_ADDR_CTRL:
6443 if (arg3 || arg4 || arg5) {
6444 return -TARGET_EINVAL;
6446 return do_prctl_set_tagged_addr_ctrl(env, arg2);
6447 case PR_GET_TAGGED_ADDR_CTRL:
6448 if (arg2 || arg3 || arg4 || arg5) {
6449 return -TARGET_EINVAL;
6451 return do_prctl_get_tagged_addr_ctrl(env);
6453 case PR_GET_UNALIGN:
6454 return do_prctl_get_unalign(env, arg2);
6455 case PR_SET_UNALIGN:
6456 return do_prctl_set_unalign(env, arg2);
/* integer-only options: safe to forward unchanged to the host */
6458 case PR_CAP_AMBIENT:
6459 case PR_CAPBSET_READ:
6460 case PR_CAPBSET_DROP:
6461 case PR_GET_DUMPABLE:
6462 case PR_SET_DUMPABLE:
6463 case PR_GET_KEEPCAPS:
6464 case PR_SET_KEEPCAPS:
6465 case PR_GET_SECUREBITS:
6466 case PR_SET_SECUREBITS:
6469 case PR_GET_TIMERSLACK:
6470 case PR_SET_TIMERSLACK:
6472 case PR_MCE_KILL_GET:
6473 case PR_GET_NO_NEW_PRIVS:
6474 case PR_SET_NO_NEW_PRIVS:
6475 case PR_GET_IO_FLUSHER:
6476 case PR_SET_IO_FLUSHER:
6477 /* Some prctl options have no pointer arguments and we can pass on. */
6478 return get_errno(prctl(option, arg2, arg3, arg4, arg5));
/* options with pointer arguments we do not translate -> refuse */
6480 case PR_GET_CHILD_SUBREAPER:
6481 case PR_SET_CHILD_SUBREAPER:
6482 case PR_GET_SPECULATION_CTRL:
6483 case PR_SET_SPECULATION_CTRL:
6484 case PR_GET_TID_ADDRESS:
6486 return -TARGET_EINVAL;
6490 /* Was used for SPE on PowerPC. */
6491 return -TARGET_EINVAL;
/* features that would break QEMU's own use of the host kernel */
6498 case PR_GET_SECCOMP:
6499 case PR_SET_SECCOMP:
6500 case PR_SET_SYSCALL_USER_DISPATCH:
6501 case PR_GET_THP_DISABLE:
6502 case PR_SET_THP_DISABLE:
6505 /* Disable to prevent the target disabling stuff we need. */
6506 return -TARGET_EINVAL;
/* anything else: log once as unimplemented and fail */
6509 qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6511 return -TARGET_EINVAL;
/* host stack size for threads created on behalf of the guest */
6515 #define NEW_STACK_SIZE 0x40000
/* serializes thread creation so parent/child setup appears atomic */
6518 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * new_thread_info: parent -> child handshake data for do_fork/clone_func.
 * NOTE(review): the struct header and several members (thread, tid,
 * sigmask, ...) are elided from this extraction.
 */
/* used with cond to signal the parent that the child is ready */
6521 pthread_mutex_t mutex;
6522 pthread_cond_t cond;
/* guest addresses to receive the new thread's tid, if requested */
6525 abi_ulong child_tidptr;
6526 abi_ulong parent_tidptr;
/*
 * clone_func: host-thread entry point for a guest thread created by
 * do_fork(CLONE_VM).  Registers the thread with RCU/TCG, publishes the
 * new tid to the guest addresses requested via CLONE_*_SETTID, unblocks
 * signals, signals readiness to the parent, then waits for the parent to
 * finish setup (via clone_lock) before entering the guest CPU loop.
 */
6530 static void *clone_func(void *arg)
6532 new_thread_info *info = arg;
6537 rcu_register_thread();
6538 tcg_register_thread();
6542 ts = (TaskState *)cpu->opaque;
6543 info->tid = sys_gettid();
/* publish the tid where CLONE_CHILD_SETTID / CLONE_PARENT_SETTID asked */
6545 if (info->child_tidptr)
6546 put_user_u32(info->tid, info->child_tidptr);
6547 if (info->parent_tidptr)
6548 put_user_u32(info->tid, info->parent_tidptr);
6549 qemu_guest_random_seed_thread_part2(cpu->random_seed);
6550 /* Enable signals. */
6551 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6552 /* Signal to the parent that we're ready. */
6553 pthread_mutex_lock(&info->mutex);
6554 pthread_cond_broadcast(&info->cond);
6555 pthread_mutex_unlock(&info->mutex);
6556 /* Wait until the parent has finished initializing the tls state. */
6557 pthread_mutex_lock(&clone_lock);
6558 pthread_mutex_unlock(&clone_lock);
6564 /* do_fork() Must return host values and target errnos (unlike most
6565 do_*() functions). */
/*
 * do_fork: implement clone(2)/fork(2)/vfork(2) for the guest.
 * CLONE_VM requests become a new host pthread sharing this process;
 * anything else becomes a host fork().  newsp/newtls and the tidptr
 * arguments carry the usual clone semantics.
 * NOTE(review): several lines (declarations of ts/ret/sigmask, the
 * fork() call itself and its pid handling, closing braces) are elided
 * from this extraction.
 */
6566 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6567 abi_ulong parent_tidptr, target_ulong newtls,
6568 abi_ulong child_tidptr)
6570 CPUState *cpu = env_cpu(env);
6574 CPUArchState *new_env;
/* drop flags we deliberately ignore before validating the rest */
6577 flags &= ~CLONE_IGNORED_FLAGS;
6579 /* Emulate vfork() with fork() */
6580 if (flags & CLONE_VFORK)
6581 flags &= ~(CLONE_VFORK | CLONE_VM);
6583 if (flags & CLONE_VM) {
6584 TaskState *parent_ts = (TaskState *)cpu->opaque;
6585 new_thread_info info;
6586 pthread_attr_t attr;
/* thread creation requires the full thread flag set and none of the
 * flags we cannot support */
6588 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6589 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6590 return -TARGET_EINVAL;
6593 ts = g_new0(TaskState, 1);
6594 init_task_state(ts);
6596 /* Grab a mutex so that thread setup appears atomic. */
6597 pthread_mutex_lock(&clone_lock);
6600 * If this is our first additional thread, we need to ensure we
6601 * generate code for parallel execution and flush old translations.
6602 * Do this now so that the copy gets CF_PARALLEL too.
6604 if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6605 cpu->tcg_cflags |= CF_PARALLEL;
6609 /* we create a new CPU instance. */
6610 new_env = cpu_copy(env);
6611 /* Init regs that differ from the parent. */
6612 cpu_clone_regs_child(new_env, newsp, flags);
6613 cpu_clone_regs_parent(env, flags);
6614 new_cpu = env_cpu(new_env);
6615 new_cpu->opaque = ts;
/* the child task inherits binary/image info and signal mask */
6616 ts->bprm = parent_ts->bprm;
6617 ts->info = parent_ts->info;
6618 ts->signal_mask = parent_ts->signal_mask;
6620 if (flags & CLONE_CHILD_CLEARTID) {
6621 ts->child_tidptr = child_tidptr;
6624 if (flags & CLONE_SETTLS) {
6625 cpu_set_tls (new_env, newtls);
/* set up the parent/child handshake used by clone_func */
6628 memset(&info, 0, sizeof(info));
6629 pthread_mutex_init(&info.mutex, NULL);
6630 pthread_mutex_lock(&info.mutex);
6631 pthread_cond_init(&info.cond, NULL);
6633 if (flags & CLONE_CHILD_SETTID) {
6634 info.child_tidptr = child_tidptr;
6636 if (flags & CLONE_PARENT_SETTID) {
6637 info.parent_tidptr = parent_tidptr;
6640 ret = pthread_attr_init(&attr);
6641 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6642 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6643 /* It is not safe to deliver signals until the child has finished
6644 initializing, so temporarily block all signals. */
6645 sigfillset(&sigmask);
6646 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6647 cpu->random_seed = qemu_guest_random_seed_thread_part1();
6649 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6650 /* TODO: Free new CPU state if thread creation failed. */
/* restore our signal mask now that the child owns a copy of it */
6652 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6653 pthread_attr_destroy(&attr);
6655 /* Wait for the child to initialize. */
6656 pthread_cond_wait(&info.cond, &info.mutex);
6661 pthread_mutex_unlock(&info.mutex);
6662 pthread_cond_destroy(&info.cond);
6663 pthread_mutex_destroy(&info.mutex);
6664 pthread_mutex_unlock(&clone_lock);
6666 /* if no CLONE_VM, we consider it is a fork */
6667 if (flags & CLONE_INVALID_FORK_FLAGS) {
6668 return -TARGET_EINVAL;
6671 /* We can't support custom termination signals */
6672 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6673 return -TARGET_EINVAL;
6676 if (block_signals()) {
6677 return -QEMU_ERESTARTSYS;
6683 /* Child Process. */
6684 cpu_clone_regs_child(env, newsp, flags);
6686 /* There is a race condition here. The parent process could
6687 theoretically read the TID in the child process before the child
6688 tid is set. This would require using either ptrace
6689 (not implemented) or having *_tidptr to point at a shared memory
6690 mapping. We can't repeat the spinlock hack used above because
6691 the child process gets its own copy of the lock. */
6692 if (flags & CLONE_CHILD_SETTID)
6693 put_user_u32(sys_gettid(), child_tidptr)
;
6694 if (flags & CLONE_PARENT_SETTID)
6695 put_user_u32(sys_gettid(), parent_tidptr);
6696 ts = (TaskState *)cpu->opaque;
6697 if (flags & CLONE_SETTLS)
6698 cpu_set_tls (env, newtls);
6699 if (flags & CLONE_CHILD_CLEARTID)
6700 ts->child_tidptr = child_tidptr;
/* parent side of the fork: fix up registers for the parent return */
6702 cpu_clone_regs_parent(env, flags);
6709 /* warning : doesn't handle linux specific flags... */
/*
 * target_to_host_fcntl_cmd: map a TARGET_F_* fcntl command number to the
 * host's F_* value.  Returns the host command, or -TARGET_EINVAL for
 * commands we do not support.
 * NOTE(review): most "ret = F_*;" assignment lines are elided from this
 * extraction; each case below sets ret to the matching host constant.
 */
6710 static int target_to_host_fcntl_cmd(int cmd)
6715 case TARGET_F_DUPFD:
6716 case TARGET_F_GETFD:
6717 case TARGET_F_SETFD:
6718 case TARGET_F_GETFL:
6719 case TARGET_F_SETFL:
6720 case TARGET_F_OFD_GETLK:
6721 case TARGET_F_OFD_SETLK:
6722 case TARGET_F_OFD_SETLKW:
6725 case TARGET_F_GETLK:
6728 case TARGET_F_SETLK:
6731 case TARGET_F_SETLKW:
6734 case TARGET_F_GETOWN:
6737 case TARGET_F_SETOWN:
6740 case TARGET_F_GETSIG:
6743 case TARGET_F_SETSIG:
/* 32-bit ABIs have separate 64-bit record-lock commands */
6746 #if TARGET_ABI_BITS == 32
6747 case TARGET_F_GETLK64:
6750 case TARGET_F_SETLK64:
6753 case TARGET_F_SETLKW64:
6757 case TARGET_F_SETLEASE:
6760 case TARGET_F_GETLEASE:
6763 #ifdef F_DUPFD_CLOEXEC
6764 case TARGET_F_DUPFD_CLOEXEC:
6765 ret = F_DUPFD_CLOEXEC;
6768 case TARGET_F_NOTIFY:
6772 case TARGET_F_GETOWN_EX:
6777 case TARGET_F_SETOWN_EX:
6782 case TARGET_F_SETPIPE_SZ:
6785 case TARGET_F_GETPIPE_SZ:
6790 case TARGET_F_ADD_SEALS:
6793 case TARGET_F_GET_SEALS:
/* unsupported command */
6798 ret = -TARGET_EINVAL;
6802 #if defined(__powerpc64__)
6803 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6804 * is not supported by kernel. The glibc fcntl call actually adjusts
6805 * them to 5, 6 and 7 before making the syscall(). Since we make the
6806 * syscall directly, adjust to what is supported by the kernel.
6808 if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6809 ret -= F_GETLK64 - 5;
/*
 * FLOCK_TRANSTBL: shared list of flock lock types.  Expanded twice below
 * with different TRANSTBL_CONVERT definitions to build both conversion
 * directions from a single table.
 */
6816 #define FLOCK_TRANSTBL \
6818 TRANSTBL_CONVERT(F_RDLCK); \
6819 TRANSTBL_CONVERT(F_WRLCK); \
6820 TRANSTBL_CONVERT(F_UNLCK); \
/*
 * target_to_host_flock: convert a guest l_type (TARGET_F_RDLCK etc.) to
 * the host constant; unknown values yield -TARGET_EINVAL.
 */
6823 static int target_to_host_flock(int type)
6825 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6827 #undef TRANSTBL_CONVERT
6828 return -TARGET_EINVAL;
/*
 * host_to_target_flock: convert a host l_type back to the guest
 * constant; unknown host values are passed through unchanged.
 */
6831 static int host_to_target_flock(int type)
6833 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6835 #undef TRANSTBL_CONVERT
6836 /* if we don't know how to convert the value coming
6837 * from the host we copy to the target field as-is
/*
 * copy_from_user_flock: read a target_flock (the narrow, ABI-sized
 * layout) from guest memory into a host struct flock64, translating
 * l_type and byte-swapping each field.  Returns 0 or -TARGET_EFAULT
 * (or the l_type translation error).
 */
6842 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6843 abi_ulong target_flock_addr)
6845 struct target_flock *target_fl;
6848 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6849 return -TARGET_EFAULT;
6852 __get_user(l_type, &target_fl->l_type);
6853 l_type = target_to_host_flock(l_type);
6857 fl->l_type = l_type;
6858 __get_user(fl->l_whence, &target_fl->l_whence);
6859 __get_user(fl->l_start, &target_fl->l_start);
6860 __get_user(fl->l_len, &target_fl->l_len);
6861 __get_user(fl->l_pid, &target_fl->l_pid);
6862 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * copy_to_user_flock: inverse of copy_from_user_flock — write a host
 * struct flock64 into a guest target_flock, translating l_type and
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 */
6866 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6867 const struct flock64 *fl)
6869 struct target_flock *target_fl;
6872 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6873 return -TARGET_EFAULT;
6876 l_type = host_to_target_flock(fl->l_type);
6877 __put_user(l_type, &target_fl->l_type);
6878 __put_user(fl->l_whence, &target_fl->l_whence);
6879 __put_user(fl->l_start, &target_fl->l_start);
6880 __put_user(fl->l_len, &target_fl->l_len);
6881 __put_user(fl->l_pid, &target_fl->l_pid);
6882 unlock_user_struct(target_fl, target_flock_addr, 1);
/* function-pointer types so do_fcntl-style callers can select the right
 * flock64 copy routine (regular vs. ARM OABI layout) at runtime */
6886 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6887 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6889 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* flock64 layout used by the old ARM OABI (differs in padding from the
 * EABI target_flock64).  NOTE(review): member lines are elided from
 * this extraction. */
6890 struct target_oabi_flock64 {
/*
 * copy_from_user_oabi_flock64: like copy_from_user_flock64 but reads the
 * ARM OABI flock64 layout from guest memory.  Returns 0 or
 * -TARGET_EFAULT (or the l_type translation error).
 */
6898 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6899 abi_ulong target_flock_addr)
6901 struct target_oabi_flock64 *target_fl;
6904 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6905 return -TARGET_EFAULT;
6908 __get_user(l_type, &target_fl->l_type);
6909 l_type = target_to_host_flock(l_type);
6913 fl->l_type = l_type;
6914 __get_user(fl->l_whence, &target_fl->l_whence);
6915 __get_user(fl->l_start, &target_fl->l_start);
6916 __get_user(fl->l_len, &target_fl->l_len);
6917 __get_user(fl->l_pid, &target_fl->l_pid);
6918 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * copy_to_user_oabi_flock64: like copy_to_user_flock64 but writes the
 * ARM OABI flock64 layout to guest memory.  Returns 0 or -TARGET_EFAULT.
 */
6922 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6923 const struct flock64 *fl)
6925 struct target_oabi_flock64 *target_fl;
6928 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6929 return -TARGET_EFAULT;
6932 l_type = host_to_target_flock(fl->l_type);
6933 __put_user(l_type, &target_fl->l_type);
6934 __put_user(fl->l_whence, &target_fl->l_whence);
6935 __put_user(fl->l_start, &target_fl->l_start);
6936 __put_user(fl->l_len, &target_fl->l_len);
6937 __put_user(fl->l_pid, &target_fl->l_pid);
6938 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * copy_from_user_flock64: read a target_flock64 (64-bit offsets) from
 * guest memory into a host struct flock64.  Returns 0 or -TARGET_EFAULT
 * (or the l_type translation error).
 */
6943 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6944 abi_ulong target_flock_addr)
6946 struct target_flock64 *target_fl;
6949 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6950 return -TARGET_EFAULT;
6953 __get_user(l_type, &target_fl->l_type);
6954 l_type = target_to_host_flock(l_type);
6958 fl->l_type = l_type;
6959 __get_user(fl->l_whence, &target_fl->l_whence);
6960 __get_user(fl->l_start, &target_fl->l_start);
6961 __get_user(fl->l_len, &target_fl->l_len);
6962 __get_user(fl->l_pid, &target_fl->l_pid);
6963 unlock_user_struct(target_fl, target_flock_addr, 0);
/*
 * copy_to_user_flock64: write a host struct flock64 into a guest
 * target_flock64.  Returns 0 or -TARGET_EFAULT.
 */
6967 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6968 const struct flock64 *fl)
6970 struct target_flock64 *target_fl;
6973 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6974 return -TARGET_EFAULT;
6977 l_type = host_to_target_flock(fl->l_type);
6978 __put_user(l_type, &target_fl->l_type);
6979 __put_user(fl->l_whence, &target_fl->l_whence);
6980 __put_user(fl->l_start, &target_fl->l_start);
6981 __put_user(fl->l_len, &target_fl->l_len);
6982 __put_user(fl->l_pid, &target_fl->l_pid);
6983 unlock_user_struct(target_fl, target_flock_addr, 1);
/*
 * do_fcntl: emulate fcntl(2).  Translates the command via
 * target_to_host_fcntl_cmd, converts struct arguments (flock, flock64,
 * f_owner_ex), flag bitmasks and signal numbers between guest and host,
 * and forwards to safe_fcntl.  Returns the host result or a -TARGET_*
 * errno.  NOTE(review): switch scaffolding, error-check branches and
 * some continuation lines are elided from this extraction.
 */
6987 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6989 struct flock64 fl64;
6991 struct f_owner_ex fox;
6992 struct target_f_owner_ex *target_fox;
6995 int host_cmd = target_to_host_fcntl_cmd(cmd);
6997 if (host_cmd == -TARGET_EINVAL)
/* record locks with the narrow guest layout: copy in, call, copy out */
7001 case TARGET_F_GETLK:
7002 ret = copy_from_user_flock(&fl64, arg);
7006 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7008 ret = copy_to_user_flock(arg, &fl64);
7012 case TARGET_F_SETLK:
7013 case TARGET_F_SETLKW:
7014 ret = copy_from_user_flock(&fl64, arg);
7018 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
/* 64-bit record locks use the flock64 guest layout */
7021 case TARGET_F_GETLK64:
7022 case TARGET_F_OFD_GETLK:
7023 ret = copy_from_user_flock64(&fl64, arg);
7027 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7029 ret = copy_to_user_flock64(arg, &fl64);
7032 case TARGET_F_SETLK64:
7033 case TARGET_F_SETLKW64:
7034 case TARGET_F_OFD_SETLK:
7035 case TARGET_F_OFD_SETLKW:
7036 ret = copy_from_user_flock64(&fl64, arg);
7040 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
/* file status flags need bitmask translation via fcntl_flags_tbl */
7043 case TARGET_F_GETFL:
7044 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7046 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7050 case TARGET_F_SETFL:
7051 ret = get_errno(safe_fcntl(fd, host_cmd,
7052 target_to_host_bitmask(arg,
/* f_owner_ex: fixed two-field struct, byte-swap each field */
7057 case TARGET_F_GETOWN_EX:
7058 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7060 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7061 return -TARGET_EFAULT;
7062 target_fox->type = tswap32(fox.type);
7063 target_fox->pid = tswap32(fox.pid);
7064 unlock_user_struct(target_fox, arg, 1);
7070 case TARGET_F_SETOWN_EX:
7071 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7072 return -TARGET_EFAULT;
7073 fox.type = tswap32(target_fox->type);
7074 fox.pid = tswap32(target_fox->pid);
7075 unlock_user_struct(target_fox, arg, 0);
7076 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
/* signal numbers must be translated in both directions */
7080 case TARGET_F_SETSIG:
7081 ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7084 case TARGET_F_GETSIG:
7085 ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
/* plain integer argument commands: pass through unchanged */
7088 case TARGET_F_SETOWN:
7089 case TARGET_F_GETOWN:
7090 case TARGET_F_SETLEASE:
7091 case TARGET_F_GETLEASE:
7092 case TARGET_F_SETPIPE_SZ:
7093 case TARGET_F_GETPIPE_SZ:
7094 case TARGET_F_ADD_SEALS:
7095 case TARGET_F_GET_SEALS:
7096 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
/* default: forward the untranslated command as-is */
7100 ret = get_errno(safe_fcntl(fd, cmd, arg));
/*
 * UID/GID width helpers.  With USE_UID16 the target ABI has 16-bit
 * uid_t/gid_t: high2low* clamp 32-bit host IDs for the guest and
 * low2high* widen guest IDs, preserving the -1 "no change" sentinel;
 * tswapid/put_user_id operate on 16-bit quantities.  Without USE_UID16
 * all of these are 32-bit identity operations.
 * NOTE(review): the function bodies (return statements) are elided from
 * this extraction; only the signatures and sentinel checks remain.
 */
7108 static inline int high2lowuid(int uid)
7116 static inline int high2lowgid(int gid)
7124 static inline int low2highuid(int uid)
/* -1 means "leave unchanged" and must stay -1 after widening */
7126 if ((int16_t)uid == -1)
7132 static inline int low2highgid(int gid)
7134 if ((int16_t)gid == -1)
7139 static inline int tswapid(int id)
7144 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7146 #else /* !USE_UID16 */
7147 static inline int high2lowuid(int uid)
7151 static inline int high2lowgid(int gid)
7155 static inline int low2highuid(int uid)
7159 static inline int low2highgid(int gid)
7163 static inline int tswapid(int id)
7168 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7170 #endif /* USE_UID16 */
7172 /* We must do direct syscalls for setting UID/GID, because we want to
7173 * implement the Linux system call semantics of "change only for this thread",
7174 * not the libc/POSIX semantics of "change for all threads in process".
7175 * (See http://ewontfix.com/17/ for more details.)
7176 * We use the 32-bit version of the syscalls if present; if it is not
7177 * then either the host architecture supports 32-bit UIDs natively with
7178 * the standard syscall, or the 16-bit UID is the best we can do.
7180 #ifdef __NR_setuid32
7181 #define __NR_sys_setuid __NR_setuid32
7183 #define __NR_sys_setuid __NR_setuid
7185 #ifdef __NR_setgid32
7186 #define __NR_sys_setgid __NR_setgid32
7188 #define __NR_sys_setgid __NR_setgid
7190 #ifdef __NR_setresuid32
7191 #define __NR_sys_setresuid __NR_setresuid32
7193 #define __NR_sys_setresuid __NR_setresuid
7195 #ifdef __NR_setresgid32
7196 #define __NR_sys_setresgid __NR_setresgid32
7198 #define __NR_sys_setresgid __NR_setresgid
/* generate direct (per-thread) syscall wrappers from the numbers above */
7201 _syscall1(int, sys_setuid, uid_t, uid)
7202 _syscall1(int, sys_setgid, gid_t, gid)
7203 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7204 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/*
 * syscall_init: one-time setup for syscall emulation.  Registers all
 * struct layouts with the thunk machinery (via syscall_types.h), then
 * walks the ioctl table patching entries whose size field is the
 * all-ones placeholder with the real thunk-computed argument size, and
 * (for same-arch builds) cross-checks target vs. host ioctl numbers.
 * NOTE(review): loop scaffolding and the ie-advance lines are elided
 * from this extraction.
 */
7206 void syscall_init(void)
7209 const argtype *arg_type;
7212 thunk_init(STRUCT_MAX);
7214 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7215 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7216 #include "syscall_types.h"
7218 #undef STRUCT_SPECIAL
7220 /* we patch the ioctl size if necessary. We rely on the fact that
7221 no ioctl has all the bits at '1' in the size field */
7223 while (ie->target_cmd != 0) {
7224 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7225 TARGET_IOC_SIZEMASK) {
7226 arg_type = ie->arg_type;
/* only pointer arguments have a thunk size we can substitute */
7227 if (arg_type[0] != TYPE_PTR) {
7228 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7233 size = thunk_type_size(arg_type, 0);
7234 ie->target_cmd = (ie->target_cmd &
7235 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7236 (size << TARGET_IOC_SIZESHIFT);
7239 /* automatic consistency check if same arch */
7240 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7241 (defined(__x86_64__) && defined(TARGET_X86_64))
7242 if (unlikely(ie->target_cmd != ie->host_cmd)) {
7243 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7244 ie->name, ie->target_cmd, ie->host_cmd);
7251 #ifdef TARGET_NR_truncate64
/*
 * target_truncate64: emulate truncate64(2) for 32-bit guests; the
 * 64-bit length arrives split across two registers (arg2/arg3), with an
 * extra register-pair alignment shift on some ABIs.
 */
7252 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
/* some ABIs align 64-bit register pairs, shifting the argument slots */
7257 if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7261 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7265 #ifdef TARGET_NR_ftruncate64
/*
 * target_ftruncate64: emulate ftruncate64(2) for 32-bit guests; same
 * split-register length handling as target_truncate64.
 */
7266 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7271 if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7275 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7279 #if defined(TARGET_NR_timer_settime) || \
7280 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * target_to_host_itimerspec: read a target_itimerspec from guest memory
 * into a host struct itimerspec, converting both timespec members.
 * Returns 0 or -TARGET_EFAULT.
 */
7281 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7282 abi_ulong target_addr)
7284 if (target_to_host_timespec(&host_its->it_interval, target_addr +
7285 offsetof(struct target_itimerspec,
7287 target_to_host_timespec(&host_its->it_value, target_addr +
7288 offsetof(struct target_itimerspec,
7290 return -TARGET_EFAULT;
7297 #if defined(TARGET_NR_timer_settime64) || \
7298 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/*
 * target_to_host_itimerspec64: same as target_to_host_itimerspec but for
 * the 64-bit time_t guest layout (target__kernel_itimerspec).
 * Returns 0 or -TARGET_EFAULT.
 */
7299 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7300 abi_ulong target_addr)
7302 if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7303 offsetof(struct target__kernel_itimerspec,
7305 target_to_host_timespec64(&host_its->it_value, target_addr +
7306 offsetof(struct target__kernel_itimerspec,
7308 return -TARGET_EFAULT;
7315 #if ((defined(TARGET_NR_timerfd_gettime) || \
7316 defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7317 defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * host_to_target_itimerspec: write a host struct itimerspec into a guest
 * target_itimerspec, converting both timespec members.
 * Returns 0 or -TARGET_EFAULT.
 */
7318 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7319 struct itimerspec *host_its)
7321 if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7323 &host_its->it_interval) ||
7324 host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7326 &host_its->it_value)) {
7327 return -TARGET_EFAULT;
7333 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7334 defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7335 defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/*
 * host_to_target_itimerspec64: same as host_to_target_itimerspec but for
 * the 64-bit time_t guest layout (target__kernel_itimerspec).
 * Returns 0 or -TARGET_EFAULT.
 */
7336 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7337 struct itimerspec *host_its)
7339 if (host_to_target_timespec64(target_addr +
7340 offsetof(struct target__kernel_itimerspec,
7342 &host_its->it_interval) ||
7343 host_to_target_timespec64(target_addr +
7344 offsetof(struct target__kernel_itimerspec,
7346 &host_its->it_value)) {
7347 return -TARGET_EFAULT;
7353 #if defined(TARGET_NR_adjtimex) || \
7354 (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * target_to_host_timex: read a target_timex from guest memory into a
 * host struct timex, byte-swapping every field (including the embedded
 * time timeval).  Used by adjtimex/clock_adjtime emulation.
 * Returns 0 or -TARGET_EFAULT.
 */
7355 static inline abi_long target_to_host_timex(struct timex *host_tx,
7356 abi_long target_addr)
7358 struct target_timex *target_tx;
7360 if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7361 return -TARGET_EFAULT;
/* field-by-field copy with guest->host byte swapping */
7364 __get_user(host_tx->modes, &target_tx->modes);
7365 __get_user(host_tx->offset, &target_tx->offset);
7366 __get_user(host_tx->freq, &target_tx->freq);
7367 __get_user(host_tx->maxerror, &target_tx->maxerror);
7368 __get_user(host_tx->esterror, &target_tx->esterror);
7369 __get_user(host_tx->status, &target_tx->status);
7370 __get_user(host_tx->constant, &target_tx->constant);
7371 __get_user(host_tx->precision, &target_tx->precision);
7372 __get_user(host_tx->tolerance, &target_tx->tolerance);
7373 __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7374 __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7375 __get_user(host_tx->tick, &target_tx->tick);
7376 __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7377 __get_user(host_tx->jitter, &target_tx->jitter);
7378 __get_user(host_tx->shift, &target_tx->shift);
7379 __get_user(host_tx->stabil, &target_tx->stabil);
7380 __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7381 __get_user(host_tx->calcnt, &target_tx->calcnt);
7382 __get_user(host_tx->errcnt, &target_tx->errcnt);
7383 __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7384 __get_user(host_tx->tai, &target_tx->tai);
7386 unlock_user_struct(target_tx, target_addr, 0);
/*
 * host_to_target_timex: inverse of target_to_host_timex — write a host
 * struct timex into a guest target_timex, byte-swapping every field.
 * Returns 0 or -TARGET_EFAULT.
 */
7390 static inline abi_long host_to_target_timex(abi_long target_addr,
7391 struct timex *host_tx)
7393 struct target_timex *target_tx;
7395 if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7396 return -TARGET_EFAULT;
/* field-by-field copy with host->guest byte swapping */
7399 __put_user(host_tx->modes, &target_tx->modes);
7400 __put_user(host_tx->offset, &target_tx->offset);
7401 __put_user(host_tx->freq, &target_tx->freq);
7402 __put_user(host_tx->maxerror, &target_tx->maxerror);
7403 __put_user(host_tx->esterror, &target_tx->esterror);
7404 __put_user(host_tx->status, &target_tx->status);
7405 __put_user(host_tx->constant, &target_tx->constant);
7406 __put_user(host_tx->precision, &target_tx->precision);
7407 __put_user(host_tx->tolerance, &target_tx->tolerance);
7408 __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7409 __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7410 __put_user(host_tx->tick, &target_tx->tick);
7411 __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7412 __put_user(host_tx->jitter, &target_tx->jitter);
7413 __put_user(host_tx->shift, &target_tx->shift);
7414 __put_user(host_tx->stabil, &target_tx->stabil);
7415 __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7416 __put_user(host_tx->calcnt, &target_tx->calcnt);
7417 __put_user(host_tx->errcnt, &target_tx->errcnt);
7418 __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7419 __put_user(host_tx->tai, &target_tx->tai);
7421 unlock_user_struct(target_tx, target_addr, 1);
7427 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest struct target__kernel_timex (64-bit time_t variant,
 * used by clock_adjtime64) into a host struct timex.
 * Returns 0 on success or -TARGET_EFAULT on an unreadable guest address.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    /*
     * The embedded time value needs 64-bit-aware conversion, so it is
     * copied separately before the flat field copy below.
     */
    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    /* '0' => guest struct was only read, nothing to copy back. */
    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/*
 * Copy a host struct timex out to a guest struct target__kernel_timex
 * (64-bit time_t variant). Counterpart of target_to_host_timex64().
 * Returns 0 on success or -TARGET_EFAULT on an unwritable guest address.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    /* The embedded time value needs 64-bit-aware conversion. */
    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7507 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7508 #define sigev_notify_thread_id _sigev_un._tid
/*
 * Convert a guest struct target_sigevent at target_addr into a host
 * struct sigevent (used by timer_create and friends).
 * Returns 0 on success or -TARGET_EFAULT on an unreadable guest address.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    /* Signal numbers differ between guest and host; remap explicitly. */
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    /*
     * Access the guest union member directly; the host side uses the
     * sigev_notify_thread_id alias (see the #define above when the libc
     * does not provide it).
     */
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7537 #if defined(TARGET_NR_mlockall)
/*
 * Translate guest mlockall() flag bits (TARGET_MCL_*) into the host's
 * MCL_* bits. Unknown guest bits are silently dropped.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    /* MCL_ONFAULT is only translated when the host libc defines it. */
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
7558 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7559 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7560 defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat into the guest's 64-bit stat buffer for the
 * stat64/lstat64/fstat64/fstatat64/newfstatat family.
 *
 * On 32-bit ARM the layout depends on whether the guest uses the EABI
 * (struct target_eabi_stat64) or OABI layout, so that case is handled
 * separately; all other targets use target_stat64 (or target_stat when
 * the target has no separate stat64 layout).
 * Returns 0 on success or -TARGET_EFAULT on an unwritable guest address.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some ABIs carry the inode in a second, differently-sized field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond fields exist only when the host stat provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7635 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a statx result to the guest. Note host_stx already has target
 * layout (struct target_statx) because the statx syscall is invoked
 * directly with that buffer; this routine only performs the per-field
 * byte swapping into guest memory.
 * Returns 0 on success or -TARGET_EFAULT on an unwritable guest address.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so spare/padding fields are deterministic. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    /* The four timestamps are statx_timestamp pairs (tv_sec, tv_nsec). */
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
/*
 * Invoke the raw futex syscall, selecting between __NR_futex and
 * __NR_futex_time64 depending on the host's word size and the size of
 * timespec::tv_sec, so that a 64-bit timeout is never truncated.
 * Unreachable only if the host defines neither syscall number.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
/*
 * Like do_sys_futex() but goes through the safe_syscall wrappers so a
 * guest signal arriving during the (potentially blocking) futex wait is
 * handled correctly, and converts the result with get_errno().
 * Returns -TARGET_ENOSYS when no suitable host syscall exists.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7726 /* ??? Using host futex calls even when target atomic operations
7727 are not really atomic probably breaks things. However implementing
7728 futexes locally would make futexes shared between multiple processes
7729 tricky. However they're probably useless because guest atomic
7730 operations won't work either. */
7731 #if defined(TARGET_NR_futex)
7732 static int do_futex(CPUState
*cpu
, target_ulong uaddr
, int op
, int val
,
7733 target_ulong timeout
, target_ulong uaddr2
, int val3
)
7735 struct timespec ts
, *pts
;
7738 /* ??? We assume FUTEX_* constants are the same on both host
7740 #ifdef FUTEX_CMD_MASK
7741 base_op
= op
& FUTEX_CMD_MASK
;
7747 case FUTEX_WAIT_BITSET
:
7750 target_to_host_timespec(pts
, timeout
);
7754 return do_safe_futex(g2h(cpu
, uaddr
),
7755 op
, tswap32(val
), pts
, NULL
, val3
);
7757 return do_safe_futex(g2h(cpu
, uaddr
),
7758 op
, val
, NULL
, NULL
, 0);
7760 return do_safe_futex(g2h(cpu
, uaddr
),
7761 op
, val
, NULL
, NULL
, 0);
7763 case FUTEX_CMP_REQUEUE
:
7765 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7766 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7767 But the prototype takes a `struct timespec *'; insert casts
7768 to satisfy the compiler. We do not need to tswap TIMEOUT
7769 since it's not compared to guest memory. */
7770 pts
= (struct timespec
*)(uintptr_t) timeout
;
7771 return do_safe_futex(g2h(cpu
, uaddr
), op
, val
, pts
, g2h(cpu
, uaddr2
),
7772 (base_op
== FUTEX_CMP_REQUEUE
7773 ? tswap32(val3
) : val3
));
7775 return -TARGET_ENOSYS
;
7780 #if defined(TARGET_NR_futex_time64)
/*
 * Implement the guest futex_time64(2) syscall: identical to do_futex()
 * except the WAIT timeout is a 64-bit time_t target timespec and is
 * converted with target_to_host_timespec64().
 */
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /* A bad guest timespec pointer fails the whole syscall. */
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
7830 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement name_to_handle_at(2): resolve a guest pathname to an opaque
 * file handle, writing the handle back into guest memory at 'handle'
 * and the mount id at 'mount_id'.
 *
 * The guest supplies handle_bytes in the first field of the handle
 * buffer; the host call fills in the real handle. On success returns
 * the host syscall result (0, or -EOVERFLOW converted if the buffer
 * was too small), -TARGET_EFAULT on any bad guest pointer.
 *
 * NOTE(review): 'size' comes straight from the guest and is added to
 * sizeof(struct file_handle) without an overflow check; presumably
 * lock_user() bounds the damage — confirm against MAX_HANDLE_SZ.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    int mid = 0;
    abi_long ret;
    char *name;
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;

    /* handle_bytes is the first (32-bit) field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Use a host-side scratch handle so the syscall never writes
     * half-converted data into guest memory. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    /* Only the two documented header fields need byte swapping. */
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
7884 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement open_by_handle_at(2): open the file identified by the
 * guest-supplied opaque file handle. The handle header fields are byte
 * swapped into a host-side copy before the syscall; the open flags are
 * translated through fcntl_flags_tbl.
 * Returns the new fd (via get_errno) or -TARGET_EFAULT on a bad pointer.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first (32-bit) field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Work on a host copy so the guest buffer is left untouched. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7918 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Implement signalfd4(2) (and signalfd via flags == 0): create a host
 * signalfd for the guest's signal mask.
 * The guest sigset is converted (including signal-number remapping) by
 * target_to_host_sigset(); the new fd is registered with the fd
 * translation layer so reads return target-format signalfd_siginfo.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    sigset_t host_mask;
    abi_long ret;
    target_sigset_t *target_mask;

    /* Only O_NONBLOCK and O_CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd need host->target siginfo conversion. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8..15 of a stopped status. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* Normal exit statuses need no translation. */
    return status;
}
/*
 * Emulate reads of /proc/self/cmdline: write the guest's saved argv
 * strings (NUL-separated, as the kernel does) to fd.
 * Returns 0 on success, -1 on a short or failed write.
 */
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    /* The argv recorded at exec time by the loader. */
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 so the terminating NUL of each argument is written too. */
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Emulate reads of /proc/self/maps: walk the host's own mappings and
 * emit, in guest addresses, only those ranges that are valid guest
 * memory, translating permissions from QEMU's page flags.
 * Returns 0 (individual dprintf failures are not checked).
 */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp a mapping that extends past the guest address space. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip host mappings that are not uniform guest pages. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            /* Label the guest stack like the kernel would. */
            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            /* PAGE_WRITE_ORG: report the original protection, not one
             * temporarily removed for self-modifying-code tracking. */
            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the pathname starts at column 73, as procfs does. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
#endif

    return 0;
}
/*
 * Emulate reads of /proc/self/stat: emit the 44 space-separated fields,
 * filling in only the ones QEMU can report meaningfully (pid, comm,
 * ppid, starttime, startstack) and '0' for all the rest.
 * Returns 0 on success, -1 on a short or failed write.
 */
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name: basename of argv[0], truncated to 15 chars
             * like the kernel's comm field */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Emulate reads of /proc/self/auxv: copy the auxiliary vector that was
 * placed on the guest stack at exec time out to fd.
 * Always returns 0; a write failure simply truncates the output.
 */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Rewind so the caller reads the file from the beginning. */
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return non-zero when 'filename' refers to this process's own proc
 * entry named 'entry': "/proc/self/<entry>" or "/proc/<own-pid>/<entry>".
 * Anything else (other paths, other pids) yields 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* Numeric pid: it only counts if it is our own pid. */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(p, myself, strlen(myself)) != 0) {
            return 0;
        }
        p += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
8134 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8135 defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator used by the fake_open table for absolute
 * /proc paths that are not per-process. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8142 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/*
 * Emulate reads of /proc/net/route for cross-endian guests: copy the
 * host file through to fd, byte swapping the 32-bit destination,
 * gateway and netmask columns so the guest sees them in its own byte
 * order. Lines that do not parse as 11 fields are skipped.
 * Returns 0 on success, -1 if the host file cannot be opened.
 */
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        /* Only the three address columns are stored in host byte order. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
8187 #if defined(TARGET_SPARC)
/* Emulate /proc/cpuinfo for SPARC guests with a minimal fixed entry. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
8195 #if defined(TARGET_HPPA)
/* Emulate /proc/cpuinfo for HPPA guests, describing the fixed machine
 * model QEMU presents (a 9000/778/B160L with a PA7300LC). */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
    dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
    dprintf(fd, "capabilities\t: os32\n");
    dprintf(fd, "model\t\t: 9000/778/B160L\n");
    dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
8207 #if defined(TARGET_M68K)
/* Emulate /proc/hardware for m68k guests with a fixed model string. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
/*
 * Implement the guest openat(2), intercepting the /proc entries that
 * must be faked (because the host's versions would describe the QEMU
 * process, not the guest): self maps/stat/auxv/cmdline, plus
 * target-specific files. Intercepted entries are generated into an
 * unlinked temporary file whose fd is returned; everything else is
 * passed through to safe_openat().
 */
static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;          /* entry name or absolute path */
        int (*fill)(CPUArchState *cpu_env, int fd);  /* content generator */
        int (*cmp)(const char *s1, const char *s2);  /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* /proc/self/exe must name the guest binary, not QEMU itself. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads from the start of the faked file. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
8281 #define TIMER_MAGIC 0x0caf0000
8282 #define TIMER_MAGIC_MASK 0xffff0000
8284 /* Convert QEMU provided timer ID back to internal 16bit index format */
/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* The upper 16 bits must carry the TIMER_MAGIC cookie that
     * timer_create handed out; otherwise the id is not ours. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* Strip the magic, leaving the raw table index. */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
/*
 * Read a guest CPU affinity mask (an array of abi_ulong words) from
 * target_addr and expand it bit-by-bit into the host's unsigned long
 * mask words, handling any difference in word size between guest and
 * host ABIs. Host bytes beyond the guest mask are zeroed.
 * Returns 0 on success or -TARGET_EFAULT on an unreadable address.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Callers must size the host buffer to cover the guest mask. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* 'bit' is the absolute CPU number of guest word i, bit 0. */
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_cpu_mask(): pack the host's unsigned long
 * CPU affinity mask words into guest abi_ulong words at target_addr.
 * Returns 0 on success or -TARGET_EFAULT on an unwritable address.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Only the prefix of the host mask that fits the guest is copied. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        /* 'bit' is the absolute CPU number of guest word i, bit 0. */
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
8369 #ifdef TARGET_NR_getdents
/*
 * Implement the guest getdents(2): read host directory entries into a
 * scratch buffer and repack them one record at a time into the guest's
 * struct target_dirent layout (swapping fields and rewriting d_reclen,
 * since host and target record sizes/alignment can differ).
 * Returns the number of bytes written to the guest buffer, 0 at EOF,
 * or a negative target errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    /* Directory offset of the last record successfully converted,
     * used to rewind if the guest buffer fills up early. */
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    /* Fall back to getdents64 on hosts without the old syscall. */
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: terminating NUL plus the trailing d_type byte. */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8456 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Implement the guest getdents64(2): like do_getdents() but both sides
 * use the 64-bit dirent layout, so d_type is a real field and only
 * byte swapping and record-size/alignment adjustment are needed.
 * Returns the number of bytes written to the guest buffer, 0 at EOF,
 * or a negative target errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    /* Directory offset of the last record converted, for rewinding. */
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* +1: include the name's terminating NUL in the record. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8522 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/*
 * pivot_root(2) has no glibc wrapper, so emit a direct syscall stub.
 * NOTE(review): _syscall2 is a project macro (defined elsewhere in this
 * file) that presumably expands to a two-argument syscall wrapper named
 * pivot_root — confirm against the macro definition.
 */
8523 _syscall2(int, pivot_root
, const char *, new_root
, const char *, put_old
)
8526 /* This is an internal helper for do_syscall so that it is easier
8527 * to have a single return point, so that actions, such as logging
8528 * of syscall results, can be performed.
8529 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8531 static abi_long
do_syscall1(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
8532 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8533 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8536 CPUState
*cpu
= env_cpu(cpu_env
);
8538 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8539 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8540 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8541 || defined(TARGET_NR_statx)
8544 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8545 || defined(TARGET_NR_fstatfs)
8551 case TARGET_NR_exit
:
8552 /* In old applications this may be used to implement _exit(2).
8553 However in threaded applications it is used for thread termination,
8554 and _exit_group is used for application termination.
8555 Do thread termination if we have more then one thread. */
8557 if (block_signals()) {
8558 return -QEMU_ERESTARTSYS
;
8561 pthread_mutex_lock(&clone_lock
);
8563 if (CPU_NEXT(first_cpu
)) {
8564 TaskState
*ts
= cpu
->opaque
;
8566 object_property_set_bool(OBJECT(cpu
), "realized", false, NULL
);
8567 object_unref(OBJECT(cpu
));
8569 * At this point the CPU should be unrealized and removed
8570 * from cpu lists. We can clean-up the rest of the thread
8571 * data without the lock held.
8574 pthread_mutex_unlock(&clone_lock
);
8576 if (ts
->child_tidptr
) {
8577 put_user_u32(0, ts
->child_tidptr
);
8578 do_sys_futex(g2h(cpu
, ts
->child_tidptr
),
8579 FUTEX_WAKE
, INT_MAX
, NULL
, NULL
, 0);
8583 rcu_unregister_thread();
8587 pthread_mutex_unlock(&clone_lock
);
8588 preexit_cleanup(cpu_env
, arg1
);
8590 return 0; /* avoid warning */
8591 case TARGET_NR_read
:
8592 if (arg2
== 0 && arg3
== 0) {
8593 return get_errno(safe_read(arg1
, 0, 0));
8595 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8596 return -TARGET_EFAULT
;
8597 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8599 fd_trans_host_to_target_data(arg1
)) {
8600 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8602 unlock_user(p
, arg2
, ret
);
8605 case TARGET_NR_write
:
8606 if (arg2
== 0 && arg3
== 0) {
8607 return get_errno(safe_write(arg1
, 0, 0));
8609 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8610 return -TARGET_EFAULT
;
8611 if (fd_trans_target_to_host_data(arg1
)) {
8612 void *copy
= g_malloc(arg3
);
8613 memcpy(copy
, p
, arg3
);
8614 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8616 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8620 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8622 unlock_user(p
, arg2
, 0);
8625 #ifdef TARGET_NR_open
8626 case TARGET_NR_open
:
8627 if (!(p
= lock_user_string(arg1
)))
8628 return -TARGET_EFAULT
;
8629 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8630 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8632 fd_trans_unregister(ret
);
8633 unlock_user(p
, arg1
, 0);
8636 case TARGET_NR_openat
:
8637 if (!(p
= lock_user_string(arg2
)))
8638 return -TARGET_EFAULT
;
8639 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8640 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8642 fd_trans_unregister(ret
);
8643 unlock_user(p
, arg2
, 0);
8645 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8646 case TARGET_NR_name_to_handle_at
:
8647 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8650 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8651 case TARGET_NR_open_by_handle_at
:
8652 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8653 fd_trans_unregister(ret
);
8656 case TARGET_NR_close
:
8657 fd_trans_unregister(arg1
);
8658 return get_errno(close(arg1
));
8661 return do_brk(arg1
);
8662 #ifdef TARGET_NR_fork
8663 case TARGET_NR_fork
:
8664 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8666 #ifdef TARGET_NR_waitpid
8667 case TARGET_NR_waitpid
:
8670 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8671 if (!is_error(ret
) && arg2
&& ret
8672 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8673 return -TARGET_EFAULT
;
8677 #ifdef TARGET_NR_waitid
8678 case TARGET_NR_waitid
:
8682 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8683 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8684 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8685 return -TARGET_EFAULT
;
8686 host_to_target_siginfo(p
, &info
);
8687 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8692 #ifdef TARGET_NR_creat /* not on alpha */
8693 case TARGET_NR_creat
:
8694 if (!(p
= lock_user_string(arg1
)))
8695 return -TARGET_EFAULT
;
8696 ret
= get_errno(creat(p
, arg2
));
8697 fd_trans_unregister(ret
);
8698 unlock_user(p
, arg1
, 0);
8701 #ifdef TARGET_NR_link
8702 case TARGET_NR_link
:
8705 p
= lock_user_string(arg1
);
8706 p2
= lock_user_string(arg2
);
8708 ret
= -TARGET_EFAULT
;
8710 ret
= get_errno(link(p
, p2
));
8711 unlock_user(p2
, arg2
, 0);
8712 unlock_user(p
, arg1
, 0);
8716 #if defined(TARGET_NR_linkat)
8717 case TARGET_NR_linkat
:
8721 return -TARGET_EFAULT
;
8722 p
= lock_user_string(arg2
);
8723 p2
= lock_user_string(arg4
);
8725 ret
= -TARGET_EFAULT
;
8727 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8728 unlock_user(p
, arg2
, 0);
8729 unlock_user(p2
, arg4
, 0);
8733 #ifdef TARGET_NR_unlink
8734 case TARGET_NR_unlink
:
8735 if (!(p
= lock_user_string(arg1
)))
8736 return -TARGET_EFAULT
;
8737 ret
= get_errno(unlink(p
));
8738 unlock_user(p
, arg1
, 0);
8741 #if defined(TARGET_NR_unlinkat)
8742 case TARGET_NR_unlinkat
:
8743 if (!(p
= lock_user_string(arg2
)))
8744 return -TARGET_EFAULT
;
8745 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8746 unlock_user(p
, arg2
, 0);
8749 case TARGET_NR_execve
:
8751 char **argp
, **envp
;
8754 abi_ulong guest_argp
;
8755 abi_ulong guest_envp
;
8761 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8762 if (get_user_ual(addr
, gp
))
8763 return -TARGET_EFAULT
;
8770 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8771 if (get_user_ual(addr
, gp
))
8772 return -TARGET_EFAULT
;
8778 argp
= g_new0(char *, argc
+ 1);
8779 envp
= g_new0(char *, envc
+ 1);
8781 for (gp
= guest_argp
, q
= argp
; gp
;
8782 gp
+= sizeof(abi_ulong
), q
++) {
8783 if (get_user_ual(addr
, gp
))
8787 if (!(*q
= lock_user_string(addr
)))
8792 for (gp
= guest_envp
, q
= envp
; gp
;
8793 gp
+= sizeof(abi_ulong
), q
++) {
8794 if (get_user_ual(addr
, gp
))
8798 if (!(*q
= lock_user_string(addr
)))
8803 if (!(p
= lock_user_string(arg1
)))
8805 /* Although execve() is not an interruptible syscall it is
8806 * a special case where we must use the safe_syscall wrapper:
8807 * if we allow a signal to happen before we make the host
8808 * syscall then we will 'lose' it, because at the point of
8809 * execve the process leaves QEMU's control. So we use the
8810 * safe syscall wrapper to ensure that we either take the
8811 * signal as a guest signal, or else it does not happen
8812 * before the execve completes and makes it the other
8813 * program's problem.
8815 ret
= get_errno(safe_execve(p
, argp
, envp
));
8816 unlock_user(p
, arg1
, 0);
8821 ret
= -TARGET_EFAULT
;
8824 for (gp
= guest_argp
, q
= argp
; *q
;
8825 gp
+= sizeof(abi_ulong
), q
++) {
8826 if (get_user_ual(addr
, gp
)
8829 unlock_user(*q
, addr
, 0);
8831 for (gp
= guest_envp
, q
= envp
; *q
;
8832 gp
+= sizeof(abi_ulong
), q
++) {
8833 if (get_user_ual(addr
, gp
)
8836 unlock_user(*q
, addr
, 0);
8843 case TARGET_NR_chdir
:
8844 if (!(p
= lock_user_string(arg1
)))
8845 return -TARGET_EFAULT
;
8846 ret
= get_errno(chdir(p
));
8847 unlock_user(p
, arg1
, 0);
8849 #ifdef TARGET_NR_time
8850 case TARGET_NR_time
:
8853 ret
= get_errno(time(&host_time
));
8856 && put_user_sal(host_time
, arg1
))
8857 return -TARGET_EFAULT
;
8861 #ifdef TARGET_NR_mknod
8862 case TARGET_NR_mknod
:
8863 if (!(p
= lock_user_string(arg1
)))
8864 return -TARGET_EFAULT
;
8865 ret
= get_errno(mknod(p
, arg2
, arg3
));
8866 unlock_user(p
, arg1
, 0);
8869 #if defined(TARGET_NR_mknodat)
8870 case TARGET_NR_mknodat
:
8871 if (!(p
= lock_user_string(arg2
)))
8872 return -TARGET_EFAULT
;
8873 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8874 unlock_user(p
, arg2
, 0);
8877 #ifdef TARGET_NR_chmod
8878 case TARGET_NR_chmod
:
8879 if (!(p
= lock_user_string(arg1
)))
8880 return -TARGET_EFAULT
;
8881 ret
= get_errno(chmod(p
, arg2
));
8882 unlock_user(p
, arg1
, 0);
8885 #ifdef TARGET_NR_lseek
8886 case TARGET_NR_lseek
:
8887 return get_errno(lseek(arg1
, arg2
, arg3
));
8889 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8890 /* Alpha specific */
8891 case TARGET_NR_getxpid
:
8892 cpu_env
->ir
[IR_A4
] = getppid();
8893 return get_errno(getpid());
8895 #ifdef TARGET_NR_getpid
8896 case TARGET_NR_getpid
:
8897 return get_errno(getpid());
8899 case TARGET_NR_mount
:
8901 /* need to look at the data field */
8905 p
= lock_user_string(arg1
);
8907 return -TARGET_EFAULT
;
8913 p2
= lock_user_string(arg2
);
8916 unlock_user(p
, arg1
, 0);
8918 return -TARGET_EFAULT
;
8922 p3
= lock_user_string(arg3
);
8925 unlock_user(p
, arg1
, 0);
8927 unlock_user(p2
, arg2
, 0);
8928 return -TARGET_EFAULT
;
8934 /* FIXME - arg5 should be locked, but it isn't clear how to
8935 * do that since it's not guaranteed to be a NULL-terminated
8939 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8941 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(cpu
, arg5
));
8943 ret
= get_errno(ret
);
8946 unlock_user(p
, arg1
, 0);
8948 unlock_user(p2
, arg2
, 0);
8950 unlock_user(p3
, arg3
, 0);
8954 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8955 #if defined(TARGET_NR_umount)
8956 case TARGET_NR_umount
:
8958 #if defined(TARGET_NR_oldumount)
8959 case TARGET_NR_oldumount
:
8961 if (!(p
= lock_user_string(arg1
)))
8962 return -TARGET_EFAULT
;
8963 ret
= get_errno(umount(p
));
8964 unlock_user(p
, arg1
, 0);
8967 #ifdef TARGET_NR_stime /* not on alpha */
8968 case TARGET_NR_stime
:
8972 if (get_user_sal(ts
.tv_sec
, arg1
)) {
8973 return -TARGET_EFAULT
;
8975 return get_errno(clock_settime(CLOCK_REALTIME
, &ts
));
8978 #ifdef TARGET_NR_alarm /* not on alpha */
8979 case TARGET_NR_alarm
:
8982 #ifdef TARGET_NR_pause /* not on alpha */
8983 case TARGET_NR_pause
:
8984 if (!block_signals()) {
8985 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8987 return -TARGET_EINTR
;
8989 #ifdef TARGET_NR_utime
8990 case TARGET_NR_utime
:
8992 struct utimbuf tbuf
, *host_tbuf
;
8993 struct target_utimbuf
*target_tbuf
;
8995 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8996 return -TARGET_EFAULT
;
8997 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8998 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8999 unlock_user_struct(target_tbuf
, arg2
, 0);
9004 if (!(p
= lock_user_string(arg1
)))
9005 return -TARGET_EFAULT
;
9006 ret
= get_errno(utime(p
, host_tbuf
));
9007 unlock_user(p
, arg1
, 0);
9011 #ifdef TARGET_NR_utimes
9012 case TARGET_NR_utimes
:
9014 struct timeval
*tvp
, tv
[2];
9016 if (copy_from_user_timeval(&tv
[0], arg2
)
9017 || copy_from_user_timeval(&tv
[1],
9018 arg2
+ sizeof(struct target_timeval
)))
9019 return -TARGET_EFAULT
;
9024 if (!(p
= lock_user_string(arg1
)))
9025 return -TARGET_EFAULT
;
9026 ret
= get_errno(utimes(p
, tvp
));
9027 unlock_user(p
, arg1
, 0);
9031 #if defined(TARGET_NR_futimesat)
9032 case TARGET_NR_futimesat
:
9034 struct timeval
*tvp
, tv
[2];
9036 if (copy_from_user_timeval(&tv
[0], arg3
)
9037 || copy_from_user_timeval(&tv
[1],
9038 arg3
+ sizeof(struct target_timeval
)))
9039 return -TARGET_EFAULT
;
9044 if (!(p
= lock_user_string(arg2
))) {
9045 return -TARGET_EFAULT
;
9047 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
9048 unlock_user(p
, arg2
, 0);
9052 #ifdef TARGET_NR_access
9053 case TARGET_NR_access
:
9054 if (!(p
= lock_user_string(arg1
))) {
9055 return -TARGET_EFAULT
;
9057 ret
= get_errno(access(path(p
), arg2
));
9058 unlock_user(p
, arg1
, 0);
9061 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9062 case TARGET_NR_faccessat
:
9063 if (!(p
= lock_user_string(arg2
))) {
9064 return -TARGET_EFAULT
;
9066 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
9067 unlock_user(p
, arg2
, 0);
9070 #ifdef TARGET_NR_nice /* not on alpha */
9071 case TARGET_NR_nice
:
9072 return get_errno(nice(arg1
));
9074 case TARGET_NR_sync
:
9077 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9078 case TARGET_NR_syncfs
:
9079 return get_errno(syncfs(arg1
));
9081 case TARGET_NR_kill
:
9082 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
9083 #ifdef TARGET_NR_rename
9084 case TARGET_NR_rename
:
9087 p
= lock_user_string(arg1
);
9088 p2
= lock_user_string(arg2
);
9090 ret
= -TARGET_EFAULT
;
9092 ret
= get_errno(rename(p
, p2
));
9093 unlock_user(p2
, arg2
, 0);
9094 unlock_user(p
, arg1
, 0);
9098 #if defined(TARGET_NR_renameat)
9099 case TARGET_NR_renameat
:
9102 p
= lock_user_string(arg2
);
9103 p2
= lock_user_string(arg4
);
9105 ret
= -TARGET_EFAULT
;
9107 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
9108 unlock_user(p2
, arg4
, 0);
9109 unlock_user(p
, arg2
, 0);
9113 #if defined(TARGET_NR_renameat2)
9114 case TARGET_NR_renameat2
:
9117 p
= lock_user_string(arg2
);
9118 p2
= lock_user_string(arg4
);
9120 ret
= -TARGET_EFAULT
;
9122 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
9124 unlock_user(p2
, arg4
, 0);
9125 unlock_user(p
, arg2
, 0);
9129 #ifdef TARGET_NR_mkdir
9130 case TARGET_NR_mkdir
:
9131 if (!(p
= lock_user_string(arg1
)))
9132 return -TARGET_EFAULT
;
9133 ret
= get_errno(mkdir(p
, arg2
));
9134 unlock_user(p
, arg1
, 0);
9137 #if defined(TARGET_NR_mkdirat)
9138 case TARGET_NR_mkdirat
:
9139 if (!(p
= lock_user_string(arg2
)))
9140 return -TARGET_EFAULT
;
9141 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
9142 unlock_user(p
, arg2
, 0);
9145 #ifdef TARGET_NR_rmdir
9146 case TARGET_NR_rmdir
:
9147 if (!(p
= lock_user_string(arg1
)))
9148 return -TARGET_EFAULT
;
9149 ret
= get_errno(rmdir(p
));
9150 unlock_user(p
, arg1
, 0);
9154 ret
= get_errno(dup(arg1
));
9156 fd_trans_dup(arg1
, ret
);
9159 #ifdef TARGET_NR_pipe
9160 case TARGET_NR_pipe
:
9161 return do_pipe(cpu_env
, arg1
, 0, 0);
9163 #ifdef TARGET_NR_pipe2
9164 case TARGET_NR_pipe2
:
9165 return do_pipe(cpu_env
, arg1
,
9166 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
9168 case TARGET_NR_times
:
9170 struct target_tms
*tmsp
;
9172 ret
= get_errno(times(&tms
));
9174 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
9176 return -TARGET_EFAULT
;
9177 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
9178 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
9179 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
9180 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
9183 ret
= host_to_target_clock_t(ret
);
9186 case TARGET_NR_acct
:
9188 ret
= get_errno(acct(NULL
));
9190 if (!(p
= lock_user_string(arg1
))) {
9191 return -TARGET_EFAULT
;
9193 ret
= get_errno(acct(path(p
)));
9194 unlock_user(p
, arg1
, 0);
9197 #ifdef TARGET_NR_umount2
9198 case TARGET_NR_umount2
:
9199 if (!(p
= lock_user_string(arg1
)))
9200 return -TARGET_EFAULT
;
9201 ret
= get_errno(umount2(p
, arg2
));
9202 unlock_user(p
, arg1
, 0);
9205 case TARGET_NR_ioctl
:
9206 return do_ioctl(arg1
, arg2
, arg3
);
9207 #ifdef TARGET_NR_fcntl
9208 case TARGET_NR_fcntl
:
9209 return do_fcntl(arg1
, arg2
, arg3
);
9211 case TARGET_NR_setpgid
:
9212 return get_errno(setpgid(arg1
, arg2
));
9213 case TARGET_NR_umask
:
9214 return get_errno(umask(arg1
));
9215 case TARGET_NR_chroot
:
9216 if (!(p
= lock_user_string(arg1
)))
9217 return -TARGET_EFAULT
;
9218 ret
= get_errno(chroot(p
));
9219 unlock_user(p
, arg1
, 0);
9221 #ifdef TARGET_NR_dup2
9222 case TARGET_NR_dup2
:
9223 ret
= get_errno(dup2(arg1
, arg2
));
9225 fd_trans_dup(arg1
, arg2
);
9229 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9230 case TARGET_NR_dup3
:
9234 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
9237 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
9238 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
9240 fd_trans_dup(arg1
, arg2
);
9245 #ifdef TARGET_NR_getppid /* not on alpha */
9246 case TARGET_NR_getppid
:
9247 return get_errno(getppid());
9249 #ifdef TARGET_NR_getpgrp
9250 case TARGET_NR_getpgrp
:
9251 return get_errno(getpgrp());
9253 case TARGET_NR_setsid
:
9254 return get_errno(setsid());
9255 #ifdef TARGET_NR_sigaction
9256 case TARGET_NR_sigaction
:
9258 #if defined(TARGET_MIPS)
9259 struct target_sigaction act
, oact
, *pact
, *old_act
;
9262 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9263 return -TARGET_EFAULT
;
9264 act
._sa_handler
= old_act
->_sa_handler
;
9265 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
9266 act
.sa_flags
= old_act
->sa_flags
;
9267 unlock_user_struct(old_act
, arg2
, 0);
9273 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9275 if (!is_error(ret
) && arg3
) {
9276 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9277 return -TARGET_EFAULT
;
9278 old_act
->_sa_handler
= oact
._sa_handler
;
9279 old_act
->sa_flags
= oact
.sa_flags
;
9280 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
9281 old_act
->sa_mask
.sig
[1] = 0;
9282 old_act
->sa_mask
.sig
[2] = 0;
9283 old_act
->sa_mask
.sig
[3] = 0;
9284 unlock_user_struct(old_act
, arg3
, 1);
9287 struct target_old_sigaction
*old_act
;
9288 struct target_sigaction act
, oact
, *pact
;
9290 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
9291 return -TARGET_EFAULT
;
9292 act
._sa_handler
= old_act
->_sa_handler
;
9293 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
9294 act
.sa_flags
= old_act
->sa_flags
;
9295 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9296 act
.sa_restorer
= old_act
->sa_restorer
;
9298 unlock_user_struct(old_act
, arg2
, 0);
9303 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
, 0));
9304 if (!is_error(ret
) && arg3
) {
9305 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
9306 return -TARGET_EFAULT
;
9307 old_act
->_sa_handler
= oact
._sa_handler
;
9308 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
9309 old_act
->sa_flags
= oact
.sa_flags
;
9310 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9311 old_act
->sa_restorer
= oact
.sa_restorer
;
9313 unlock_user_struct(old_act
, arg3
, 1);
9319 case TARGET_NR_rt_sigaction
:
9322 * For Alpha and SPARC this is a 5 argument syscall, with
9323 * a 'restorer' parameter which must be copied into the
9324 * sa_restorer field of the sigaction struct.
9325 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9326 * and arg5 is the sigsetsize.
9328 #if defined(TARGET_ALPHA)
9329 target_ulong sigsetsize
= arg4
;
9330 target_ulong restorer
= arg5
;
9331 #elif defined(TARGET_SPARC)
9332 target_ulong restorer
= arg4
;
9333 target_ulong sigsetsize
= arg5
;
9335 target_ulong sigsetsize
= arg4
;
9336 target_ulong restorer
= 0;
9338 struct target_sigaction
*act
= NULL
;
9339 struct target_sigaction
*oact
= NULL
;
9341 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9342 return -TARGET_EINVAL
;
9344 if (arg2
&& !lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9345 return -TARGET_EFAULT
;
9347 if (arg3
&& !lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9348 ret
= -TARGET_EFAULT
;
9350 ret
= get_errno(do_sigaction(arg1
, act
, oact
, restorer
));
9352 unlock_user_struct(oact
, arg3
, 1);
9356 unlock_user_struct(act
, arg2
, 0);
9360 #ifdef TARGET_NR_sgetmask /* not on alpha */
9361 case TARGET_NR_sgetmask
:
9364 abi_ulong target_set
;
9365 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9367 host_to_target_old_sigset(&target_set
, &cur_set
);
9373 #ifdef TARGET_NR_ssetmask /* not on alpha */
9374 case TARGET_NR_ssetmask
:
9377 abi_ulong target_set
= arg1
;
9378 target_to_host_old_sigset(&set
, &target_set
);
9379 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9381 host_to_target_old_sigset(&target_set
, &oset
);
9387 #ifdef TARGET_NR_sigprocmask
9388 case TARGET_NR_sigprocmask
:
9390 #if defined(TARGET_ALPHA)
9391 sigset_t set
, oldset
;
9396 case TARGET_SIG_BLOCK
:
9399 case TARGET_SIG_UNBLOCK
:
9402 case TARGET_SIG_SETMASK
:
9406 return -TARGET_EINVAL
;
9409 target_to_host_old_sigset(&set
, &mask
);
9411 ret
= do_sigprocmask(how
, &set
, &oldset
);
9412 if (!is_error(ret
)) {
9413 host_to_target_old_sigset(&mask
, &oldset
);
9415 cpu_env
->ir
[IR_V0
] = 0; /* force no error */
9418 sigset_t set
, oldset
, *set_ptr
;
9422 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9424 return -TARGET_EFAULT
;
9426 target_to_host_old_sigset(&set
, p
);
9427 unlock_user(p
, arg2
, 0);
9430 case TARGET_SIG_BLOCK
:
9433 case TARGET_SIG_UNBLOCK
:
9436 case TARGET_SIG_SETMASK
:
9440 return -TARGET_EINVAL
;
9446 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9447 if (!is_error(ret
) && arg3
) {
9448 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9449 return -TARGET_EFAULT
;
9450 host_to_target_old_sigset(p
, &oldset
);
9451 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9457 case TARGET_NR_rt_sigprocmask
:
9460 sigset_t set
, oldset
, *set_ptr
;
9462 if (arg4
!= sizeof(target_sigset_t
)) {
9463 return -TARGET_EINVAL
;
9467 p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1);
9469 return -TARGET_EFAULT
;
9471 target_to_host_sigset(&set
, p
);
9472 unlock_user(p
, arg2
, 0);
9475 case TARGET_SIG_BLOCK
:
9478 case TARGET_SIG_UNBLOCK
:
9481 case TARGET_SIG_SETMASK
:
9485 return -TARGET_EINVAL
;
9491 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9492 if (!is_error(ret
) && arg3
) {
9493 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9494 return -TARGET_EFAULT
;
9495 host_to_target_sigset(p
, &oldset
);
9496 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9500 #ifdef TARGET_NR_sigpending
9501 case TARGET_NR_sigpending
:
9504 ret
= get_errno(sigpending(&set
));
9505 if (!is_error(ret
)) {
9506 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9507 return -TARGET_EFAULT
;
9508 host_to_target_old_sigset(p
, &set
);
9509 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9514 case TARGET_NR_rt_sigpending
:
9518 /* Yes, this check is >, not != like most. We follow the kernel's
9519 * logic and it does it like this because it implements
9520 * NR_sigpending through the same code path, and in that case
9521 * the old_sigset_t is smaller in size.
9523 if (arg2
> sizeof(target_sigset_t
)) {
9524 return -TARGET_EINVAL
;
9527 ret
= get_errno(sigpending(&set
));
9528 if (!is_error(ret
)) {
9529 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9530 return -TARGET_EFAULT
;
9531 host_to_target_sigset(p
, &set
);
9532 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9536 #ifdef TARGET_NR_sigsuspend
9537 case TARGET_NR_sigsuspend
:
9541 #if defined(TARGET_ALPHA)
9542 TaskState
*ts
= cpu
->opaque
;
9543 /* target_to_host_old_sigset will bswap back */
9544 abi_ulong mask
= tswapal(arg1
);
9545 set
= &ts
->sigsuspend_mask
;
9546 target_to_host_old_sigset(set
, &mask
);
9548 ret
= process_sigsuspend_mask(&set
, arg1
, sizeof(target_sigset_t
));
9553 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9554 finish_sigsuspend_mask(ret
);
9558 case TARGET_NR_rt_sigsuspend
:
9562 ret
= process_sigsuspend_mask(&set
, arg1
, arg2
);
9566 ret
= get_errno(safe_rt_sigsuspend(set
, SIGSET_T_SIZE
));
9567 finish_sigsuspend_mask(ret
);
9570 #ifdef TARGET_NR_rt_sigtimedwait
9571 case TARGET_NR_rt_sigtimedwait
:
9574 struct timespec uts
, *puts
;
9577 if (arg4
!= sizeof(target_sigset_t
)) {
9578 return -TARGET_EINVAL
;
9581 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9582 return -TARGET_EFAULT
;
9583 target_to_host_sigset(&set
, p
);
9584 unlock_user(p
, arg1
, 0);
9587 if (target_to_host_timespec(puts
, arg3
)) {
9588 return -TARGET_EFAULT
;
9593 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9595 if (!is_error(ret
)) {
9597 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9600 return -TARGET_EFAULT
;
9602 host_to_target_siginfo(p
, &uinfo
);
9603 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9605 ret
= host_to_target_signal(ret
);
9610 #ifdef TARGET_NR_rt_sigtimedwait_time64
9611 case TARGET_NR_rt_sigtimedwait_time64
:
9614 struct timespec uts
, *puts
;
9617 if (arg4
!= sizeof(target_sigset_t
)) {
9618 return -TARGET_EINVAL
;
9621 p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1);
9623 return -TARGET_EFAULT
;
9625 target_to_host_sigset(&set
, p
);
9626 unlock_user(p
, arg1
, 0);
9629 if (target_to_host_timespec64(puts
, arg3
)) {
9630 return -TARGET_EFAULT
;
9635 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9637 if (!is_error(ret
)) {
9639 p
= lock_user(VERIFY_WRITE
, arg2
,
9640 sizeof(target_siginfo_t
), 0);
9642 return -TARGET_EFAULT
;
9644 host_to_target_siginfo(p
, &uinfo
);
9645 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9647 ret
= host_to_target_signal(ret
);
9652 case TARGET_NR_rt_sigqueueinfo
:
9656 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9658 return -TARGET_EFAULT
;
9660 target_to_host_siginfo(&uinfo
, p
);
9661 unlock_user(p
, arg3
, 0);
9662 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9665 case TARGET_NR_rt_tgsigqueueinfo
:
9669 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9671 return -TARGET_EFAULT
;
9673 target_to_host_siginfo(&uinfo
, p
);
9674 unlock_user(p
, arg4
, 0);
9675 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9678 #ifdef TARGET_NR_sigreturn
9679 case TARGET_NR_sigreturn
:
9680 if (block_signals()) {
9681 return -QEMU_ERESTARTSYS
;
9683 return do_sigreturn(cpu_env
);
9685 case TARGET_NR_rt_sigreturn
:
9686 if (block_signals()) {
9687 return -QEMU_ERESTARTSYS
;
9689 return do_rt_sigreturn(cpu_env
);
9690 case TARGET_NR_sethostname
:
9691 if (!(p
= lock_user_string(arg1
)))
9692 return -TARGET_EFAULT
;
9693 ret
= get_errno(sethostname(p
, arg2
));
9694 unlock_user(p
, arg1
, 0);
9696 #ifdef TARGET_NR_setrlimit
9697 case TARGET_NR_setrlimit
:
9699 int resource
= target_to_host_resource(arg1
);
9700 struct target_rlimit
*target_rlim
;
9702 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9703 return -TARGET_EFAULT
;
9704 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9705 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9706 unlock_user_struct(target_rlim
, arg2
, 0);
9708 * If we just passed through resource limit settings for memory then
9709 * they would also apply to QEMU's own allocations, and QEMU will
9710 * crash or hang or die if its allocations fail. Ideally we would
9711 * track the guest allocations in QEMU and apply the limits ourselves.
9712 * For now, just tell the guest the call succeeded but don't actually
9715 if (resource
!= RLIMIT_AS
&&
9716 resource
!= RLIMIT_DATA
&&
9717 resource
!= RLIMIT_STACK
) {
9718 return get_errno(setrlimit(resource
, &rlim
));
9724 #ifdef TARGET_NR_getrlimit
9725 case TARGET_NR_getrlimit
:
9727 int resource
= target_to_host_resource(arg1
);
9728 struct target_rlimit
*target_rlim
;
9731 ret
= get_errno(getrlimit(resource
, &rlim
));
9732 if (!is_error(ret
)) {
9733 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9734 return -TARGET_EFAULT
;
9735 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9736 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9737 unlock_user_struct(target_rlim
, arg2
, 1);
9742 case TARGET_NR_getrusage
:
9744 struct rusage rusage
;
9745 ret
= get_errno(getrusage(arg1
, &rusage
));
9746 if (!is_error(ret
)) {
9747 ret
= host_to_target_rusage(arg2
, &rusage
);
9751 #if defined(TARGET_NR_gettimeofday)
9752 case TARGET_NR_gettimeofday
:
9757 ret
= get_errno(gettimeofday(&tv
, &tz
));
9758 if (!is_error(ret
)) {
9759 if (arg1
&& copy_to_user_timeval(arg1
, &tv
)) {
9760 return -TARGET_EFAULT
;
9762 if (arg2
&& copy_to_user_timezone(arg2
, &tz
)) {
9763 return -TARGET_EFAULT
;
9769 #if defined(TARGET_NR_settimeofday)
9770 case TARGET_NR_settimeofday
:
9772 struct timeval tv
, *ptv
= NULL
;
9773 struct timezone tz
, *ptz
= NULL
;
9776 if (copy_from_user_timeval(&tv
, arg1
)) {
9777 return -TARGET_EFAULT
;
9783 if (copy_from_user_timezone(&tz
, arg2
)) {
9784 return -TARGET_EFAULT
;
9789 return get_errno(settimeofday(ptv
, ptz
));
9792 #if defined(TARGET_NR_select)
9793 case TARGET_NR_select
:
9794 #if defined(TARGET_WANT_NI_OLD_SELECT)
9795 /* some architectures used to have old_select here
9796 * but now ENOSYS it.
9798 ret
= -TARGET_ENOSYS
;
9799 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9800 ret
= do_old_select(arg1
);
9802 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9806 #ifdef TARGET_NR_pselect6
9807 case TARGET_NR_pselect6
:
9808 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, false);
9810 #ifdef TARGET_NR_pselect6_time64
9811 case TARGET_NR_pselect6_time64
:
9812 return do_pselect6(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, true);
9814 #ifdef TARGET_NR_symlink
9815 case TARGET_NR_symlink
:
9818 p
= lock_user_string(arg1
);
9819 p2
= lock_user_string(arg2
);
9821 ret
= -TARGET_EFAULT
;
9823 ret
= get_errno(symlink(p
, p2
));
9824 unlock_user(p2
, arg2
, 0);
9825 unlock_user(p
, arg1
, 0);
9829 #if defined(TARGET_NR_symlinkat)
9830 case TARGET_NR_symlinkat
:
9833 p
= lock_user_string(arg1
);
9834 p2
= lock_user_string(arg3
);
9836 ret
= -TARGET_EFAULT
;
9838 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9839 unlock_user(p2
, arg3
, 0);
9840 unlock_user(p
, arg1
, 0);
9844 #ifdef TARGET_NR_readlink
9845 case TARGET_NR_readlink
:
9848 p
= lock_user_string(arg1
);
9849 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9851 ret
= -TARGET_EFAULT
;
9853 /* Short circuit this for the magic exe check. */
9854 ret
= -TARGET_EINVAL
;
9855 } else if (is_proc_myself((const char *)p
, "exe")) {
9856 char real
[PATH_MAX
], *temp
;
9857 temp
= realpath(exec_path
, real
);
9858 /* Return value is # of bytes that we wrote to the buffer. */
9860 ret
= get_errno(-1);
9862 /* Don't worry about sign mismatch as earlier mapping
9863 * logic would have thrown a bad address error. */
9864 ret
= MIN(strlen(real
), arg3
);
9865 /* We cannot NUL terminate the string. */
9866 memcpy(p2
, real
, ret
);
9869 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9871 unlock_user(p2
, arg2
, ret
);
9872 unlock_user(p
, arg1
, 0);
9876 #if defined(TARGET_NR_readlinkat)
9877 case TARGET_NR_readlinkat
:
9880 p
= lock_user_string(arg2
);
9881 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9883 ret
= -TARGET_EFAULT
;
9884 } else if (is_proc_myself((const char *)p
, "exe")) {
9885 char real
[PATH_MAX
], *temp
;
9886 temp
= realpath(exec_path
, real
);
9887 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9888 snprintf((char *)p2
, arg4
, "%s", real
);
9890 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9892 unlock_user(p2
, arg3
, ret
);
9893 unlock_user(p
, arg2
, 0);
9897 #ifdef TARGET_NR_swapon
9898 case TARGET_NR_swapon
:
9899 if (!(p
= lock_user_string(arg1
)))
9900 return -TARGET_EFAULT
;
9901 ret
= get_errno(swapon(p
, arg2
));
9902 unlock_user(p
, arg1
, 0);
9905 case TARGET_NR_reboot
:
9906 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9907 /* arg4 must be ignored in all other cases */
9908 p
= lock_user_string(arg4
);
9910 return -TARGET_EFAULT
;
9912 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9913 unlock_user(p
, arg4
, 0);
9915 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9918 #ifdef TARGET_NR_mmap
9919 case TARGET_NR_mmap
:
9920 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9921 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9922 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9923 || defined(TARGET_S390X)
9926 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9927 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9928 return -TARGET_EFAULT
;
9935 unlock_user(v
, arg1
, 0);
9936 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9937 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9941 /* mmap pointers are always untagged */
9942 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9943 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9949 #ifdef TARGET_NR_mmap2
9950 case TARGET_NR_mmap2
:
9952 #define MMAP_SHIFT 12
9954 ret
= target_mmap(arg1
, arg2
, arg3
,
9955 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9956 arg5
, arg6
<< MMAP_SHIFT
);
9957 return get_errno(ret
);
9959 case TARGET_NR_munmap
:
9960 arg1
= cpu_untagged_addr(cpu
, arg1
);
9961 return get_errno(target_munmap(arg1
, arg2
));
9962 case TARGET_NR_mprotect
:
9963 arg1
= cpu_untagged_addr(cpu
, arg1
);
9965 TaskState
*ts
= cpu
->opaque
;
9966 /* Special hack to detect libc making the stack executable. */
9967 if ((arg3
& PROT_GROWSDOWN
)
9968 && arg1
>= ts
->info
->stack_limit
9969 && arg1
<= ts
->info
->start_stack
) {
9970 arg3
&= ~PROT_GROWSDOWN
;
9971 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9972 arg1
= ts
->info
->stack_limit
;
9975 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9976 #ifdef TARGET_NR_mremap
9977 case TARGET_NR_mremap
:
9978 arg1
= cpu_untagged_addr(cpu
, arg1
);
9979 /* mremap new_addr (arg5) is always untagged */
9980 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9982 /* ??? msync/mlock/munlock are broken for softmmu. */
9983 #ifdef TARGET_NR_msync
9984 case TARGET_NR_msync
:
9985 return get_errno(msync(g2h(cpu
, arg1
), arg2
, arg3
));
9987 #ifdef TARGET_NR_mlock
9988 case TARGET_NR_mlock
:
9989 return get_errno(mlock(g2h(cpu
, arg1
), arg2
));
9991 #ifdef TARGET_NR_munlock
9992 case TARGET_NR_munlock
:
9993 return get_errno(munlock(g2h(cpu
, arg1
), arg2
));
9995 #ifdef TARGET_NR_mlockall
9996 case TARGET_NR_mlockall
:
9997 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9999 #ifdef TARGET_NR_munlockall
10000 case TARGET_NR_munlockall
:
10001 return get_errno(munlockall());
10003 #ifdef TARGET_NR_truncate
10004 case TARGET_NR_truncate
:
10005 if (!(p
= lock_user_string(arg1
)))
10006 return -TARGET_EFAULT
;
10007 ret
= get_errno(truncate(p
, arg2
));
10008 unlock_user(p
, arg1
, 0);
10011 #ifdef TARGET_NR_ftruncate
10012 case TARGET_NR_ftruncate
:
10013 return get_errno(ftruncate(arg1
, arg2
));
10015 case TARGET_NR_fchmod
:
10016 return get_errno(fchmod(arg1
, arg2
));
10017 #if defined(TARGET_NR_fchmodat)
10018 case TARGET_NR_fchmodat
:
10019 if (!(p
= lock_user_string(arg2
)))
10020 return -TARGET_EFAULT
;
10021 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
10022 unlock_user(p
, arg2
, 0);
10025 case TARGET_NR_getpriority
:
10026 /* Note that negative values are valid for getpriority, so we must
10027 differentiate based on errno settings. */
10029 ret
= getpriority(arg1
, arg2
);
10030 if (ret
== -1 && errno
!= 0) {
10031 return -host_to_target_errno(errno
);
10033 #ifdef TARGET_ALPHA
10034 /* Return value is the unbiased priority. Signal no error. */
10035 cpu_env
->ir
[IR_V0
] = 0;
10037 /* Return value is a biased priority to avoid negative numbers. */
10041 case TARGET_NR_setpriority
:
10042 return get_errno(setpriority(arg1
, arg2
, arg3
));
10043 #ifdef TARGET_NR_statfs
10044 case TARGET_NR_statfs
:
10045 if (!(p
= lock_user_string(arg1
))) {
10046 return -TARGET_EFAULT
;
10048 ret
= get_errno(statfs(path(p
), &stfs
));
10049 unlock_user(p
, arg1
, 0);
10051 if (!is_error(ret
)) {
10052 struct target_statfs
*target_stfs
;
10054 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
10055 return -TARGET_EFAULT
;
10056 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10057 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10058 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10059 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10060 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10061 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10062 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10063 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10064 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10065 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10066 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10067 #ifdef _STATFS_F_FLAGS
10068 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10070 __put_user(0, &target_stfs
->f_flags
);
10072 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10073 unlock_user_struct(target_stfs
, arg2
, 1);
10077 #ifdef TARGET_NR_fstatfs
10078 case TARGET_NR_fstatfs
:
10079 ret
= get_errno(fstatfs(arg1
, &stfs
));
10080 goto convert_statfs
;
10082 #ifdef TARGET_NR_statfs64
10083 case TARGET_NR_statfs64
:
10084 if (!(p
= lock_user_string(arg1
))) {
10085 return -TARGET_EFAULT
;
10087 ret
= get_errno(statfs(path(p
), &stfs
));
10088 unlock_user(p
, arg1
, 0);
10090 if (!is_error(ret
)) {
10091 struct target_statfs64
*target_stfs
;
10093 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
10094 return -TARGET_EFAULT
;
10095 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
10096 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
10097 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
10098 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
10099 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
10100 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
10101 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
10102 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
10103 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
10104 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
10105 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
10106 #ifdef _STATFS_F_FLAGS
10107 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
10109 __put_user(0, &target_stfs
->f_flags
);
10111 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
10112 unlock_user_struct(target_stfs
, arg3
, 1);
10115 case TARGET_NR_fstatfs64
:
10116 ret
= get_errno(fstatfs(arg1
, &stfs
));
10117 goto convert_statfs64
;
10119 #ifdef TARGET_NR_socketcall
10120 case TARGET_NR_socketcall
:
10121 return do_socketcall(arg1
, arg2
);
10123 #ifdef TARGET_NR_accept
10124 case TARGET_NR_accept
:
10125 return do_accept4(arg1
, arg2
, arg3
, 0);
10127 #ifdef TARGET_NR_accept4
10128 case TARGET_NR_accept4
:
10129 return do_accept4(arg1
, arg2
, arg3
, arg4
);
10131 #ifdef TARGET_NR_bind
10132 case TARGET_NR_bind
:
10133 return do_bind(arg1
, arg2
, arg3
);
10135 #ifdef TARGET_NR_connect
10136 case TARGET_NR_connect
:
10137 return do_connect(arg1
, arg2
, arg3
);
10139 #ifdef TARGET_NR_getpeername
10140 case TARGET_NR_getpeername
:
10141 return do_getpeername(arg1
, arg2
, arg3
);
10143 #ifdef TARGET_NR_getsockname
10144 case TARGET_NR_getsockname
:
10145 return do_getsockname(arg1
, arg2
, arg3
);
10147 #ifdef TARGET_NR_getsockopt
10148 case TARGET_NR_getsockopt
:
10149 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
10151 #ifdef TARGET_NR_listen
10152 case TARGET_NR_listen
:
10153 return get_errno(listen(arg1
, arg2
));
10155 #ifdef TARGET_NR_recv
10156 case TARGET_NR_recv
:
10157 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
10159 #ifdef TARGET_NR_recvfrom
10160 case TARGET_NR_recvfrom
:
10161 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10163 #ifdef TARGET_NR_recvmsg
10164 case TARGET_NR_recvmsg
:
10165 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
10167 #ifdef TARGET_NR_send
10168 case TARGET_NR_send
:
10169 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
10171 #ifdef TARGET_NR_sendmsg
10172 case TARGET_NR_sendmsg
:
10173 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
10175 #ifdef TARGET_NR_sendmmsg
10176 case TARGET_NR_sendmmsg
:
10177 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
10179 #ifdef TARGET_NR_recvmmsg
10180 case TARGET_NR_recvmmsg
:
10181 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
10183 #ifdef TARGET_NR_sendto
10184 case TARGET_NR_sendto
:
10185 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10187 #ifdef TARGET_NR_shutdown
10188 case TARGET_NR_shutdown
:
10189 return get_errno(shutdown(arg1
, arg2
));
10191 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10192 case TARGET_NR_getrandom
:
10193 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
10195 return -TARGET_EFAULT
;
10197 ret
= get_errno(getrandom(p
, arg2
, arg3
));
10198 unlock_user(p
, arg1
, ret
);
10201 #ifdef TARGET_NR_socket
10202 case TARGET_NR_socket
:
10203 return do_socket(arg1
, arg2
, arg3
);
10205 #ifdef TARGET_NR_socketpair
10206 case TARGET_NR_socketpair
:
10207 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
10209 #ifdef TARGET_NR_setsockopt
10210 case TARGET_NR_setsockopt
:
10211 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10213 #if defined(TARGET_NR_syslog)
10214 case TARGET_NR_syslog
:
10219 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10220 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10221 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10222 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10223 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10224 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10225 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10226 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10227 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10228 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10229 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10230 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10233 return -TARGET_EINVAL
;
10238 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10240 return -TARGET_EFAULT
;
10242 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10243 unlock_user(p
, arg2
, arg3
);
10247 return -TARGET_EINVAL
;
10252 case TARGET_NR_setitimer
:
10254 struct itimerval value
, ovalue
, *pvalue
;
10258 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10259 || copy_from_user_timeval(&pvalue
->it_value
,
10260 arg2
+ sizeof(struct target_timeval
)))
10261 return -TARGET_EFAULT
;
10265 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10266 if (!is_error(ret
) && arg3
) {
10267 if (copy_to_user_timeval(arg3
,
10268 &ovalue
.it_interval
)
10269 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10271 return -TARGET_EFAULT
;
10275 case TARGET_NR_getitimer
:
10277 struct itimerval value
;
10279 ret
= get_errno(getitimer(arg1
, &value
));
10280 if (!is_error(ret
) && arg2
) {
10281 if (copy_to_user_timeval(arg2
,
10282 &value
.it_interval
)
10283 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10285 return -TARGET_EFAULT
;
10289 #ifdef TARGET_NR_stat
10290 case TARGET_NR_stat
:
10291 if (!(p
= lock_user_string(arg1
))) {
10292 return -TARGET_EFAULT
;
10294 ret
= get_errno(stat(path(p
), &st
));
10295 unlock_user(p
, arg1
, 0);
10298 #ifdef TARGET_NR_lstat
10299 case TARGET_NR_lstat
:
10300 if (!(p
= lock_user_string(arg1
))) {
10301 return -TARGET_EFAULT
;
10303 ret
= get_errno(lstat(path(p
), &st
));
10304 unlock_user(p
, arg1
, 0);
10307 #ifdef TARGET_NR_fstat
10308 case TARGET_NR_fstat
:
10310 ret
= get_errno(fstat(arg1
, &st
));
10311 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10314 if (!is_error(ret
)) {
10315 struct target_stat
*target_st
;
10317 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10318 return -TARGET_EFAULT
;
10319 memset(target_st
, 0, sizeof(*target_st
));
10320 __put_user(st
.st_dev
, &target_st
->st_dev
);
10321 __put_user(st
.st_ino
, &target_st
->st_ino
);
10322 __put_user(st
.st_mode
, &target_st
->st_mode
);
10323 __put_user(st
.st_uid
, &target_st
->st_uid
);
10324 __put_user(st
.st_gid
, &target_st
->st_gid
);
10325 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10326 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10327 __put_user(st
.st_size
, &target_st
->st_size
);
10328 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10329 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10330 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10331 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10332 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10333 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10334 __put_user(st
.st_atim
.tv_nsec
,
10335 &target_st
->target_st_atime_nsec
);
10336 __put_user(st
.st_mtim
.tv_nsec
,
10337 &target_st
->target_st_mtime_nsec
);
10338 __put_user(st
.st_ctim
.tv_nsec
,
10339 &target_st
->target_st_ctime_nsec
);
10341 unlock_user_struct(target_st
, arg2
, 1);
10346 case TARGET_NR_vhangup
:
10347 return get_errno(vhangup());
10348 #ifdef TARGET_NR_syscall
10349 case TARGET_NR_syscall
:
10350 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10351 arg6
, arg7
, arg8
, 0);
10353 #if defined(TARGET_NR_wait4)
10354 case TARGET_NR_wait4
:
10357 abi_long status_ptr
= arg2
;
10358 struct rusage rusage
, *rusage_ptr
;
10359 abi_ulong target_rusage
= arg4
;
10360 abi_long rusage_err
;
10362 rusage_ptr
= &rusage
;
10365 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10366 if (!is_error(ret
)) {
10367 if (status_ptr
&& ret
) {
10368 status
= host_to_target_waitstatus(status
);
10369 if (put_user_s32(status
, status_ptr
))
10370 return -TARGET_EFAULT
;
10372 if (target_rusage
) {
10373 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10382 #ifdef TARGET_NR_swapoff
10383 case TARGET_NR_swapoff
:
10384 if (!(p
= lock_user_string(arg1
)))
10385 return -TARGET_EFAULT
;
10386 ret
= get_errno(swapoff(p
));
10387 unlock_user(p
, arg1
, 0);
10390 case TARGET_NR_sysinfo
:
10392 struct target_sysinfo
*target_value
;
10393 struct sysinfo value
;
10394 ret
= get_errno(sysinfo(&value
));
10395 if (!is_error(ret
) && arg1
)
10397 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10398 return -TARGET_EFAULT
;
10399 __put_user(value
.uptime
, &target_value
->uptime
);
10400 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10401 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10402 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10403 __put_user(value
.totalram
, &target_value
->totalram
);
10404 __put_user(value
.freeram
, &target_value
->freeram
);
10405 __put_user(value
.sharedram
, &target_value
->sharedram
);
10406 __put_user(value
.bufferram
, &target_value
->bufferram
);
10407 __put_user(value
.totalswap
, &target_value
->totalswap
);
10408 __put_user(value
.freeswap
, &target_value
->freeswap
);
10409 __put_user(value
.procs
, &target_value
->procs
);
10410 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10411 __put_user(value
.freehigh
, &target_value
->freehigh
);
10412 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10413 unlock_user_struct(target_value
, arg1
, 1);
10417 #ifdef TARGET_NR_ipc
10418 case TARGET_NR_ipc
:
10419 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10421 #ifdef TARGET_NR_semget
10422 case TARGET_NR_semget
:
10423 return get_errno(semget(arg1
, arg2
, arg3
));
10425 #ifdef TARGET_NR_semop
10426 case TARGET_NR_semop
:
10427 return do_semtimedop(arg1
, arg2
, arg3
, 0, false);
10429 #ifdef TARGET_NR_semtimedop
10430 case TARGET_NR_semtimedop
:
10431 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, false);
10433 #ifdef TARGET_NR_semtimedop_time64
10434 case TARGET_NR_semtimedop_time64
:
10435 return do_semtimedop(arg1
, arg2
, arg3
, arg4
, true);
10437 #ifdef TARGET_NR_semctl
10438 case TARGET_NR_semctl
:
10439 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10441 #ifdef TARGET_NR_msgctl
10442 case TARGET_NR_msgctl
:
10443 return do_msgctl(arg1
, arg2
, arg3
);
10445 #ifdef TARGET_NR_msgget
10446 case TARGET_NR_msgget
:
10447 return get_errno(msgget(arg1
, arg2
));
10449 #ifdef TARGET_NR_msgrcv
10450 case TARGET_NR_msgrcv
:
10451 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10453 #ifdef TARGET_NR_msgsnd
10454 case TARGET_NR_msgsnd
:
10455 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10457 #ifdef TARGET_NR_shmget
10458 case TARGET_NR_shmget
:
10459 return get_errno(shmget(arg1
, arg2
, arg3
));
10461 #ifdef TARGET_NR_shmctl
10462 case TARGET_NR_shmctl
:
10463 return do_shmctl(arg1
, arg2
, arg3
);
10465 #ifdef TARGET_NR_shmat
10466 case TARGET_NR_shmat
:
10467 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10469 #ifdef TARGET_NR_shmdt
10470 case TARGET_NR_shmdt
:
10471 return do_shmdt(arg1
);
10473 case TARGET_NR_fsync
:
10474 return get_errno(fsync(arg1
));
10475 case TARGET_NR_clone
:
10476 /* Linux manages to have three different orderings for its
10477 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10478 * match the kernel's CONFIG_CLONE_* settings.
10479 * Microblaze is further special in that it uses a sixth
10480 * implicit argument to clone for the TLS pointer.
10482 #if defined(TARGET_MICROBLAZE)
10483 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10484 #elif defined(TARGET_CLONE_BACKWARDS)
10485 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10486 #elif defined(TARGET_CLONE_BACKWARDS2)
10487 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10489 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10492 #ifdef __NR_exit_group
10493 /* new thread calls */
10494 case TARGET_NR_exit_group
:
10495 preexit_cleanup(cpu_env
, arg1
);
10496 return get_errno(exit_group(arg1
));
10498 case TARGET_NR_setdomainname
:
10499 if (!(p
= lock_user_string(arg1
)))
10500 return -TARGET_EFAULT
;
10501 ret
= get_errno(setdomainname(p
, arg2
));
10502 unlock_user(p
, arg1
, 0);
10504 case TARGET_NR_uname
:
10505 /* no need to transcode because we use the linux syscall */
10507 struct new_utsname
* buf
;
10509 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10510 return -TARGET_EFAULT
;
10511 ret
= get_errno(sys_uname(buf
));
10512 if (!is_error(ret
)) {
10513 /* Overwrite the native machine name with whatever is being
10515 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10516 sizeof(buf
->machine
));
10517 /* Allow the user to override the reported release. */
10518 if (qemu_uname_release
&& *qemu_uname_release
) {
10519 g_strlcpy(buf
->release
, qemu_uname_release
,
10520 sizeof(buf
->release
));
10523 unlock_user_struct(buf
, arg1
, 1);
10527 case TARGET_NR_modify_ldt
:
10528 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10529 #if !defined(TARGET_X86_64)
10530 case TARGET_NR_vm86
:
10531 return do_vm86(cpu_env
, arg1
, arg2
);
10534 #if defined(TARGET_NR_adjtimex)
10535 case TARGET_NR_adjtimex
:
10537 struct timex host_buf
;
10539 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10540 return -TARGET_EFAULT
;
10542 ret
= get_errno(adjtimex(&host_buf
));
10543 if (!is_error(ret
)) {
10544 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10545 return -TARGET_EFAULT
;
10551 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10552 case TARGET_NR_clock_adjtime
:
10554 struct timex htx
, *phtx
= &htx
;
10556 if (target_to_host_timex(phtx
, arg2
) != 0) {
10557 return -TARGET_EFAULT
;
10559 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10560 if (!is_error(ret
) && phtx
) {
10561 if (host_to_target_timex(arg2
, phtx
) != 0) {
10562 return -TARGET_EFAULT
;
10568 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10569 case TARGET_NR_clock_adjtime64
:
10573 if (target_to_host_timex64(&htx
, arg2
) != 0) {
10574 return -TARGET_EFAULT
;
10576 ret
= get_errno(clock_adjtime(arg1
, &htx
));
10577 if (!is_error(ret
) && host_to_target_timex64(arg2
, &htx
)) {
10578 return -TARGET_EFAULT
;
10583 case TARGET_NR_getpgid
:
10584 return get_errno(getpgid(arg1
));
10585 case TARGET_NR_fchdir
:
10586 return get_errno(fchdir(arg1
));
10587 case TARGET_NR_personality
:
10588 return get_errno(personality(arg1
));
10589 #ifdef TARGET_NR__llseek /* Not on alpha */
10590 case TARGET_NR__llseek
:
10593 #if !defined(__NR_llseek)
10594 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10596 ret
= get_errno(res
);
10601 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10603 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10604 return -TARGET_EFAULT
;
10609 #ifdef TARGET_NR_getdents
10610 case TARGET_NR_getdents
:
10611 return do_getdents(arg1
, arg2
, arg3
);
10612 #endif /* TARGET_NR_getdents */
10613 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10614 case TARGET_NR_getdents64
:
10615 return do_getdents64(arg1
, arg2
, arg3
);
10616 #endif /* TARGET_NR_getdents64 */
10617 #if defined(TARGET_NR__newselect)
10618 case TARGET_NR__newselect
:
10619 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10621 #ifdef TARGET_NR_poll
10622 case TARGET_NR_poll
:
10623 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, false, false);
10625 #ifdef TARGET_NR_ppoll
10626 case TARGET_NR_ppoll
:
10627 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, false);
10629 #ifdef TARGET_NR_ppoll_time64
10630 case TARGET_NR_ppoll_time64
:
10631 return do_ppoll(arg1
, arg2
, arg3
, arg4
, arg5
, true, true);
10633 case TARGET_NR_flock
:
10634 /* NOTE: the flock constant seems to be the same for every
10636 return get_errno(safe_flock(arg1
, arg2
));
10637 case TARGET_NR_readv
:
10639 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10641 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10642 unlock_iovec(vec
, arg2
, arg3
, 1);
10644 ret
= -host_to_target_errno(errno
);
10648 case TARGET_NR_writev
:
10650 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10652 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10653 unlock_iovec(vec
, arg2
, arg3
, 0);
10655 ret
= -host_to_target_errno(errno
);
10659 #if defined(TARGET_NR_preadv)
10660 case TARGET_NR_preadv
:
10662 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10664 unsigned long low
, high
;
10666 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10667 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10668 unlock_iovec(vec
, arg2
, arg3
, 1);
10670 ret
= -host_to_target_errno(errno
);
10675 #if defined(TARGET_NR_pwritev)
10676 case TARGET_NR_pwritev
:
10678 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10680 unsigned long low
, high
;
10682 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10683 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10684 unlock_iovec(vec
, arg2
, arg3
, 0);
10686 ret
= -host_to_target_errno(errno
);
10691 case TARGET_NR_getsid
:
10692 return get_errno(getsid(arg1
));
10693 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10694 case TARGET_NR_fdatasync
:
10695 return get_errno(fdatasync(arg1
));
10697 case TARGET_NR_sched_getaffinity
:
10699 unsigned int mask_size
;
10700 unsigned long *mask
;
10703 * sched_getaffinity needs multiples of ulong, so need to take
10704 * care of mismatches between target ulong and host ulong sizes.
10706 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10707 return -TARGET_EINVAL
;
10709 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10711 mask
= alloca(mask_size
);
10712 memset(mask
, 0, mask_size
);
10713 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10715 if (!is_error(ret
)) {
10717 /* More data returned than the caller's buffer will fit.
10718 * This only happens if sizeof(abi_long) < sizeof(long)
10719 * and the caller passed us a buffer holding an odd number
10720 * of abi_longs. If the host kernel is actually using the
10721 * extra 4 bytes then fail EINVAL; otherwise we can just
10722 * ignore them and only copy the interesting part.
10724 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10725 if (numcpus
> arg2
* 8) {
10726 return -TARGET_EINVAL
;
10731 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10732 return -TARGET_EFAULT
;
10737 case TARGET_NR_sched_setaffinity
:
10739 unsigned int mask_size
;
10740 unsigned long *mask
;
10743 * sched_setaffinity needs multiples of ulong, so need to take
10744 * care of mismatches between target ulong and host ulong sizes.
10746 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10747 return -TARGET_EINVAL
;
10749 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10750 mask
= alloca(mask_size
);
10752 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10757 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10759 case TARGET_NR_getcpu
:
10761 unsigned cpu
, node
;
10762 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10763 arg2
? &node
: NULL
,
10765 if (is_error(ret
)) {
10768 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10769 return -TARGET_EFAULT
;
10771 if (arg2
&& put_user_u32(node
, arg2
)) {
10772 return -TARGET_EFAULT
;
10776 case TARGET_NR_sched_setparam
:
10778 struct target_sched_param
*target_schp
;
10779 struct sched_param schp
;
10782 return -TARGET_EINVAL
;
10784 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1)) {
10785 return -TARGET_EFAULT
;
10787 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10788 unlock_user_struct(target_schp
, arg2
, 0);
10789 return get_errno(sys_sched_setparam(arg1
, &schp
));
10791 case TARGET_NR_sched_getparam
:
10793 struct target_sched_param
*target_schp
;
10794 struct sched_param schp
;
10797 return -TARGET_EINVAL
;
10799 ret
= get_errno(sys_sched_getparam(arg1
, &schp
));
10800 if (!is_error(ret
)) {
10801 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0)) {
10802 return -TARGET_EFAULT
;
10804 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10805 unlock_user_struct(target_schp
, arg2
, 1);
10809 case TARGET_NR_sched_setscheduler
:
10811 struct target_sched_param
*target_schp
;
10812 struct sched_param schp
;
10814 return -TARGET_EINVAL
;
10816 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1)) {
10817 return -TARGET_EFAULT
;
10819 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10820 unlock_user_struct(target_schp
, arg3
, 0);
10821 return get_errno(sys_sched_setscheduler(arg1
, arg2
, &schp
));
10823 case TARGET_NR_sched_getscheduler
:
10824 return get_errno(sys_sched_getscheduler(arg1
));
10825 case TARGET_NR_sched_getattr
:
10827 struct target_sched_attr
*target_scha
;
10828 struct sched_attr scha
;
10830 return -TARGET_EINVAL
;
10832 if (arg3
> sizeof(scha
)) {
10833 arg3
= sizeof(scha
);
10835 ret
= get_errno(sys_sched_getattr(arg1
, &scha
, arg3
, arg4
));
10836 if (!is_error(ret
)) {
10837 target_scha
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10838 if (!target_scha
) {
10839 return -TARGET_EFAULT
;
10841 target_scha
->size
= tswap32(scha
.size
);
10842 target_scha
->sched_policy
= tswap32(scha
.sched_policy
);
10843 target_scha
->sched_flags
= tswap64(scha
.sched_flags
);
10844 target_scha
->sched_nice
= tswap32(scha
.sched_nice
);
10845 target_scha
->sched_priority
= tswap32(scha
.sched_priority
);
10846 target_scha
->sched_runtime
= tswap64(scha
.sched_runtime
);
10847 target_scha
->sched_deadline
= tswap64(scha
.sched_deadline
);
10848 target_scha
->sched_period
= tswap64(scha
.sched_period
);
10849 if (scha
.size
> offsetof(struct sched_attr
, sched_util_min
)) {
10850 target_scha
->sched_util_min
= tswap32(scha
.sched_util_min
);
10851 target_scha
->sched_util_max
= tswap32(scha
.sched_util_max
);
10853 unlock_user(target_scha
, arg2
, arg3
);
10857 case TARGET_NR_sched_setattr
:
10859 struct target_sched_attr
*target_scha
;
10860 struct sched_attr scha
;
10864 return -TARGET_EINVAL
;
10866 if (get_user_u32(size
, arg2
)) {
10867 return -TARGET_EFAULT
;
10870 size
= offsetof(struct target_sched_attr
, sched_util_min
);
10872 if (size
< offsetof(struct target_sched_attr
, sched_util_min
)) {
10873 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
10874 return -TARGET_EFAULT
;
10876 return -TARGET_E2BIG
;
10879 zeroed
= check_zeroed_user(arg2
, sizeof(struct target_sched_attr
), size
);
10882 } else if (zeroed
== 0) {
10883 if (put_user_u32(sizeof(struct target_sched_attr
), arg2
)) {
10884 return -TARGET_EFAULT
;
10886 return -TARGET_E2BIG
;
10888 if (size
> sizeof(struct target_sched_attr
)) {
10889 size
= sizeof(struct target_sched_attr
);
10892 target_scha
= lock_user(VERIFY_READ
, arg2
, size
, 1);
10893 if (!target_scha
) {
10894 return -TARGET_EFAULT
;
10897 scha
.sched_policy
= tswap32(target_scha
->sched_policy
);
10898 scha
.sched_flags
= tswap64(target_scha
->sched_flags
);
10899 scha
.sched_nice
= tswap32(target_scha
->sched_nice
);
10900 scha
.sched_priority
= tswap32(target_scha
->sched_priority
);
10901 scha
.sched_runtime
= tswap64(target_scha
->sched_runtime
);
10902 scha
.sched_deadline
= tswap64(target_scha
->sched_deadline
);
10903 scha
.sched_period
= tswap64(target_scha
->sched_period
);
10904 if (size
> offsetof(struct target_sched_attr
, sched_util_min
)) {
10905 scha
.sched_util_min
= tswap32(target_scha
->sched_util_min
);
10906 scha
.sched_util_max
= tswap32(target_scha
->sched_util_max
);
10908 unlock_user(target_scha
, arg2
, 0);
10909 return get_errno(sys_sched_setattr(arg1
, &scha
, arg3
));
10911 case TARGET_NR_sched_yield
:
10912 return get_errno(sched_yield());
10913 case TARGET_NR_sched_get_priority_max
:
10914 return get_errno(sched_get_priority_max(arg1
));
10915 case TARGET_NR_sched_get_priority_min
:
10916 return get_errno(sched_get_priority_min(arg1
));
10917 #ifdef TARGET_NR_sched_rr_get_interval
10918 case TARGET_NR_sched_rr_get_interval
:
10920 struct timespec ts
;
10921 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10922 if (!is_error(ret
)) {
10923 ret
= host_to_target_timespec(arg2
, &ts
);
10928 #ifdef TARGET_NR_sched_rr_get_interval_time64
10929 case TARGET_NR_sched_rr_get_interval_time64
:
10931 struct timespec ts
;
10932 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10933 if (!is_error(ret
)) {
10934 ret
= host_to_target_timespec64(arg2
, &ts
);
10939 #if defined(TARGET_NR_nanosleep)
10940 case TARGET_NR_nanosleep
:
10942 struct timespec req
, rem
;
10943 target_to_host_timespec(&req
, arg1
);
10944 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10945 if (is_error(ret
) && arg2
) {
10946 host_to_target_timespec(arg2
, &rem
);
10951 case TARGET_NR_prctl
:
10952 return do_prctl(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
);
10954 #ifdef TARGET_NR_arch_prctl
10955 case TARGET_NR_arch_prctl
:
10956 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10958 #ifdef TARGET_NR_pread64
10959 case TARGET_NR_pread64
:
10960 if (regpairs_aligned(cpu_env
, num
)) {
10964 if (arg2
== 0 && arg3
== 0) {
10965 /* Special-case NULL buffer and zero length, which should succeed */
10968 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10970 return -TARGET_EFAULT
;
10973 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10974 unlock_user(p
, arg2
, ret
);
10976 case TARGET_NR_pwrite64
:
10977 if (regpairs_aligned(cpu_env
, num
)) {
10981 if (arg2
== 0 && arg3
== 0) {
10982 /* Special-case NULL buffer and zero length, which should succeed */
10985 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
10987 return -TARGET_EFAULT
;
10990 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10991 unlock_user(p
, arg2
, 0);
10994 case TARGET_NR_getcwd
:
10995 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10996 return -TARGET_EFAULT
;
10997 ret
= get_errno(sys_getcwd1(p
, arg2
));
10998 unlock_user(p
, arg1
, ret
);
11000 case TARGET_NR_capget
:
11001 case TARGET_NR_capset
:
11003 struct target_user_cap_header
*target_header
;
11004 struct target_user_cap_data
*target_data
= NULL
;
11005 struct __user_cap_header_struct header
;
11006 struct __user_cap_data_struct data
[2];
11007 struct __user_cap_data_struct
*dataptr
= NULL
;
11008 int i
, target_datalen
;
11009 int data_items
= 1;
11011 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11012 return -TARGET_EFAULT
;
11014 header
.version
= tswap32(target_header
->version
);
11015 header
.pid
= tswap32(target_header
->pid
);
11017 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11018 /* Version 2 and up takes pointer to two user_data structs */
11022 target_datalen
= sizeof(*target_data
) * data_items
;
11025 if (num
== TARGET_NR_capget
) {
11026 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11028 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11030 if (!target_data
) {
11031 unlock_user_struct(target_header
, arg1
, 0);
11032 return -TARGET_EFAULT
;
11035 if (num
== TARGET_NR_capset
) {
11036 for (i
= 0; i
< data_items
; i
++) {
11037 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11038 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11039 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11046 if (num
== TARGET_NR_capget
) {
11047 ret
= get_errno(capget(&header
, dataptr
));
11049 ret
= get_errno(capset(&header
, dataptr
));
11052 /* The kernel always updates version for both capget and capset */
11053 target_header
->version
= tswap32(header
.version
);
11054 unlock_user_struct(target_header
, arg1
, 1);
11057 if (num
== TARGET_NR_capget
) {
11058 for (i
= 0; i
< data_items
; i
++) {
11059 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11060 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11061 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11063 unlock_user(target_data
, arg2
, target_datalen
);
11065 unlock_user(target_data
, arg2
, 0);
11070 case TARGET_NR_sigaltstack
:
11071 return do_sigaltstack(arg1
, arg2
, cpu_env
);
11073 #ifdef CONFIG_SENDFILE
11074 #ifdef TARGET_NR_sendfile
11075 case TARGET_NR_sendfile
:
11077 off_t
*offp
= NULL
;
11080 ret
= get_user_sal(off
, arg3
);
11081 if (is_error(ret
)) {
11086 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11087 if (!is_error(ret
) && arg3
) {
11088 abi_long ret2
= put_user_sal(off
, arg3
);
11089 if (is_error(ret2
)) {
11096 #ifdef TARGET_NR_sendfile64
11097 case TARGET_NR_sendfile64
:
11099 off_t
*offp
= NULL
;
11102 ret
= get_user_s64(off
, arg3
);
11103 if (is_error(ret
)) {
11108 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11109 if (!is_error(ret
) && arg3
) {
11110 abi_long ret2
= put_user_s64(off
, arg3
);
11111 if (is_error(ret2
)) {
11119 #ifdef TARGET_NR_vfork
11120 case TARGET_NR_vfork
:
11121 return get_errno(do_fork(cpu_env
,
11122 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11125 #ifdef TARGET_NR_ugetrlimit
11126 case TARGET_NR_ugetrlimit
:
11128 struct rlimit rlim
;
11129 int resource
= target_to_host_resource(arg1
);
11130 ret
= get_errno(getrlimit(resource
, &rlim
));
11131 if (!is_error(ret
)) {
11132 struct target_rlimit
*target_rlim
;
11133 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11134 return -TARGET_EFAULT
;
11135 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11136 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11137 unlock_user_struct(target_rlim
, arg2
, 1);
11142 #ifdef TARGET_NR_truncate64
11143 case TARGET_NR_truncate64
:
11144 if (!(p
= lock_user_string(arg1
)))
11145 return -TARGET_EFAULT
;
11146 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11147 unlock_user(p
, arg1
, 0);
11150 #ifdef TARGET_NR_ftruncate64
11151 case TARGET_NR_ftruncate64
:
11152 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11154 #ifdef TARGET_NR_stat64
11155 case TARGET_NR_stat64
:
11156 if (!(p
= lock_user_string(arg1
))) {
11157 return -TARGET_EFAULT
;
11159 ret
= get_errno(stat(path(p
), &st
));
11160 unlock_user(p
, arg1
, 0);
11161 if (!is_error(ret
))
11162 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11165 #ifdef TARGET_NR_lstat64
11166 case TARGET_NR_lstat64
:
11167 if (!(p
= lock_user_string(arg1
))) {
11168 return -TARGET_EFAULT
;
11170 ret
= get_errno(lstat(path(p
), &st
));
11171 unlock_user(p
, arg1
, 0);
11172 if (!is_error(ret
))
11173 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11176 #ifdef TARGET_NR_fstat64
11177 case TARGET_NR_fstat64
:
11178 ret
= get_errno(fstat(arg1
, &st
));
11179 if (!is_error(ret
))
11180 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11183 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11184 #ifdef TARGET_NR_fstatat64
11185 case TARGET_NR_fstatat64
:
11187 #ifdef TARGET_NR_newfstatat
11188 case TARGET_NR_newfstatat
:
11190 if (!(p
= lock_user_string(arg2
))) {
11191 return -TARGET_EFAULT
;
11193 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11194 unlock_user(p
, arg2
, 0);
11195 if (!is_error(ret
))
11196 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11199 #if defined(TARGET_NR_statx)
11200 case TARGET_NR_statx
:
11202 struct target_statx
*target_stx
;
11206 p
= lock_user_string(arg2
);
11208 return -TARGET_EFAULT
;
11210 #if defined(__NR_statx)
11213 * It is assumed that struct statx is architecture independent.
11215 struct target_statx host_stx
;
11218 ret
= get_errno(sys_statx(dirfd
, p
, flags
, mask
, &host_stx
));
11219 if (!is_error(ret
)) {
11220 if (host_to_target_statx(&host_stx
, arg5
) != 0) {
11221 unlock_user(p
, arg2
, 0);
11222 return -TARGET_EFAULT
;
11226 if (ret
!= -TARGET_ENOSYS
) {
11227 unlock_user(p
, arg2
, 0);
11232 ret
= get_errno(fstatat(dirfd
, path(p
), &st
, flags
));
11233 unlock_user(p
, arg2
, 0);
11235 if (!is_error(ret
)) {
11236 if (!lock_user_struct(VERIFY_WRITE
, target_stx
, arg5
, 0)) {
11237 return -TARGET_EFAULT
;
11239 memset(target_stx
, 0, sizeof(*target_stx
));
11240 __put_user(major(st
.st_dev
), &target_stx
->stx_dev_major
);
11241 __put_user(minor(st
.st_dev
), &target_stx
->stx_dev_minor
);
11242 __put_user(st
.st_ino
, &target_stx
->stx_ino
);
11243 __put_user(st
.st_mode
, &target_stx
->stx_mode
);
11244 __put_user(st
.st_uid
, &target_stx
->stx_uid
);
11245 __put_user(st
.st_gid
, &target_stx
->stx_gid
);
11246 __put_user(st
.st_nlink
, &target_stx
->stx_nlink
);
11247 __put_user(major(st
.st_rdev
), &target_stx
->stx_rdev_major
);
11248 __put_user(minor(st
.st_rdev
), &target_stx
->stx_rdev_minor
);
11249 __put_user(st
.st_size
, &target_stx
->stx_size
);
11250 __put_user(st
.st_blksize
, &target_stx
->stx_blksize
);
11251 __put_user(st
.st_blocks
, &target_stx
->stx_blocks
);
11252 __put_user(st
.st_atime
, &target_stx
->stx_atime
.tv_sec
);
11253 __put_user(st
.st_mtime
, &target_stx
->stx_mtime
.tv_sec
);
11254 __put_user(st
.st_ctime
, &target_stx
->stx_ctime
.tv_sec
);
11255 unlock_user_struct(target_stx
, arg5
, 1);
11260 #ifdef TARGET_NR_lchown
11261 case TARGET_NR_lchown
:
11262 if (!(p
= lock_user_string(arg1
)))
11263 return -TARGET_EFAULT
;
11264 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11265 unlock_user(p
, arg1
, 0);
11268 #ifdef TARGET_NR_getuid
11269 case TARGET_NR_getuid
:
11270 return get_errno(high2lowuid(getuid()));
11272 #ifdef TARGET_NR_getgid
11273 case TARGET_NR_getgid
:
11274 return get_errno(high2lowgid(getgid()));
11276 #ifdef TARGET_NR_geteuid
11277 case TARGET_NR_geteuid
:
11278 return get_errno(high2lowuid(geteuid()));
11280 #ifdef TARGET_NR_getegid
11281 case TARGET_NR_getegid
:
11282 return get_errno(high2lowgid(getegid()));
11284 case TARGET_NR_setreuid
:
11285 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11286 case TARGET_NR_setregid
:
11287 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11288 case TARGET_NR_getgroups
:
11290 int gidsetsize
= arg1
;
11291 target_id
*target_grouplist
;
11295 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11296 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11297 if (gidsetsize
== 0)
11299 if (!is_error(ret
)) {
11300 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11301 if (!target_grouplist
)
11302 return -TARGET_EFAULT
;
11303 for(i
= 0;i
< ret
; i
++)
11304 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11305 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11309 case TARGET_NR_setgroups
:
11311 int gidsetsize
= arg1
;
11312 target_id
*target_grouplist
;
11313 gid_t
*grouplist
= NULL
;
11316 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11317 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11318 if (!target_grouplist
) {
11319 return -TARGET_EFAULT
;
11321 for (i
= 0; i
< gidsetsize
; i
++) {
11322 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11324 unlock_user(target_grouplist
, arg2
, 0);
11326 return get_errno(setgroups(gidsetsize
, grouplist
));
11328 case TARGET_NR_fchown
:
11329 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11330 #if defined(TARGET_NR_fchownat)
11331 case TARGET_NR_fchownat
:
11332 if (!(p
= lock_user_string(arg2
)))
11333 return -TARGET_EFAULT
;
11334 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11335 low2highgid(arg4
), arg5
));
11336 unlock_user(p
, arg2
, 0);
11339 #ifdef TARGET_NR_setresuid
11340 case TARGET_NR_setresuid
:
11341 return get_errno(sys_setresuid(low2highuid(arg1
),
11343 low2highuid(arg3
)));
11345 #ifdef TARGET_NR_getresuid
11346 case TARGET_NR_getresuid
:
11348 uid_t ruid
, euid
, suid
;
11349 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11350 if (!is_error(ret
)) {
11351 if (put_user_id(high2lowuid(ruid
), arg1
)
11352 || put_user_id(high2lowuid(euid
), arg2
)
11353 || put_user_id(high2lowuid(suid
), arg3
))
11354 return -TARGET_EFAULT
;
11359 #ifdef TARGET_NR_getresgid
11360 case TARGET_NR_setresgid
:
11361 return get_errno(sys_setresgid(low2highgid(arg1
),
11363 low2highgid(arg3
)));
11365 #ifdef TARGET_NR_getresgid
11366 case TARGET_NR_getresgid
:
11368 gid_t rgid
, egid
, sgid
;
11369 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11370 if (!is_error(ret
)) {
11371 if (put_user_id(high2lowgid(rgid
), arg1
)
11372 || put_user_id(high2lowgid(egid
), arg2
)
11373 || put_user_id(high2lowgid(sgid
), arg3
))
11374 return -TARGET_EFAULT
;
11379 #ifdef TARGET_NR_chown
11380 case TARGET_NR_chown
:
11381 if (!(p
= lock_user_string(arg1
)))
11382 return -TARGET_EFAULT
;
11383 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11384 unlock_user(p
, arg1
, 0);
11387 case TARGET_NR_setuid
:
11388 return get_errno(sys_setuid(low2highuid(arg1
)));
11389 case TARGET_NR_setgid
:
11390 return get_errno(sys_setgid(low2highgid(arg1
)));
11391 case TARGET_NR_setfsuid
:
11392 return get_errno(setfsuid(arg1
));
11393 case TARGET_NR_setfsgid
:
11394 return get_errno(setfsgid(arg1
));
11396 #ifdef TARGET_NR_lchown32
11397 case TARGET_NR_lchown32
:
11398 if (!(p
= lock_user_string(arg1
)))
11399 return -TARGET_EFAULT
;
11400 ret
= get_errno(lchown(p
, arg2
, arg3
));
11401 unlock_user(p
, arg1
, 0);
11404 #ifdef TARGET_NR_getuid32
11405 case TARGET_NR_getuid32
:
11406 return get_errno(getuid());
11409 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11410 /* Alpha specific */
11411 case TARGET_NR_getxuid
:
11415 cpu_env
->ir
[IR_A4
]=euid
;
11417 return get_errno(getuid());
11419 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11420 /* Alpha specific */
11421 case TARGET_NR_getxgid
:
11425 cpu_env
->ir
[IR_A4
]=egid
;
11427 return get_errno(getgid());
11429 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11430 /* Alpha specific */
11431 case TARGET_NR_osf_getsysinfo
:
11432 ret
= -TARGET_EOPNOTSUPP
;
11434 case TARGET_GSI_IEEE_FP_CONTROL
:
11436 uint64_t fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11437 uint64_t swcr
= cpu_env
->swcr
;
11439 swcr
&= ~SWCR_STATUS_MASK
;
11440 swcr
|= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11442 if (put_user_u64 (swcr
, arg2
))
11443 return -TARGET_EFAULT
;
11448 /* case GSI_IEEE_STATE_AT_SIGNAL:
11449 -- Not implemented in linux kernel.
11451 -- Retrieves current unaligned access state; not much used.
11452 case GSI_PROC_TYPE:
11453 -- Retrieves implver information; surely not used.
11454 case GSI_GET_HWRPB:
11455 -- Grabs a copy of the HWRPB; surely not used.
11460 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11461 /* Alpha specific */
11462 case TARGET_NR_osf_setsysinfo
:
11463 ret
= -TARGET_EOPNOTSUPP
;
11465 case TARGET_SSI_IEEE_FP_CONTROL
:
11467 uint64_t swcr
, fpcr
;
11469 if (get_user_u64 (swcr
, arg2
)) {
11470 return -TARGET_EFAULT
;
11474 * The kernel calls swcr_update_status to update the
11475 * status bits from the fpcr at every point that it
11476 * could be queried. Therefore, we store the status
11477 * bits only in FPCR.
11479 cpu_env
->swcr
= swcr
& (SWCR_TRAP_ENABLE_MASK
| SWCR_MAP_MASK
);
11481 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11482 fpcr
&= ((uint64_t)FPCR_DYN_MASK
<< 32);
11483 fpcr
|= alpha_ieee_swcr_to_fpcr(swcr
);
11484 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11489 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11491 uint64_t exc
, fpcr
, fex
;
11493 if (get_user_u64(exc
, arg2
)) {
11494 return -TARGET_EFAULT
;
11496 exc
&= SWCR_STATUS_MASK
;
11497 fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11499 /* Old exceptions are not signaled. */
11500 fex
= alpha_ieee_fpcr_to_swcr(fpcr
);
11502 fex
>>= SWCR_STATUS_TO_EXCSUM_SHIFT
;
11503 fex
&= (cpu_env
)->swcr
;
11505 /* Update the hardware fpcr. */
11506 fpcr
|= alpha_ieee_swcr_to_fpcr(exc
);
11507 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11510 int si_code
= TARGET_FPE_FLTUNK
;
11511 target_siginfo_t info
;
11513 if (fex
& SWCR_TRAP_ENABLE_DNO
) {
11514 si_code
= TARGET_FPE_FLTUND
;
11516 if (fex
& SWCR_TRAP_ENABLE_INE
) {
11517 si_code
= TARGET_FPE_FLTRES
;
11519 if (fex
& SWCR_TRAP_ENABLE_UNF
) {
11520 si_code
= TARGET_FPE_FLTUND
;
11522 if (fex
& SWCR_TRAP_ENABLE_OVF
) {
11523 si_code
= TARGET_FPE_FLTOVF
;
11525 if (fex
& SWCR_TRAP_ENABLE_DZE
) {
11526 si_code
= TARGET_FPE_FLTDIV
;
11528 if (fex
& SWCR_TRAP_ENABLE_INV
) {
11529 si_code
= TARGET_FPE_FLTINV
;
11532 info
.si_signo
= SIGFPE
;
11534 info
.si_code
= si_code
;
11535 info
._sifields
._sigfault
._addr
= (cpu_env
)->pc
;
11536 queue_signal(cpu_env
, info
.si_signo
,
11537 QEMU_SI_FAULT
, &info
);
11543 /* case SSI_NVPAIRS:
11544 -- Used with SSIN_UACPROC to enable unaligned accesses.
11545 case SSI_IEEE_STATE_AT_SIGNAL:
11546 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11547 -- Not implemented in linux kernel
11552 #ifdef TARGET_NR_osf_sigprocmask
11553 /* Alpha specific. */
11554 case TARGET_NR_osf_sigprocmask
:
11558 sigset_t set
, oldset
;
11561 case TARGET_SIG_BLOCK
:
11564 case TARGET_SIG_UNBLOCK
:
11567 case TARGET_SIG_SETMASK
:
11571 return -TARGET_EINVAL
;
11574 target_to_host_old_sigset(&set
, &mask
);
11575 ret
= do_sigprocmask(how
, &set
, &oldset
);
11577 host_to_target_old_sigset(&mask
, &oldset
);
11584 #ifdef TARGET_NR_getgid32
11585 case TARGET_NR_getgid32
:
11586 return get_errno(getgid());
11588 #ifdef TARGET_NR_geteuid32
11589 case TARGET_NR_geteuid32
:
11590 return get_errno(geteuid());
11592 #ifdef TARGET_NR_getegid32
11593 case TARGET_NR_getegid32
:
11594 return get_errno(getegid());
11596 #ifdef TARGET_NR_setreuid32
11597 case TARGET_NR_setreuid32
:
11598 return get_errno(setreuid(arg1
, arg2
));
11600 #ifdef TARGET_NR_setregid32
11601 case TARGET_NR_setregid32
:
11602 return get_errno(setregid(arg1
, arg2
));
11604 #ifdef TARGET_NR_getgroups32
11605 case TARGET_NR_getgroups32
:
11607 int gidsetsize
= arg1
;
11608 uint32_t *target_grouplist
;
11612 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11613 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11614 if (gidsetsize
== 0)
11616 if (!is_error(ret
)) {
11617 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11618 if (!target_grouplist
) {
11619 return -TARGET_EFAULT
;
11621 for(i
= 0;i
< ret
; i
++)
11622 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11623 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11628 #ifdef TARGET_NR_setgroups32
11629 case TARGET_NR_setgroups32
:
11631 int gidsetsize
= arg1
;
11632 uint32_t *target_grouplist
;
11636 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11637 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11638 if (!target_grouplist
) {
11639 return -TARGET_EFAULT
;
11641 for(i
= 0;i
< gidsetsize
; i
++)
11642 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11643 unlock_user(target_grouplist
, arg2
, 0);
11644 return get_errno(setgroups(gidsetsize
, grouplist
));
11647 #ifdef TARGET_NR_fchown32
11648 case TARGET_NR_fchown32
:
11649 return get_errno(fchown(arg1
, arg2
, arg3
));
11651 #ifdef TARGET_NR_setresuid32
11652 case TARGET_NR_setresuid32
:
11653 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11655 #ifdef TARGET_NR_getresuid32
11656 case TARGET_NR_getresuid32
:
11658 uid_t ruid
, euid
, suid
;
11659 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11660 if (!is_error(ret
)) {
11661 if (put_user_u32(ruid
, arg1
)
11662 || put_user_u32(euid
, arg2
)
11663 || put_user_u32(suid
, arg3
))
11664 return -TARGET_EFAULT
;
11669 #ifdef TARGET_NR_setresgid32
11670 case TARGET_NR_setresgid32
:
11671 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11673 #ifdef TARGET_NR_getresgid32
11674 case TARGET_NR_getresgid32
:
11676 gid_t rgid
, egid
, sgid
;
11677 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11678 if (!is_error(ret
)) {
11679 if (put_user_u32(rgid
, arg1
)
11680 || put_user_u32(egid
, arg2
)
11681 || put_user_u32(sgid
, arg3
))
11682 return -TARGET_EFAULT
;
11687 #ifdef TARGET_NR_chown32
11688 case TARGET_NR_chown32
:
11689 if (!(p
= lock_user_string(arg1
)))
11690 return -TARGET_EFAULT
;
11691 ret
= get_errno(chown(p
, arg2
, arg3
));
11692 unlock_user(p
, arg1
, 0);
11695 #ifdef TARGET_NR_setuid32
11696 case TARGET_NR_setuid32
:
11697 return get_errno(sys_setuid(arg1
));
11699 #ifdef TARGET_NR_setgid32
11700 case TARGET_NR_setgid32
:
11701 return get_errno(sys_setgid(arg1
));
11703 #ifdef TARGET_NR_setfsuid32
11704 case TARGET_NR_setfsuid32
:
11705 return get_errno(setfsuid(arg1
));
11707 #ifdef TARGET_NR_setfsgid32
11708 case TARGET_NR_setfsgid32
:
11709 return get_errno(setfsgid(arg1
));
11711 #ifdef TARGET_NR_mincore
11712 case TARGET_NR_mincore
:
11714 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11716 return -TARGET_ENOMEM
;
11718 p
= lock_user_string(arg3
);
11720 ret
= -TARGET_EFAULT
;
11722 ret
= get_errno(mincore(a
, arg2
, p
));
11723 unlock_user(p
, arg3
, ret
);
11725 unlock_user(a
, arg1
, 0);
11729 #ifdef TARGET_NR_arm_fadvise64_64
11730 case TARGET_NR_arm_fadvise64_64
:
11731 /* arm_fadvise64_64 looks like fadvise64_64 but
11732 * with different argument order: fd, advice, offset, len
11733 * rather than the usual fd, offset, len, advice.
11734 * Note that offset and len are both 64-bit so appear as
11735 * pairs of 32-bit registers.
11737 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11738 target_offset64(arg5
, arg6
), arg2
);
11739 return -host_to_target_errno(ret
);
11742 #if TARGET_ABI_BITS == 32
11744 #ifdef TARGET_NR_fadvise64_64
11745 case TARGET_NR_fadvise64_64
:
11746 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11747 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11755 /* 6 args: fd, offset (high, low), len (high, low), advice */
11756 if (regpairs_aligned(cpu_env
, num
)) {
11757 /* offset is in (3,4), len in (5,6) and advice in 7 */
11765 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11766 target_offset64(arg4
, arg5
), arg6
);
11767 return -host_to_target_errno(ret
);
11770 #ifdef TARGET_NR_fadvise64
11771 case TARGET_NR_fadvise64
:
11772 /* 5 args: fd, offset (high, low), len, advice */
11773 if (regpairs_aligned(cpu_env
, num
)) {
11774 /* offset is in (3,4), len in 5 and advice in 6 */
11780 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11781 return -host_to_target_errno(ret
);
11784 #else /* not a 32-bit ABI */
11785 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11786 #ifdef TARGET_NR_fadvise64_64
11787 case TARGET_NR_fadvise64_64
:
11789 #ifdef TARGET_NR_fadvise64
11790 case TARGET_NR_fadvise64
:
11792 #ifdef TARGET_S390X
11794 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11795 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11796 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11797 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11801 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11803 #endif /* end of 64-bit ABI fadvise handling */
11805 #ifdef TARGET_NR_madvise
11806 case TARGET_NR_madvise
:
11807 return target_madvise(arg1
, arg2
, arg3
);
11809 #ifdef TARGET_NR_fcntl64
11810 case TARGET_NR_fcntl64
:
11814 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11815 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11818 if (!cpu_env
->eabi
) {
11819 copyfrom
= copy_from_user_oabi_flock64
;
11820 copyto
= copy_to_user_oabi_flock64
;
11824 cmd
= target_to_host_fcntl_cmd(arg2
);
11825 if (cmd
== -TARGET_EINVAL
) {
11830 case TARGET_F_GETLK64
:
11831 ret
= copyfrom(&fl
, arg3
);
11835 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11837 ret
= copyto(arg3
, &fl
);
11841 case TARGET_F_SETLK64
:
11842 case TARGET_F_SETLKW64
:
11843 ret
= copyfrom(&fl
, arg3
);
11847 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11850 ret
= do_fcntl(arg1
, arg2
, arg3
);
11856 #ifdef TARGET_NR_cacheflush
11857 case TARGET_NR_cacheflush
:
11858 /* self-modifying code is handled automatically, so nothing needed */
11861 #ifdef TARGET_NR_getpagesize
11862 case TARGET_NR_getpagesize
:
11863 return TARGET_PAGE_SIZE
;
11865 case TARGET_NR_gettid
:
11866 return get_errno(sys_gettid());
11867 #ifdef TARGET_NR_readahead
11868 case TARGET_NR_readahead
:
11869 #if TARGET_ABI_BITS == 32
11870 if (regpairs_aligned(cpu_env
, num
)) {
11875 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11877 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11882 #ifdef TARGET_NR_setxattr
11883 case TARGET_NR_listxattr
:
11884 case TARGET_NR_llistxattr
:
11888 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11890 return -TARGET_EFAULT
;
11893 p
= lock_user_string(arg1
);
11895 if (num
== TARGET_NR_listxattr
) {
11896 ret
= get_errno(listxattr(p
, b
, arg3
));
11898 ret
= get_errno(llistxattr(p
, b
, arg3
));
11901 ret
= -TARGET_EFAULT
;
11903 unlock_user(p
, arg1
, 0);
11904 unlock_user(b
, arg2
, arg3
);
11907 case TARGET_NR_flistxattr
:
11911 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11913 return -TARGET_EFAULT
;
11916 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11917 unlock_user(b
, arg2
, arg3
);
11920 case TARGET_NR_setxattr
:
11921 case TARGET_NR_lsetxattr
:
11923 void *p
, *n
, *v
= 0;
11925 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11927 return -TARGET_EFAULT
;
11930 p
= lock_user_string(arg1
);
11931 n
= lock_user_string(arg2
);
11933 if (num
== TARGET_NR_setxattr
) {
11934 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11936 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11939 ret
= -TARGET_EFAULT
;
11941 unlock_user(p
, arg1
, 0);
11942 unlock_user(n
, arg2
, 0);
11943 unlock_user(v
, arg3
, 0);
11946 case TARGET_NR_fsetxattr
:
11950 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11952 return -TARGET_EFAULT
;
11955 n
= lock_user_string(arg2
);
11957 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11959 ret
= -TARGET_EFAULT
;
11961 unlock_user(n
, arg2
, 0);
11962 unlock_user(v
, arg3
, 0);
11965 case TARGET_NR_getxattr
:
11966 case TARGET_NR_lgetxattr
:
11968 void *p
, *n
, *v
= 0;
11970 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11972 return -TARGET_EFAULT
;
11975 p
= lock_user_string(arg1
);
11976 n
= lock_user_string(arg2
);
11978 if (num
== TARGET_NR_getxattr
) {
11979 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11981 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11984 ret
= -TARGET_EFAULT
;
11986 unlock_user(p
, arg1
, 0);
11987 unlock_user(n
, arg2
, 0);
11988 unlock_user(v
, arg3
, arg4
);
11991 case TARGET_NR_fgetxattr
:
11995 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11997 return -TARGET_EFAULT
;
12000 n
= lock_user_string(arg2
);
12002 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12004 ret
= -TARGET_EFAULT
;
12006 unlock_user(n
, arg2
, 0);
12007 unlock_user(v
, arg3
, arg4
);
12010 case TARGET_NR_removexattr
:
12011 case TARGET_NR_lremovexattr
:
12014 p
= lock_user_string(arg1
);
12015 n
= lock_user_string(arg2
);
12017 if (num
== TARGET_NR_removexattr
) {
12018 ret
= get_errno(removexattr(p
, n
));
12020 ret
= get_errno(lremovexattr(p
, n
));
12023 ret
= -TARGET_EFAULT
;
12025 unlock_user(p
, arg1
, 0);
12026 unlock_user(n
, arg2
, 0);
12029 case TARGET_NR_fremovexattr
:
12032 n
= lock_user_string(arg2
);
12034 ret
= get_errno(fremovexattr(arg1
, n
));
12036 ret
= -TARGET_EFAULT
;
12038 unlock_user(n
, arg2
, 0);
12042 #endif /* CONFIG_ATTR */
12043 #ifdef TARGET_NR_set_thread_area
12044 case TARGET_NR_set_thread_area
:
12045 #if defined(TARGET_MIPS)
12046 cpu_env
->active_tc
.CP0_UserLocal
= arg1
;
12048 #elif defined(TARGET_CRIS)
12050 ret
= -TARGET_EINVAL
;
12052 cpu_env
->pregs
[PR_PID
] = arg1
;
12056 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12057 return do_set_thread_area(cpu_env
, arg1
);
12058 #elif defined(TARGET_M68K)
12060 TaskState
*ts
= cpu
->opaque
;
12061 ts
->tp_value
= arg1
;
12065 return -TARGET_ENOSYS
;
12068 #ifdef TARGET_NR_get_thread_area
12069 case TARGET_NR_get_thread_area
:
12070 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12071 return do_get_thread_area(cpu_env
, arg1
);
12072 #elif defined(TARGET_M68K)
12074 TaskState
*ts
= cpu
->opaque
;
12075 return ts
->tp_value
;
12078 return -TARGET_ENOSYS
;
12081 #ifdef TARGET_NR_getdomainname
12082 case TARGET_NR_getdomainname
:
12083 return -TARGET_ENOSYS
;
12086 #ifdef TARGET_NR_clock_settime
12087 case TARGET_NR_clock_settime
:
12089 struct timespec ts
;
12091 ret
= target_to_host_timespec(&ts
, arg2
);
12092 if (!is_error(ret
)) {
12093 ret
= get_errno(clock_settime(arg1
, &ts
));
12098 #ifdef TARGET_NR_clock_settime64
12099 case TARGET_NR_clock_settime64
:
12101 struct timespec ts
;
12103 ret
= target_to_host_timespec64(&ts
, arg2
);
12104 if (!is_error(ret
)) {
12105 ret
= get_errno(clock_settime(arg1
, &ts
));
12110 #ifdef TARGET_NR_clock_gettime
12111 case TARGET_NR_clock_gettime
:
12113 struct timespec ts
;
12114 ret
= get_errno(clock_gettime(arg1
, &ts
));
12115 if (!is_error(ret
)) {
12116 ret
= host_to_target_timespec(arg2
, &ts
);
12121 #ifdef TARGET_NR_clock_gettime64
12122 case TARGET_NR_clock_gettime64
:
12124 struct timespec ts
;
12125 ret
= get_errno(clock_gettime(arg1
, &ts
));
12126 if (!is_error(ret
)) {
12127 ret
= host_to_target_timespec64(arg2
, &ts
);
12132 #ifdef TARGET_NR_clock_getres
12133 case TARGET_NR_clock_getres
:
12135 struct timespec ts
;
12136 ret
= get_errno(clock_getres(arg1
, &ts
));
12137 if (!is_error(ret
)) {
12138 host_to_target_timespec(arg2
, &ts
);
12143 #ifdef TARGET_NR_clock_getres_time64
12144 case TARGET_NR_clock_getres_time64
:
12146 struct timespec ts
;
12147 ret
= get_errno(clock_getres(arg1
, &ts
));
12148 if (!is_error(ret
)) {
12149 host_to_target_timespec64(arg2
, &ts
);
12154 #ifdef TARGET_NR_clock_nanosleep
12155 case TARGET_NR_clock_nanosleep
:
12157 struct timespec ts
;
12158 if (target_to_host_timespec(&ts
, arg3
)) {
12159 return -TARGET_EFAULT
;
12161 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12162 &ts
, arg4
? &ts
: NULL
));
12164 * if the call is interrupted by a signal handler, it fails
12165 * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12166 * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12168 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12169 host_to_target_timespec(arg4
, &ts
)) {
12170 return -TARGET_EFAULT
;
12176 #ifdef TARGET_NR_clock_nanosleep_time64
12177 case TARGET_NR_clock_nanosleep_time64
:
12179 struct timespec ts
;
12181 if (target_to_host_timespec64(&ts
, arg3
)) {
12182 return -TARGET_EFAULT
;
12185 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12186 &ts
, arg4
? &ts
: NULL
));
12188 if (ret
== -TARGET_EINTR
&& arg4
&& arg2
!= TIMER_ABSTIME
&&
12189 host_to_target_timespec64(arg4
, &ts
)) {
12190 return -TARGET_EFAULT
;
12196 #if defined(TARGET_NR_set_tid_address)
12197 case TARGET_NR_set_tid_address
:
12199 TaskState
*ts
= cpu
->opaque
;
12200 ts
->child_tidptr
= arg1
;
12201 /* do not call host set_tid_address() syscall, instead return tid() */
12202 return get_errno(sys_gettid());
12206 case TARGET_NR_tkill
:
12207 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12209 case TARGET_NR_tgkill
:
12210 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12211 target_to_host_signal(arg3
)));
12213 #ifdef TARGET_NR_set_robust_list
12214 case TARGET_NR_set_robust_list
:
12215 case TARGET_NR_get_robust_list
:
12216 /* The ABI for supporting robust futexes has userspace pass
12217 * the kernel a pointer to a linked list which is updated by
12218 * userspace after the syscall; the list is walked by the kernel
12219 * when the thread exits. Since the linked list in QEMU guest
12220 * memory isn't a valid linked list for the host and we have
12221 * no way to reliably intercept the thread-death event, we can't
12222 * support these. Silently return ENOSYS so that guest userspace
12223 * falls back to a non-robust futex implementation (which should
12224 * be OK except in the corner case of the guest crashing while
12225 * holding a mutex that is shared with another process via
12228 return -TARGET_ENOSYS
;
12231 #if defined(TARGET_NR_utimensat)
12232 case TARGET_NR_utimensat
:
12234 struct timespec
*tsp
, ts
[2];
12238 if (target_to_host_timespec(ts
, arg3
)) {
12239 return -TARGET_EFAULT
;
12241 if (target_to_host_timespec(ts
+ 1, arg3
+
12242 sizeof(struct target_timespec
))) {
12243 return -TARGET_EFAULT
;
12248 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12250 if (!(p
= lock_user_string(arg2
))) {
12251 return -TARGET_EFAULT
;
12253 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12254 unlock_user(p
, arg2
, 0);
12259 #ifdef TARGET_NR_utimensat_time64
12260 case TARGET_NR_utimensat_time64
:
12262 struct timespec
*tsp
, ts
[2];
12266 if (target_to_host_timespec64(ts
, arg3
)) {
12267 return -TARGET_EFAULT
;
12269 if (target_to_host_timespec64(ts
+ 1, arg3
+
12270 sizeof(struct target__kernel_timespec
))) {
12271 return -TARGET_EFAULT
;
12276 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12278 p
= lock_user_string(arg2
);
12280 return -TARGET_EFAULT
;
12282 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12283 unlock_user(p
, arg2
, 0);
12288 #ifdef TARGET_NR_futex
12289 case TARGET_NR_futex
:
12290 return do_futex(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12292 #ifdef TARGET_NR_futex_time64
12293 case TARGET_NR_futex_time64
:
12294 return do_futex_time64(cpu
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12296 #ifdef CONFIG_INOTIFY
12297 #if defined(TARGET_NR_inotify_init)
12298 case TARGET_NR_inotify_init
:
12299 ret
= get_errno(inotify_init());
12301 fd_trans_register(ret
, &target_inotify_trans
);
12305 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12306 case TARGET_NR_inotify_init1
:
12307 ret
= get_errno(inotify_init1(target_to_host_bitmask(arg1
,
12308 fcntl_flags_tbl
)));
12310 fd_trans_register(ret
, &target_inotify_trans
);
12314 #if defined(TARGET_NR_inotify_add_watch)
12315 case TARGET_NR_inotify_add_watch
:
12316 p
= lock_user_string(arg2
);
12317 ret
= get_errno(inotify_add_watch(arg1
, path(p
), arg3
));
12318 unlock_user(p
, arg2
, 0);
12321 #if defined(TARGET_NR_inotify_rm_watch)
12322 case TARGET_NR_inotify_rm_watch
:
12323 return get_errno(inotify_rm_watch(arg1
, arg2
));
12327 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12328 case TARGET_NR_mq_open
:
12330 struct mq_attr posix_mq_attr
;
12331 struct mq_attr
*pposix_mq_attr
;
12334 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12335 pposix_mq_attr
= NULL
;
12337 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12338 return -TARGET_EFAULT
;
12340 pposix_mq_attr
= &posix_mq_attr
;
12342 p
= lock_user_string(arg1
- 1);
12344 return -TARGET_EFAULT
;
12346 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12347 unlock_user (p
, arg1
, 0);
12351 case TARGET_NR_mq_unlink
:
12352 p
= lock_user_string(arg1
- 1);
12354 return -TARGET_EFAULT
;
12356 ret
= get_errno(mq_unlink(p
));
12357 unlock_user (p
, arg1
, 0);
12360 #ifdef TARGET_NR_mq_timedsend
12361 case TARGET_NR_mq_timedsend
:
12363 struct timespec ts
;
12365 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12367 if (target_to_host_timespec(&ts
, arg5
)) {
12368 return -TARGET_EFAULT
;
12370 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12371 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12372 return -TARGET_EFAULT
;
12375 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12377 unlock_user (p
, arg2
, arg3
);
12381 #ifdef TARGET_NR_mq_timedsend_time64
12382 case TARGET_NR_mq_timedsend_time64
:
12384 struct timespec ts
;
12386 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12388 if (target_to_host_timespec64(&ts
, arg5
)) {
12389 return -TARGET_EFAULT
;
12391 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12392 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12393 return -TARGET_EFAULT
;
12396 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12398 unlock_user(p
, arg2
, arg3
);
12403 #ifdef TARGET_NR_mq_timedreceive
12404 case TARGET_NR_mq_timedreceive
:
12406 struct timespec ts
;
12409 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12411 if (target_to_host_timespec(&ts
, arg5
)) {
12412 return -TARGET_EFAULT
;
12414 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12416 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
12417 return -TARGET_EFAULT
;
12420 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12423 unlock_user (p
, arg2
, arg3
);
12425 put_user_u32(prio
, arg4
);
12429 #ifdef TARGET_NR_mq_timedreceive_time64
12430 case TARGET_NR_mq_timedreceive_time64
:
12432 struct timespec ts
;
12435 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
12437 if (target_to_host_timespec64(&ts
, arg5
)) {
12438 return -TARGET_EFAULT
;
12440 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12442 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
12443 return -TARGET_EFAULT
;
12446 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12449 unlock_user(p
, arg2
, arg3
);
12451 put_user_u32(prio
, arg4
);
12457 /* Not implemented for now... */
12458 /* case TARGET_NR_mq_notify: */
12461 case TARGET_NR_mq_getsetattr
:
12463 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12466 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12467 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12468 &posix_mq_attr_out
));
12469 } else if (arg3
!= 0) {
12470 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12472 if (ret
== 0 && arg3
!= 0) {
12473 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12479 #ifdef CONFIG_SPLICE
12480 #ifdef TARGET_NR_tee
12481 case TARGET_NR_tee
:
12483 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12487 #ifdef TARGET_NR_splice
12488 case TARGET_NR_splice
:
12490 loff_t loff_in
, loff_out
;
12491 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12493 if (get_user_u64(loff_in
, arg2
)) {
12494 return -TARGET_EFAULT
;
12496 ploff_in
= &loff_in
;
12499 if (get_user_u64(loff_out
, arg4
)) {
12500 return -TARGET_EFAULT
;
12502 ploff_out
= &loff_out
;
12504 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12506 if (put_user_u64(loff_in
, arg2
)) {
12507 return -TARGET_EFAULT
;
12511 if (put_user_u64(loff_out
, arg4
)) {
12512 return -TARGET_EFAULT
;
12518 #ifdef TARGET_NR_vmsplice
12519 case TARGET_NR_vmsplice
:
12521 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12523 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12524 unlock_iovec(vec
, arg2
, arg3
, 0);
12526 ret
= -host_to_target_errno(errno
);
12531 #endif /* CONFIG_SPLICE */
12532 #ifdef CONFIG_EVENTFD
12533 #if defined(TARGET_NR_eventfd)
12534 case TARGET_NR_eventfd
:
12535 ret
= get_errno(eventfd(arg1
, 0));
12537 fd_trans_register(ret
, &target_eventfd_trans
);
12541 #if defined(TARGET_NR_eventfd2)
12542 case TARGET_NR_eventfd2
:
12544 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK_MASK
| TARGET_O_CLOEXEC
));
12545 if (arg2
& TARGET_O_NONBLOCK
) {
12546 host_flags
|= O_NONBLOCK
;
12548 if (arg2
& TARGET_O_CLOEXEC
) {
12549 host_flags
|= O_CLOEXEC
;
12551 ret
= get_errno(eventfd(arg1
, host_flags
));
12553 fd_trans_register(ret
, &target_eventfd_trans
);
12558 #endif /* CONFIG_EVENTFD */
12559 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12560 case TARGET_NR_fallocate
:
12561 #if TARGET_ABI_BITS == 32
12562 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12563 target_offset64(arg5
, arg6
)));
12565 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12569 #if defined(CONFIG_SYNC_FILE_RANGE)
12570 #if defined(TARGET_NR_sync_file_range)
12571 case TARGET_NR_sync_file_range
:
12572 #if TARGET_ABI_BITS == 32
12573 #if defined(TARGET_MIPS)
12574 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12575 target_offset64(arg5
, arg6
), arg7
));
12577 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12578 target_offset64(arg4
, arg5
), arg6
));
12579 #endif /* !TARGET_MIPS */
12581 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12585 #if defined(TARGET_NR_sync_file_range2) || \
12586 defined(TARGET_NR_arm_sync_file_range)
12587 #if defined(TARGET_NR_sync_file_range2)
12588 case TARGET_NR_sync_file_range2
:
12590 #if defined(TARGET_NR_arm_sync_file_range)
12591 case TARGET_NR_arm_sync_file_range
:
12593 /* This is like sync_file_range but the arguments are reordered */
12594 #if TARGET_ABI_BITS == 32
12595 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12596 target_offset64(arg5
, arg6
), arg2
));
12598 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12603 #if defined(TARGET_NR_signalfd4)
12604 case TARGET_NR_signalfd4
:
12605 return do_signalfd4(arg1
, arg2
, arg4
);
12607 #if defined(TARGET_NR_signalfd)
12608 case TARGET_NR_signalfd
:
12609 return do_signalfd4(arg1
, arg2
, 0);
12611 #if defined(CONFIG_EPOLL)
12612 #if defined(TARGET_NR_epoll_create)
12613 case TARGET_NR_epoll_create
:
12614 return get_errno(epoll_create(arg1
));
12616 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12617 case TARGET_NR_epoll_create1
:
12618 return get_errno(epoll_create1(target_to_host_bitmask(arg1
, fcntl_flags_tbl
)));
12620 #if defined(TARGET_NR_epoll_ctl)
12621 case TARGET_NR_epoll_ctl
:
12623 struct epoll_event ep
;
12624 struct epoll_event
*epp
= 0;
12626 if (arg2
!= EPOLL_CTL_DEL
) {
12627 struct target_epoll_event
*target_ep
;
12628 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12629 return -TARGET_EFAULT
;
12631 ep
.events
= tswap32(target_ep
->events
);
12633 * The epoll_data_t union is just opaque data to the kernel,
12634 * so we transfer all 64 bits across and need not worry what
12635 * actual data type it is.
12637 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12638 unlock_user_struct(target_ep
, arg4
, 0);
12641 * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12642 * non-null pointer, even though this argument is ignored.
12647 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12651 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12652 #if defined(TARGET_NR_epoll_wait)
12653 case TARGET_NR_epoll_wait
:
12655 #if defined(TARGET_NR_epoll_pwait)
12656 case TARGET_NR_epoll_pwait
:
12659 struct target_epoll_event
*target_ep
;
12660 struct epoll_event
*ep
;
12662 int maxevents
= arg3
;
12663 int timeout
= arg4
;
12665 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12666 return -TARGET_EINVAL
;
12669 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12670 maxevents
* sizeof(struct target_epoll_event
), 1);
12672 return -TARGET_EFAULT
;
12675 ep
= g_try_new(struct epoll_event
, maxevents
);
12677 unlock_user(target_ep
, arg2
, 0);
12678 return -TARGET_ENOMEM
;
12682 #if defined(TARGET_NR_epoll_pwait)
12683 case TARGET_NR_epoll_pwait
:
12685 sigset_t
*set
= NULL
;
12688 ret
= process_sigsuspend_mask(&set
, arg5
, arg6
);
12694 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12695 set
, SIGSET_T_SIZE
));
12698 finish_sigsuspend_mask(ret
);
12703 #if defined(TARGET_NR_epoll_wait)
12704 case TARGET_NR_epoll_wait
:
12705 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12710 ret
= -TARGET_ENOSYS
;
12712 if (!is_error(ret
)) {
12714 for (i
= 0; i
< ret
; i
++) {
12715 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12716 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12718 unlock_user(target_ep
, arg2
,
12719 ret
* sizeof(struct target_epoll_event
));
12721 unlock_user(target_ep
, arg2
, 0);
12728 #ifdef TARGET_NR_prlimit64
12729 case TARGET_NR_prlimit64
:
12731 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12732 struct target_rlimit64
*target_rnew
, *target_rold
;
12733 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12734 int resource
= target_to_host_resource(arg2
);
12736 if (arg3
&& (resource
!= RLIMIT_AS
&&
12737 resource
!= RLIMIT_DATA
&&
12738 resource
!= RLIMIT_STACK
)) {
12739 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12740 return -TARGET_EFAULT
;
12742 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12743 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12744 unlock_user_struct(target_rnew
, arg3
, 0);
12748 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12749 if (!is_error(ret
) && arg4
) {
12750 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12751 return -TARGET_EFAULT
;
12753 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12754 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12755 unlock_user_struct(target_rold
, arg4
, 1);
12760 #ifdef TARGET_NR_gethostname
12761 case TARGET_NR_gethostname
:
12763 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12765 ret
= get_errno(gethostname(name
, arg2
));
12766 unlock_user(name
, arg1
, arg2
);
12768 ret
= -TARGET_EFAULT
;
12773 #ifdef TARGET_NR_atomic_cmpxchg_32
12774 case TARGET_NR_atomic_cmpxchg_32
:
12776 /* should use start_exclusive from main.c */
12777 abi_ulong mem_value
;
12778 if (get_user_u32(mem_value
, arg6
)) {
12779 target_siginfo_t info
;
12780 info
.si_signo
= SIGSEGV
;
12782 info
.si_code
= TARGET_SEGV_MAPERR
;
12783 info
._sifields
._sigfault
._addr
= arg6
;
12784 queue_signal(cpu_env
, info
.si_signo
, QEMU_SI_FAULT
, &info
);
12788 if (mem_value
== arg2
)
12789 put_user_u32(arg1
, arg6
);
12793 #ifdef TARGET_NR_atomic_barrier
12794 case TARGET_NR_atomic_barrier
:
12795 /* Like the kernel implementation and the
12796 qemu arm barrier, no-op this? */
12800 #ifdef TARGET_NR_timer_create
12801 case TARGET_NR_timer_create
:
12803 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12805 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12808 int timer_index
= next_free_host_timer();
12810 if (timer_index
< 0) {
12811 ret
= -TARGET_EAGAIN
;
12813 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12816 phost_sevp
= &host_sevp
;
12817 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12823 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12827 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12828 return -TARGET_EFAULT
;
12836 #ifdef TARGET_NR_timer_settime
12837 case TARGET_NR_timer_settime
:
12839 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12840 * struct itimerspec * old_value */
12841 target_timer_t timerid
= get_timer_id(arg1
);
12845 } else if (arg3
== 0) {
12846 ret
= -TARGET_EINVAL
;
12848 timer_t htimer
= g_posix_timers
[timerid
];
12849 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12851 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12852 return -TARGET_EFAULT
;
12855 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12856 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12857 return -TARGET_EFAULT
;
12864 #ifdef TARGET_NR_timer_settime64
12865 case TARGET_NR_timer_settime64
:
12867 target_timer_t timerid
= get_timer_id(arg1
);
12871 } else if (arg3
== 0) {
12872 ret
= -TARGET_EINVAL
;
12874 timer_t htimer
= g_posix_timers
[timerid
];
12875 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12877 if (target_to_host_itimerspec64(&hspec_new
, arg3
)) {
12878 return -TARGET_EFAULT
;
12881 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12882 if (arg4
&& host_to_target_itimerspec64(arg4
, &hspec_old
)) {
12883 return -TARGET_EFAULT
;
12890 #ifdef TARGET_NR_timer_gettime
12891 case TARGET_NR_timer_gettime
:
12893 /* args: timer_t timerid, struct itimerspec *curr_value */
12894 target_timer_t timerid
= get_timer_id(arg1
);
12898 } else if (!arg2
) {
12899 ret
= -TARGET_EFAULT
;
12901 timer_t htimer
= g_posix_timers
[timerid
];
12902 struct itimerspec hspec
;
12903 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12905 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12906 ret
= -TARGET_EFAULT
;
12913 #ifdef TARGET_NR_timer_gettime64
12914 case TARGET_NR_timer_gettime64
:
12916 /* args: timer_t timerid, struct itimerspec64 *curr_value */
12917 target_timer_t timerid
= get_timer_id(arg1
);
12921 } else if (!arg2
) {
12922 ret
= -TARGET_EFAULT
;
12924 timer_t htimer
= g_posix_timers
[timerid
];
12925 struct itimerspec hspec
;
12926 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12928 if (host_to_target_itimerspec64(arg2
, &hspec
)) {
12929 ret
= -TARGET_EFAULT
;
12936 #ifdef TARGET_NR_timer_getoverrun
12937 case TARGET_NR_timer_getoverrun
:
12939 /* args: timer_t timerid */
12940 target_timer_t timerid
= get_timer_id(arg1
);
12945 timer_t htimer
= g_posix_timers
[timerid
];
12946 ret
= get_errno(timer_getoverrun(htimer
));
12952 #ifdef TARGET_NR_timer_delete
12953 case TARGET_NR_timer_delete
:
12955 /* args: timer_t timerid */
12956 target_timer_t timerid
= get_timer_id(arg1
);
12961 timer_t htimer
= g_posix_timers
[timerid
];
12962 ret
= get_errno(timer_delete(htimer
));
12963 g_posix_timers
[timerid
] = 0;
12969 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12970 case TARGET_NR_timerfd_create
:
12971 return get_errno(timerfd_create(arg1
,
12972 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12975 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12976 case TARGET_NR_timerfd_gettime
:
12978 struct itimerspec its_curr
;
12980 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12982 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12983 return -TARGET_EFAULT
;
12989 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12990 case TARGET_NR_timerfd_gettime64
:
12992 struct itimerspec its_curr
;
12994 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12996 if (arg2
&& host_to_target_itimerspec64(arg2
, &its_curr
)) {
12997 return -TARGET_EFAULT
;
13003 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13004 case TARGET_NR_timerfd_settime
:
13006 struct itimerspec its_new
, its_old
, *p_new
;
13009 if (target_to_host_itimerspec(&its_new
, arg3
)) {
13010 return -TARGET_EFAULT
;
13017 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13019 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
13020 return -TARGET_EFAULT
;
13026 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13027 case TARGET_NR_timerfd_settime64
:
13029 struct itimerspec its_new
, its_old
, *p_new
;
13032 if (target_to_host_itimerspec64(&its_new
, arg3
)) {
13033 return -TARGET_EFAULT
;
13040 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
13042 if (arg4
&& host_to_target_itimerspec64(arg4
, &its_old
)) {
13043 return -TARGET_EFAULT
;
13049 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13050 case TARGET_NR_ioprio_get
:
13051 return get_errno(ioprio_get(arg1
, arg2
));
13054 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13055 case TARGET_NR_ioprio_set
:
13056 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
13059 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13060 case TARGET_NR_setns
:
13061 return get_errno(setns(arg1
, arg2
));
13063 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13064 case TARGET_NR_unshare
:
13065 return get_errno(unshare(arg1
));
13067 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13068 case TARGET_NR_kcmp
:
13069 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
13071 #ifdef TARGET_NR_swapcontext
13072 case TARGET_NR_swapcontext
:
13073 /* PowerPC specific. */
13074 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
13076 #ifdef TARGET_NR_memfd_create
13077 case TARGET_NR_memfd_create
:
13078 p
= lock_user_string(arg1
);
13080 return -TARGET_EFAULT
;
13082 ret
= get_errno(memfd_create(p
, arg2
));
13083 fd_trans_unregister(ret
);
13084 unlock_user(p
, arg1
, 0);
13087 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13088 case TARGET_NR_membarrier
:
13089 return get_errno(membarrier(arg1
, arg2
));
13092 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13093 case TARGET_NR_copy_file_range
:
13095 loff_t inoff
, outoff
;
13096 loff_t
*pinoff
= NULL
, *poutoff
= NULL
;
13099 if (get_user_u64(inoff
, arg2
)) {
13100 return -TARGET_EFAULT
;
13105 if (get_user_u64(outoff
, arg4
)) {
13106 return -TARGET_EFAULT
;
13110 /* Do not sign-extend the count parameter. */
13111 ret
= get_errno(safe_copy_file_range(arg1
, pinoff
, arg3
, poutoff
,
13112 (abi_ulong
)arg5
, arg6
));
13113 if (!is_error(ret
) && ret
> 0) {
13115 if (put_user_u64(inoff
, arg2
)) {
13116 return -TARGET_EFAULT
;
13120 if (put_user_u64(outoff
, arg4
)) {
13121 return -TARGET_EFAULT
;
13129 #if defined(TARGET_NR_pivot_root)
13130 case TARGET_NR_pivot_root
:
13133 p
= lock_user_string(arg1
); /* new_root */
13134 p2
= lock_user_string(arg2
); /* put_old */
13136 ret
= -TARGET_EFAULT
;
13138 ret
= get_errno(pivot_root(p
, p2
));
13140 unlock_user(p2
, arg2
, 0);
13141 unlock_user(p
, arg1
, 0);
13147 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
13148 return -TARGET_ENOSYS
;
13153 abi_long
do_syscall(CPUArchState
*cpu_env
, int num
, abi_long arg1
,
13154 abi_long arg2
, abi_long arg3
, abi_long arg4
,
13155 abi_long arg5
, abi_long arg6
, abi_long arg7
,
13158 CPUState
*cpu
= env_cpu(cpu_env
);
13161 #ifdef DEBUG_ERESTARTSYS
13162 /* Debug-only code for exercising the syscall-restart code paths
13163 * in the per-architecture cpu main loops: restart every syscall
13164 * the guest makes once before letting it through.
13170 return -QEMU_ERESTARTSYS
;
13175 record_syscall_start(cpu
, num
, arg1
,
13176 arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
13178 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13179 print_syscall(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
13182 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
13183 arg5
, arg6
, arg7
, arg8
);
13185 if (unlikely(qemu_loglevel_mask(LOG_STRACE
))) {
13186 print_syscall_ret(cpu_env
, num
, ret
, arg1
, arg2
,
13187 arg3
, arg4
, arg5
, arg6
);
13190 record_syscall_return(cpu
, num
, ret
);